author     eroman@chromium.org <eroman@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>   2012-02-15 05:12:47 +0000
committer  eroman@chromium.org <eroman@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>   2012-02-15 05:12:47 +0000
commit     515177468b25f61284c8f54f2894f4806e6b0bd3 (patch)
tree       627244616f447e0350c5c1574317bda6613393e2
parent     c65c6e3150af3dc37471c76b10d2b05cf7367aa3 (diff)
Remove PassiveLogCollector, and its supporting infrastructure.
This makes it so that about:net-internals no longer includes "passively
captured" events. This wasn't a frequently used feature, yet was adding some
memory bloat to the browser process.

BUG=90789
Review URL: http://codereview.chromium.org/9363055

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@122049 0039d316-1c4b-4281-b951-d872f2087c98
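After this change, code that wants NetLog events attaches an observer directly to ChromeNetLog and receives entries as they are emitted; nothing is buffered for later retrieval. Below is a minimal sketch modeled on the CountingObserver added to chrome_net_log_unittest.cc in this patch; the class name and the abbreviated includes are illustrative, not part of the change.

    // Sketch only: mirrors the CountingObserver pattern from the unit test.
    #include "chrome/browser/net/chrome_net_log.h"
    #include "net/base/net_log.h"

    class EventCounter : public ChromeNetLog::ThreadSafeObserverImpl {
     public:
      EventCounter()
          : ChromeNetLog::ThreadSafeObserverImpl(net::NetLog::LOG_ALL),
            count_(0) {}

      // Called for every event emitted while this observer is attached.
      virtual void OnAddEntry(net::NetLog::EventType type,
                              const base::TimeTicks& time,
                              const net::NetLog::Source& source,
                              net::NetLog::EventPhase phase,
                              net::NetLog::EventParameters* params) OVERRIDE {
        count_++;  // Events arrive live; there is no passive backlog to query.
      }

      int count() const { return count_; }

     private:
      int count_;
    };

    // Usage (sketch): attach before the events of interest, detach when done.
    //   EventCounter counter;
    //   counter.AddAsObserver(chrome_net_log);
    //   ...
    //   counter.RemoveAsObserver();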
-rw-r--r--  chrome/browser/browsing_data_remover.cc                     |    8
-rw-r--r--  chrome/browser/io_thread.cc                                 |    6
-rw-r--r--  chrome/browser/net/chrome_net_log.cc                        |   60
-rw-r--r--  chrome/browser/net/chrome_net_log.h                         |   60
-rw-r--r--  chrome/browser/net/chrome_net_log_unittest.cc               |   45
-rw-r--r--  chrome/browser/net/passive_log_collector.cc                 |  902
-rw-r--r--  chrome/browser/net/passive_log_collector.h                  |  540
-rw-r--r--  chrome/browser/net/passive_log_collector_unittest.cc        |  494
-rw-r--r--  chrome/browser/resources/net_internals/browser_bridge.js    |    6
-rw-r--r--  chrome/browser/resources/net_internals/capture_view.html    |    8
-rw-r--r--  chrome/browser/resources/net_internals/capture_view.js      |   17
-rw-r--r--  chrome/browser/resources/net_internals/help.html            |    9
-rw-r--r--  chrome/browser/resources/net_internals/log_util.js          |   27
-rw-r--r--  chrome/browser/resources/net_internals/log_view_painter.js  |   12
-rw-r--r--  chrome/browser/resources/net_internals/proxy_view.html      |   11
-rw-r--r--  chrome/browser/resources/net_internals/proxy_view.js        |   71
-rw-r--r--  chrome/browser/resources/net_internals/source_tracker.js    |   43
-rw-r--r--  chrome/browser/ui/webui/net_internals/net_internals_ui.cc   |   27
-rw-r--r--  chrome/chrome_browser.gypi                                  |    2
-rw-r--r--  chrome/chrome_tests.gypi                                    |    1
20 files changed, 81 insertions(+), 2268 deletions(-)
diff --git a/chrome/browser/browsing_data_remover.cc b/chrome/browser/browsing_data_remover.cc
index c2bf211..ea084bb 100644
--- a/chrome/browser/browsing_data_remover.cc
+++ b/chrome/browser/browsing_data_remover.cc
@@ -22,7 +22,6 @@
#include "chrome/browser/extensions/extension_special_storage_policy.h"
#include "chrome/browser/history/history.h"
#include "chrome/browser/io_thread.h"
-#include "chrome/browser/net/chrome_net_log.h"
#include "chrome/browser/net/chrome_url_request_context.h"
#include "chrome/browser/net/predictor.h"
#include "chrome/browser/password_manager/password_store.h"
@@ -412,13 +411,6 @@ void BrowsingDataRemover::NotifyAndDeleteIfDone() {
if (!all_done())
return;
- // The NetLog contains download history, but may also contain form data,
- // cookies and passwords. Simplest just to always clear it. Must be cleared
- // after the cache, as cleaning up the disk cache exposes some of the history
- // in the NetLog.
- if (g_browser_process->net_log())
- g_browser_process->net_log()->ClearAllPassivelyCapturedEvents();
-
set_removing(false);
// Send global notification, then notify any explicit observers.
diff --git a/chrome/browser/io_thread.cc b/chrome/browser/io_thread.cc
index cecb2d0..d2f6006 100644
--- a/chrome/browser/io_thread.cc
+++ b/chrome/browser/io_thread.cc
@@ -25,7 +25,6 @@
#include "chrome/browser/net/chrome_network_delegate.h"
#include "chrome/browser/net/chrome_url_request_context.h"
#include "chrome/browser/net/connect_interceptor.h"
-#include "chrome/browser/net/passive_log_collector.h"
#include "chrome/browser/net/pref_proxy_config_tracker.h"
#include "chrome/browser/net/proxy_service_factory.h"
#include "chrome/browser/net/sdch_dictionary_fetcher.h"
@@ -557,11 +556,6 @@ void IOThread::ChangedToOnTheRecordOnIOThread() {
// Clear the host cache to avoid showing entries from the OTR session
// in about:net-internals.
ClearHostCache();
-
- // Clear all of the passively logged data.
- // TODO(eroman): this is a bit heavy handed, really all we need to do is
- // clear the data pertaining to incognito context.
- net_log_->ClearAllPassivelyCapturedEvents();
}
void IOThread::InitSystemRequestContext() {
diff --git a/chrome/browser/net/chrome_net_log.cc b/chrome/browser/net/chrome_net_log.cc
index 212dc46..86c8719 100644
--- a/chrome/browser/net/chrome_net_log.cc
+++ b/chrome/browser/net/chrome_net_log.cc
@@ -1,11 +1,9 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "chrome/browser/net/chrome_net_log.h"
-#include <algorithm>
-
#include "base/command_line.h"
#include "base/logging.h"
#include "base/string_number_conversions.h"
@@ -13,7 +11,6 @@
#include "base/values.h"
#include "chrome/browser/net/load_timing_observer.h"
#include "chrome/browser/net/net_log_logger.h"
-#include "chrome/browser/net/passive_log_collector.h"
#include "chrome/common/chrome_switches.h"
ChromeNetLog::ThreadSafeObserverImpl::ThreadSafeObserverImpl(LogLevel log_level)
@@ -46,16 +43,6 @@ void ChromeNetLog::ThreadSafeObserverImpl::SetLogLevel(
net_log_->UpdateLogLevel();
}
-void ChromeNetLog::ThreadSafeObserverImpl::
-AddAsObserverAndGetAllPassivelyCapturedEvents(
- ChromeNetLog* net_log,
- EntryList* entries) {
- DCHECK(!net_log_);
- net_log_ = net_log;
- net_log_->AddObserverAndGetAllPassivelyCapturedEvents(&internal_observer_,
- entries);
-}
-
void ChromeNetLog::ThreadSafeObserverImpl::AssertNetLogLockAcquired() const {
if (net_log_)
net_log_->lock_.AssertAcquired();
@@ -82,27 +69,10 @@ void ChromeNetLog::ThreadSafeObserverImpl::PassThroughObserver::SetLogLevel(
log_level_ = log_level;
}
-ChromeNetLog::Entry::Entry(uint32 order,
- net::NetLog::EventType type,
- const base::TimeTicks& time,
- net::NetLog::Source source,
- net::NetLog::EventPhase phase,
- net::NetLog::EventParameters* params)
- : order(order),
- type(type),
- time(time),
- source(source),
- phase(phase),
- params(params) {
-}
-
-ChromeNetLog::Entry::~Entry() {}
-
ChromeNetLog::ChromeNetLog()
: last_id_(0),
base_log_level_(LOG_BASIC),
effective_log_level_(LOG_BASIC),
- passive_collector_(new PassiveLogCollector),
load_timing_observer_(new LoadTimingObserver) {
const CommandLine* command_line = CommandLine::ForCurrentProcess();
// Adjust base log level based on command line switch, if present.
@@ -119,7 +89,6 @@ ChromeNetLog::ChromeNetLog()
}
}
- passive_collector_->AddAsObserver(this);
load_timing_observer_->AddAsObserver(this);
if (command_line->HasSwitch(switches::kLogNetLog)) {
@@ -130,7 +99,6 @@ ChromeNetLog::ChromeNetLog()
}
ChromeNetLog::~ChromeNetLog() {
- passive_collector_->RemoveAsObserver();
load_timing_observer_->RemoveAsObserver();
if (net_log_logger_.get()) {
net_log_logger_->RemoveAsObserver();
@@ -162,7 +130,8 @@ net::NetLog::LogLevel ChromeNetLog::GetLogLevel() const {
void ChromeNetLog::AddThreadSafeObserver(
net::NetLog::ThreadSafeObserver* observer) {
base::AutoLock lock(lock_);
- AddObserverWhileLockHeld(observer);
+ observers_.AddObserver(observer);
+ UpdateLogLevel();
}
void ChromeNetLog::RemoveThreadSafeObserver(
@@ -172,23 +141,6 @@ void ChromeNetLog::RemoveThreadSafeObserver(
UpdateLogLevel();
}
-void ChromeNetLog::AddObserverAndGetAllPassivelyCapturedEvents(
- net::NetLog::ThreadSafeObserver* observer, EntryList* passive_entries) {
- base::AutoLock lock(lock_);
- AddObserverWhileLockHeld(observer);
- passive_collector_->GetAllCapturedEvents(passive_entries);
-}
-
-void ChromeNetLog::GetAllPassivelyCapturedEvents(EntryList* passive_entries) {
- base::AutoLock lock(lock_);
- passive_collector_->GetAllCapturedEvents(passive_entries);
-}
-
-void ChromeNetLog::ClearAllPassivelyCapturedEvents() {
- base::AutoLock lock(lock_);
- passive_collector_->Clear();
-}
-
void ChromeNetLog::UpdateLogLevel() {
lock_.AssertAcquired();
@@ -204,9 +156,3 @@ void ChromeNetLog::UpdateLogLevel() {
base::subtle::NoBarrier_Store(&effective_log_level_,
new_effective_log_level);
}
-
-void ChromeNetLog::AddObserverWhileLockHeld(
- net::NetLog::ThreadSafeObserver* observer) {
- observers_.AddObserver(observer);
- UpdateLogLevel();
-}
diff --git a/chrome/browser/net/chrome_net_log.h b/chrome/browser/net/chrome_net_log.h
index a032c33..78ee636 100644
--- a/chrome/browser/net/chrome_net_log.h
+++ b/chrome/browser/net/chrome_net_log.h
@@ -1,4 +1,4 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -6,8 +6,6 @@
#define CHROME_BROWSER_NET_CHROME_NET_LOG_H_
#pragma once
-#include <vector>
-
#include "base/atomicops.h"
#include "base/memory/scoped_ptr.h"
#include "base/observer_list.h"
@@ -17,7 +15,6 @@
class LoadTimingObserver;
class NetLogLogger;
-class PassiveLogCollector;
// ChromeNetLog is an implementation of NetLog that dispatches network log
// messages to a list of observers.
@@ -25,35 +22,8 @@ class PassiveLogCollector;
// All methods are thread safe, with the exception that no ChromeNetLog or
// ChromeNetLog::ThreadSafeObserverImpl functions may be called by an observer's
// OnAddEntry() method. Doing so will result in a deadlock.
-//
-// By default, ChromeNetLog will attach the observer PassiveLogCollector which
-// will keep track of recent request information (which used when displaying
-// the about:net-internals page).
-//
class ChromeNetLog : public net::NetLog {
public:
- // This structure encapsulates all of the parameters of an event,
- // including an "order" field that identifies when it was captured relative
- // to other events.
- struct Entry {
- Entry(uint32 order,
- net::NetLog::EventType type,
- const base::TimeTicks& time,
- net::NetLog::Source source,
- net::NetLog::EventPhase phase,
- net::NetLog::EventParameters* params);
- ~Entry();
-
- uint32 order;
- net::NetLog::EventType type;
- base::TimeTicks time;
- net::NetLog::Source source;
- net::NetLog::EventPhase phase;
- scoped_refptr<net::NetLog::EventParameters> params;
- };
-
- typedef std::vector<Entry> EntryList;
-
// Base class for observing the events logged by the network
// stack. This has some nice-to-have functionality for use by code
// within chrome/, but any net::NetLog::ThreadSafeObserver may be
@@ -66,8 +36,7 @@ class ChromeNetLog : public net::NetLog {
class ThreadSafeObserverImpl {
public:
// Observers that need to see the full granularity of events can
- // specify LOG_ALL. However doing so will have performance consequences,
- // and may cause PassiveLogCollector to use more memory than anticipated.
+ // specify LOG_ALL.
explicit ThreadSafeObserverImpl(LogLevel log_level);
virtual ~ThreadSafeObserverImpl();
@@ -85,10 +54,6 @@ class ChromeNetLog : public net::NetLog {
void SetLogLevel(LogLevel log_level);
- void AddAsObserverAndGetAllPassivelyCapturedEvents(
- ChromeNetLog *net_log,
- EntryList* passive_entries);
-
protected:
void AssertNetLogLockAcquired() const;
@@ -135,35 +100,21 @@ class ChromeNetLog : public net::NetLog {
virtual uint32 NextID() OVERRIDE;
virtual LogLevel GetLogLevel() const OVERRIDE;
- void GetAllPassivelyCapturedEvents(EntryList* passive_entries);
-
- void ClearAllPassivelyCapturedEvents();
-
LoadTimingObserver* load_timing_observer() {
return load_timing_observer_.get();
}
private:
- void AddObserverWhileLockHeld(ThreadSafeObserver* observer);
-
// NetLog implementation
virtual void AddThreadSafeObserver(ThreadSafeObserver* observer) OVERRIDE;
virtual void RemoveThreadSafeObserver(ThreadSafeObserver* observer) OVERRIDE;
- // Adds |observer| and writes all passively captured events to
- // |passive_entries|. Guarantees that no events in |passive_entries| will be
- // sent to |observer| and all future events that have yet been sent to the
- // PassiveLogCollector will be sent to |observer|.
- void AddObserverAndGetAllPassivelyCapturedEvents(ThreadSafeObserver* observer,
- EntryList* passive_entries);
-
-
// Called whenever an observer is added or removed, or changes its log level.
// Must have acquired |lock_| prior to calling.
void UpdateLogLevel();
- // |lock_| protects access to |observers_| and, indirectly, to
- // |passive_collector_|. Should not be acquired by observers.
+ // |lock_| protects access to |observers_|. Should not be acquired
+ // by observers.
base::Lock lock_;
// Last assigned source ID. Incremented to get the next one.
@@ -176,9 +127,6 @@ class ChromeNetLog : public net::NetLog {
// The current log level.
base::subtle::Atomic32 effective_log_level_;
- // Not thread safe. Must only be used when |lock_| is acquired.
- scoped_ptr<PassiveLogCollector> passive_collector_;
-
scoped_ptr<LoadTimingObserver> load_timing_observer_;
scoped_ptr<NetLogLogger> net_log_logger_;
diff --git a/chrome/browser/net/chrome_net_log_unittest.cc b/chrome/browser/net/chrome_net_log_unittest.cc
index dead901..9369f65 100644
--- a/chrome/browser/net/chrome_net_log_unittest.cc
+++ b/chrome/browser/net/chrome_net_log_unittest.cc
@@ -1,4 +1,4 @@
-// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -31,7 +31,6 @@ class ChromeNetLogTestThread : public base::SimpleThread {
log_->AddEntry(net::NetLog::TYPE_SOCKET_ALIVE, base::TimeTicks(),
source, net::NetLog::PHASE_BEGIN, NULL);
}
- log_->ClearAllPassivelyCapturedEvents();
}
void ReallyStart() {
@@ -48,14 +47,41 @@ class ChromeNetLogTestThread : public base::SimpleThread {
DISALLOW_COPY_AND_ASSIGN(ChromeNetLogTestThread);
};
+class CountingObserver : public ChromeNetLog::ThreadSafeObserverImpl {
+ public:
+ CountingObserver()
+ : ChromeNetLog::ThreadSafeObserverImpl(net::NetLog::LOG_ALL),
+ count_(0) {}
+
+ virtual void OnAddEntry(net::NetLog::EventType type,
+ const base::TimeTicks& time,
+ const net::NetLog::Source& source,
+ net::NetLog::EventPhase phase,
+ net::NetLog::EventParameters* params) OVERRIDE {
+ count_++;
+ }
+
+ int count() const { return count_; }
+
+ private:
+ int count_;
+};
+
} // namespace
-// Attempts to check thread safety, exercising checks in ChromeNetLog and
-// PassiveLogCollector.
+// Makes sure that events are dispatched to all observers, and that this
+// operation works correctly when run on multiple threads.
TEST(ChromeNetLogTest, NetLogThreads) {
ChromeNetLog log;
ChromeNetLogTestThread threads[kThreads];
+ // Attach some observers
+ CountingObserver observers[3];
+ for (size_t i = 0; i < arraysize(observers); ++i)
+ observers[i].AddAsObserver(&log);
+
+ // Run a bunch of threads to completion, each of which will emit events to
+ // |log|.
for (int i = 0; i < kThreads; ++i) {
threads[i].Init(&log);
threads[i].Start();
@@ -67,7 +93,12 @@ TEST(ChromeNetLogTest, NetLogThreads) {
for (int i = 0; i < kThreads; ++i)
threads[i].Join();
- ChromeNetLog::EntryList entries;
- log.GetAllPassivelyCapturedEvents(&entries);
- EXPECT_EQ(0u, entries.size());
+ // Check that each observer saw the emitted events.
+ const int kTotalEvents = kThreads * kEvents;
+ for (size_t i = 0; i < arraysize(observers); ++i)
+ EXPECT_EQ(kTotalEvents, observers[i].count());
+
+ // Detach all the observers
+ for (size_t i = 0; i < arraysize(observers); ++i)
+ observers[i].RemoveAsObserver();
}
diff --git a/chrome/browser/net/passive_log_collector.cc b/chrome/browser/net/passive_log_collector.cc
deleted file mode 100644
index 661d60c..0000000
--- a/chrome/browser/net/passive_log_collector.cc
+++ /dev/null
@@ -1,902 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "chrome/browser/net/passive_log_collector.h"
-
-#include <algorithm>
-
-#include "base/compiler_specific.h"
-#include "base/string_util.h"
-#include "base/format_macros.h"
-#include "net/url_request/url_request_netlog_params.h"
-
-namespace {
-
-// TODO(eroman): Do something with the truncation count.
-
-const size_t kMaxNumEntriesPerLog = 30;
-
-void AddEntryToSourceInfo(const ChromeNetLog::Entry& entry,
- PassiveLogCollector::SourceInfo* out_info) {
- // Start dropping new entries when the log has gotten too big.
- if (out_info->entries.size() + 1 <= kMaxNumEntriesPerLog) {
- out_info->entries.push_back(entry);
- } else {
- out_info->num_entries_truncated += 1;
- out_info->entries[kMaxNumEntriesPerLog - 1] = entry;
- }
-}
-
-// Comparator to sort entries by their |order| property, ascending.
-bool SortByOrderComparator(const ChromeNetLog::Entry& a,
- const ChromeNetLog::Entry& b) {
- return a.order < b.order;
-}
-
-} // namespace
-
-PassiveLogCollector::SourceInfo::SourceInfo()
- : source_id(net::NetLog::Source::kInvalidId),
- num_entries_truncated(0),
- reference_count(0),
- is_alive(true) {
-}
-
-PassiveLogCollector::SourceInfo::~SourceInfo() {}
-
-//----------------------------------------------------------------------------
-// PassiveLogCollector
-//----------------------------------------------------------------------------
-
-PassiveLogCollector::PassiveLogCollector()
- : ThreadSafeObserverImpl(net::NetLog::LOG_BASIC),
- ALLOW_THIS_IN_INITIALIZER_LIST(connect_job_tracker_(this)),
- ALLOW_THIS_IN_INITIALIZER_LIST(url_request_tracker_(this)),
- ALLOW_THIS_IN_INITIALIZER_LIST(socket_stream_tracker_(this)),
- ALLOW_THIS_IN_INITIALIZER_LIST(http_stream_job_tracker_(this)),
- ALLOW_THIS_IN_INITIALIZER_LIST(
- exponential_backoff_throttling_tracker_(this)),
- num_events_seen_(0) {
-
- // Define the mapping between source types and the tracker objects.
- memset(&trackers_[0], 0, sizeof(trackers_));
- trackers_[net::NetLog::SOURCE_NONE] = &global_source_tracker_;
- trackers_[net::NetLog::SOURCE_URL_REQUEST] = &url_request_tracker_;
- trackers_[net::NetLog::SOURCE_SOCKET_STREAM] = &socket_stream_tracker_;
- trackers_[net::NetLog::SOURCE_CONNECT_JOB] = &connect_job_tracker_;
- trackers_[net::NetLog::SOURCE_SOCKET] = &socket_tracker_;
- trackers_[net::NetLog::SOURCE_PROXY_SCRIPT_DECIDER] =
- &proxy_script_decider_tracker_;
- trackers_[net::NetLog::SOURCE_SPDY_SESSION] = &spdy_session_tracker_;
- trackers_[net::NetLog::SOURCE_HOST_RESOLVER_IMPL_REQUEST] =
- &dns_request_tracker_;
- trackers_[net::NetLog::SOURCE_HOST_RESOLVER_IMPL_JOB] = &dns_job_tracker_;
- trackers_[net::NetLog::SOURCE_HOST_RESOLVER_IMPL_PROC_TASK] =
- &dns_proc_task_tracker_;
- trackers_[net::NetLog::SOURCE_DISK_CACHE_ENTRY] = &disk_cache_entry_tracker_;
- trackers_[net::NetLog::SOURCE_MEMORY_CACHE_ENTRY] = &mem_cache_entry_tracker_;
- trackers_[net::NetLog::SOURCE_HTTP_STREAM_JOB] = &http_stream_job_tracker_;
- trackers_[net::NetLog::SOURCE_EXPONENTIAL_BACKOFF_THROTTLING] =
- &exponential_backoff_throttling_tracker_;
- trackers_[net::NetLog::SOURCE_DNS_TRANSACTION] = &dns_transaction_tracker_;
- trackers_[net::NetLog::SOURCE_ASYNC_HOST_RESOLVER_REQUEST] =
- &async_host_resolver_request_tracker_;
- trackers_[net::NetLog::SOURCE_UDP_SOCKET] = &udp_socket_tracker_;
- trackers_[net::NetLog::SOURCE_CERT_VERIFIER_JOB] =
- &cert_verifier_job_tracker_;
- trackers_[net::NetLog::SOURCE_HTTP_PIPELINED_CONNECTION] =
- &http_pipelined_connection_tracker_;
- trackers_[net::NetLog::SOURCE_DOWNLOAD] =
- &download_tracker_;
- trackers_[net::NetLog::SOURCE_FILESTREAM] =
- &file_stream_tracker_;
- // Make sure our mapping is up-to-date.
- for (size_t i = 0; i < arraysize(trackers_); ++i)
- DCHECK(trackers_[i]) << "Unhandled SourceType: " << i;
-}
-
-PassiveLogCollector::~PassiveLogCollector() {
-}
-
-void PassiveLogCollector::OnAddEntry(
- net::NetLog::EventType type,
- const base::TimeTicks& time,
- const net::NetLog::Source& source,
- net::NetLog::EventPhase phase,
- net::NetLog::EventParameters* params) {
- AssertNetLogLockAcquired();
- // Package the parameters into a single struct for convenience.
- ChromeNetLog::Entry entry(num_events_seen_++, type, time, source, phase,
- params);
-
- SourceTrackerInterface* tracker = GetTrackerForSourceType(entry.source.type);
- if (tracker)
- tracker->OnAddEntry(entry);
-}
-
-void PassiveLogCollector::Clear() {
- AssertNetLogLockAcquired();
- for (size_t i = 0; i < arraysize(trackers_); ++i)
- trackers_[i]->Clear();
-}
-
-PassiveLogCollector::SourceTrackerInterface*
-PassiveLogCollector::GetTrackerForSourceType(
- net::NetLog::SourceType source_type) {
- CHECK_LT(source_type, static_cast<int>(arraysize(trackers_)));
- CHECK_GE(source_type, 0);
- return trackers_[source_type];
-}
-
-void PassiveLogCollector::GetAllCapturedEvents(
- ChromeNetLog::EntryList* out) const {
- AssertNetLogLockAcquired();
- out->clear();
-
- // Append all of the captured entries held by the various trackers to
- // |out|.
- for (size_t i = 0; i < arraysize(trackers_); ++i)
- trackers_[i]->AppendAllEntries(out);
-
- // Now sort the list of entries by their insertion time (ascending).
- std::sort(out->begin(), out->end(), &SortByOrderComparator);
-}
-
-std::string PassiveLogCollector::SourceInfo::GetURL() const {
- // Note: we look at the first *two* entries, since the outer REQUEST_ALIVE
- // doesn't actually contain any data.
- for (size_t i = 0; i < 2 && i < entries.size(); ++i) {
- const ChromeNetLog::Entry& entry = entries[i];
- if (entry.phase == net::NetLog::PHASE_BEGIN && entry.params) {
- switch (entry.type) {
- case net::NetLog::TYPE_URL_REQUEST_START_JOB:
- return static_cast<net::URLRequestStartEventParameters*>(
- entry.params.get())->url().possibly_invalid_spec();
- case net::NetLog::TYPE_SOCKET_STREAM_CONNECT:
- return static_cast<net::NetLogStringParameter*>(
- entry.params.get())->value();
- default:
- break;
- }
- }
- }
- return std::string();
-}
-
-//----------------------------------------------------------------------------
-// GlobalSourceTracker
-//----------------------------------------------------------------------------
-
-PassiveLogCollector::GlobalSourceTracker::GlobalSourceTracker() {}
-PassiveLogCollector::GlobalSourceTracker::~GlobalSourceTracker() {}
-
-void PassiveLogCollector::GlobalSourceTracker::OnAddEntry(
- const ChromeNetLog::Entry& entry) {
- const size_t kMaxEntries = 30u;
- entries_.push_back(entry);
- if (entries_.size() > kMaxEntries)
- entries_.pop_front();
-}
-
-void PassiveLogCollector::GlobalSourceTracker::Clear() {
- entries_.clear();
-}
-
-void PassiveLogCollector::GlobalSourceTracker::AppendAllEntries(
- ChromeNetLog::EntryList* out) const {
- out->insert(out->end(), entries_.begin(), entries_.end());
-}
-
-//----------------------------------------------------------------------------
-// SourceTracker
-//----------------------------------------------------------------------------
-
-PassiveLogCollector::SourceTracker::SourceTracker(
- size_t max_num_sources,
- size_t max_graveyard_size,
- PassiveLogCollector* parent)
- : max_num_sources_(max_num_sources),
- max_graveyard_size_(max_graveyard_size),
- parent_(parent) {
-}
-
-PassiveLogCollector::SourceTracker::~SourceTracker() {}
-
-void PassiveLogCollector::SourceTracker::OnAddEntry(
- const ChromeNetLog::Entry& entry) {
- // Lookup or insert a new entry into the bounded map.
- SourceIDToInfoMap::iterator it = sources_.find(entry.source.id);
- if (it == sources_.end()) {
- if (sources_.size() >= max_num_sources_) {
- LOG(WARNING) << "The passive log data has grown larger "
- "than expected, resetting";
- Clear();
- }
- it = sources_.insert(
- SourceIDToInfoMap::value_type(entry.source.id, SourceInfo())).first;
- it->second.source_id = entry.source.id;
- }
-
- SourceInfo& info = it->second;
- Action result = DoAddEntry(entry, &info);
-
- if (result != ACTION_NONE) {
- // We are either queuing it for deletion, or deleting it immediately.
- // If someone else holds a reference to this source, defer the deletion
- // until all the references are released.
- info.is_alive = false;
- if (info.reference_count == 0) {
- switch (result) {
- case ACTION_MOVE_TO_GRAVEYARD:
- AddToDeletionQueue(info.source_id);
- break;
- case ACTION_DELETE:
- DeleteSourceInfo(info.source_id);
- break;
- default:
- NOTREACHED();
- break;
- }
- }
- }
-}
-
-void PassiveLogCollector::SourceTracker::DeleteSourceInfo(
- uint32 source_id) {
- SourceIDToInfoMap::iterator it = sources_.find(source_id);
- if (it == sources_.end()) {
- // TODO(eroman): Is this happening? And if so, why. Remove this
- // once the cause is understood.
- LOG(WARNING) << "Tried to delete info for nonexistent source";
- return;
- }
- // The source should not be in the deletion queue.
- CHECK(std::find(deletion_queue_.begin(), deletion_queue_.end(),
- source_id) == deletion_queue_.end());
- ReleaseAllReferencesToDependencies(&(it->second));
- sources_.erase(it);
-}
-
-void PassiveLogCollector::SourceTracker::Clear() {
- deletion_queue_.clear();
-
- // Release all references held to dependent sources.
- for (SourceIDToInfoMap::iterator it = sources_.begin();
- it != sources_.end();
- ++it) {
- ReleaseAllReferencesToDependencies(&(it->second));
- }
- sources_.clear();
-}
-
-void PassiveLogCollector::SourceTracker::AppendAllEntries(
- ChromeNetLog::EntryList* out) const {
- // Append all of the entries for each of the sources.
- for (SourceIDToInfoMap::const_iterator it = sources_.begin();
- it != sources_.end();
- ++it) {
- const SourceInfo& info = it->second;
- out->insert(out->end(), info.entries.begin(), info.entries.end());
- }
-}
-
-void PassiveLogCollector::SourceTracker::AddToDeletionQueue(
- uint32 source_id) {
- DCHECK(sources_.find(source_id) != sources_.end());
- DCHECK(!sources_.find(source_id)->second.is_alive);
- DCHECK_GE(sources_.find(source_id)->second.reference_count, 0);
- DCHECK_LE(deletion_queue_.size(), max_graveyard_size_);
-
- DCHECK(std::find(deletion_queue_.begin(), deletion_queue_.end(),
- source_id) == deletion_queue_.end());
- deletion_queue_.push_back(source_id);
-
- // After the deletion queue has reached its maximum size, start
- // deleting sources in FIFO order.
- if (deletion_queue_.size() > max_graveyard_size_) {
- uint32 oldest = deletion_queue_.front();
- deletion_queue_.pop_front();
- DeleteSourceInfo(oldest);
- }
-}
-
-void PassiveLogCollector::SourceTracker::EraseFromDeletionQueue(
- uint32 source_id) {
- DeletionQueue::iterator it =
- std::remove(deletion_queue_.begin(), deletion_queue_.end(),
- source_id);
- CHECK(it != deletion_queue_.end());
- deletion_queue_.erase(it);
-}
-
-void PassiveLogCollector::SourceTracker::AdjustReferenceCountForSource(
- int offset, uint32 source_id) {
- DCHECK(offset == -1 || offset == 1) << "invalid offset: " << offset;
-
- // In general it is invalid to call AdjustReferenceCountForSource() on
- // source that doesn't exist. However, it is possible that if
- // SourceTracker::Clear() was previously called this can happen.
- SourceIDToInfoMap::iterator it = sources_.find(source_id);
- if (it == sources_.end()) {
- LOG(WARNING) << "Released a reference to nonexistent source.";
- return;
- }
-
- SourceInfo& info = it->second;
- DCHECK_GE(info.reference_count, 0);
- info.reference_count += offset;
-
- bool released_unmatched_reference = info.reference_count < 0;
- if (released_unmatched_reference) {
- // In general this shouldn't happen, however it is possible to reach this
- // state if SourceTracker::Clear() was called earlier.
- LOG(WARNING) << "Released unmatched reference count.";
- info.reference_count = 0;
- }
-
- if (!info.is_alive) {
- if (info.reference_count == 1 && offset == 1) {
- // If we just added a reference to a dead source that had no references,
- // it must have been in the deletion queue, so remove it from the queue.
- EraseFromDeletionQueue(source_id);
- } else if (info.reference_count == 0) {
- if (released_unmatched_reference)
- EraseFromDeletionQueue(source_id);
- // If we just released the final reference to a dead source, go ahead
- // and delete it right away.
- DeleteSourceInfo(source_id);
- }
- }
-}
-
-void PassiveLogCollector::SourceTracker::AddReferenceToSourceDependency(
- const net::NetLog::Source& source, SourceInfo* info) {
- // Find the tracker which should be holding |source|.
- DCHECK(parent_);
- DCHECK_NE(source.type, net::NetLog::SOURCE_NONE);
- SourceTracker* tracker = static_cast<SourceTracker*>(
- parent_->GetTrackerForSourceType(source.type));
- DCHECK(tracker);
-
- // Tell the owning tracker to increment the reference count of |source|.
- tracker->AdjustReferenceCountForSource(1, source.id);
-
- // Make a note to release this reference once |info| is destroyed.
- info->dependencies.push_back(source);
-}
-
-void PassiveLogCollector::SourceTracker::ReleaseAllReferencesToDependencies(
- SourceInfo* info) {
- // Release all references |info| was holding to other sources.
- for (SourceDependencyList::const_iterator it = info->dependencies.begin();
- it != info->dependencies.end(); ++it) {
- const net::NetLog::Source& source = *it;
-
- // Find the tracker which should be holding |source|.
- DCHECK(parent_);
- DCHECK_NE(source.type, net::NetLog::SOURCE_NONE);
- SourceTracker* tracker = static_cast<SourceTracker*>(
- parent_->GetTrackerForSourceType(source.type));
- DCHECK(tracker);
-
- // Tell the owning tracker to decrement the reference count of |source|.
- tracker->AdjustReferenceCountForSource(-1, source.id);
- }
-
- info->dependencies.clear();
-}
-
-//----------------------------------------------------------------------------
-// ConnectJobTracker
-//----------------------------------------------------------------------------
-
-const size_t PassiveLogCollector::ConnectJobTracker::kMaxNumSources = 100;
-const size_t PassiveLogCollector::ConnectJobTracker::kMaxGraveyardSize = 15;
-
-PassiveLogCollector::ConnectJobTracker::ConnectJobTracker(
- PassiveLogCollector* parent)
- : SourceTracker(kMaxNumSources, kMaxGraveyardSize, parent) {
-}
-
-PassiveLogCollector::SourceTracker::Action
-PassiveLogCollector::ConnectJobTracker::DoAddEntry(
- const ChromeNetLog::Entry& entry, SourceInfo* out_info) {
- AddEntryToSourceInfo(entry, out_info);
-
- if (entry.type == net::NetLog::TYPE_CONNECT_JOB_SET_SOCKET) {
- const net::NetLog::Source& source_dependency =
- static_cast<net::NetLogSourceParameter*>(entry.params.get())->value();
- AddReferenceToSourceDependency(source_dependency, out_info);
- }
-
- // If this is the end of the connect job, move the source to the graveyard.
- if (entry.type == net::NetLog::TYPE_SOCKET_POOL_CONNECT_JOB &&
- entry.phase == net::NetLog::PHASE_END) {
- return ACTION_MOVE_TO_GRAVEYARD;
- }
-
- return ACTION_NONE;
-}
-
-//----------------------------------------------------------------------------
-// SocketTracker
-//----------------------------------------------------------------------------
-
-const size_t PassiveLogCollector::SocketTracker::kMaxNumSources = 200;
-const size_t PassiveLogCollector::SocketTracker::kMaxGraveyardSize = 15;
-
-PassiveLogCollector::SocketTracker::SocketTracker()
- : SourceTracker(kMaxNumSources, kMaxGraveyardSize, NULL) {
-}
-
-PassiveLogCollector::SourceTracker::Action
-PassiveLogCollector::SocketTracker::DoAddEntry(const ChromeNetLog::Entry& entry,
- SourceInfo* out_info) {
- // TODO(eroman): aggregate the byte counts once truncation starts to happen,
- // to summarize transaction read/writes for each SOCKET_IN_USE
- // section.
- if (entry.type == net::NetLog::TYPE_SOCKET_BYTES_SENT ||
- entry.type == net::NetLog::TYPE_SOCKET_BYTES_RECEIVED ||
- entry.type == net::NetLog::TYPE_SSL_SOCKET_BYTES_SENT ||
- entry.type == net::NetLog::TYPE_SSL_SOCKET_BYTES_RECEIVED) {
- return ACTION_NONE;
- }
-
- AddEntryToSourceInfo(entry, out_info);
-
- if (entry.type == net::NetLog::TYPE_SOCKET_ALIVE &&
- entry.phase == net::NetLog::PHASE_END) {
- return ACTION_MOVE_TO_GRAVEYARD;
- }
-
- return ACTION_NONE;
-}
-
-//----------------------------------------------------------------------------
-// RequestTracker
-//----------------------------------------------------------------------------
-
-const size_t PassiveLogCollector::RequestTracker::kMaxNumSources = 100;
-const size_t PassiveLogCollector::RequestTracker::kMaxGraveyardSize = 25;
-
-PassiveLogCollector::RequestTracker::RequestTracker(PassiveLogCollector* parent)
- : SourceTracker(kMaxNumSources, kMaxGraveyardSize, parent) {
-}
-
-PassiveLogCollector::SourceTracker::Action
-PassiveLogCollector::RequestTracker::DoAddEntry(
- const ChromeNetLog::Entry& entry, SourceInfo* out_info) {
- if (entry.type == net::NetLog::TYPE_HTTP_STREAM_REQUEST_BOUND_TO_JOB) {
- const net::NetLog::Source& source_dependency =
- static_cast<net::NetLogSourceParameter*>(entry.params.get())->value();
- AddReferenceToSourceDependency(source_dependency, out_info);
- }
-
- // Don't keep read bytes around in the log, to save memory.
- if (entry.type == net::NetLog::TYPE_URL_REQUEST_JOB_BYTES_READ ||
- entry.type == net::NetLog::TYPE_URL_REQUEST_JOB_FILTERED_BYTES_READ) {
- return ACTION_NONE;
- }
-
- AddEntryToSourceInfo(entry, out_info);
-
- // If the request has ended, move it to the graveyard.
- if (entry.type == net::NetLog::TYPE_REQUEST_ALIVE &&
- entry.phase == net::NetLog::PHASE_END) {
- if (StartsWithASCII(out_info->GetURL(), "chrome://", false)) {
- // Avoid sending "chrome://" requests to the graveyard, since it just
- // adds to clutter.
- return ACTION_DELETE;
- }
- return ACTION_MOVE_TO_GRAVEYARD;
- }
-
- return ACTION_NONE;
-}
-
-//----------------------------------------------------------------------------
-// ProxyScriptDeciderTracker
-//----------------------------------------------------------------------------
-
-const size_t PassiveLogCollector::ProxyScriptDeciderTracker::kMaxNumSources
- = 20;
-const size_t PassiveLogCollector::ProxyScriptDeciderTracker::kMaxGraveyardSize
- = 3;
-
-PassiveLogCollector::ProxyScriptDeciderTracker::ProxyScriptDeciderTracker()
- : SourceTracker(kMaxNumSources, kMaxGraveyardSize, NULL) {
-}
-
-PassiveLogCollector::SourceTracker::Action
-PassiveLogCollector::ProxyScriptDeciderTracker::DoAddEntry(
- const ChromeNetLog::Entry& entry, SourceInfo* out_info) {
- AddEntryToSourceInfo(entry, out_info);
- if (entry.type == net::NetLog::TYPE_PROXY_SCRIPT_DECIDER &&
- entry.phase == net::NetLog::PHASE_END) {
- return ACTION_MOVE_TO_GRAVEYARD;
- }
- return ACTION_NONE;
-}
-
-//----------------------------------------------------------------------------
-// SpdySessionTracker
-//----------------------------------------------------------------------------
-
-const size_t PassiveLogCollector::SpdySessionTracker::kMaxNumSources = 50;
-const size_t PassiveLogCollector::SpdySessionTracker::kMaxGraveyardSize = 10;
-
-PassiveLogCollector::SpdySessionTracker::SpdySessionTracker()
- : SourceTracker(kMaxNumSources, kMaxGraveyardSize, NULL) {
-}
-
-PassiveLogCollector::SourceTracker::Action
-PassiveLogCollector::SpdySessionTracker::DoAddEntry(
- const ChromeNetLog::Entry& entry, SourceInfo* out_info) {
- AddEntryToSourceInfo(entry, out_info);
- if (entry.type == net::NetLog::TYPE_SPDY_SESSION &&
- entry.phase == net::NetLog::PHASE_END) {
- return ACTION_MOVE_TO_GRAVEYARD;
- }
- return ACTION_NONE;
-}
-
-//----------------------------------------------------------------------------
-// HostResolverRequestTracker
-//----------------------------------------------------------------------------
-
-const size_t
-PassiveLogCollector::HostResolverRequestTracker::kMaxNumSources = 200;
-const size_t
-PassiveLogCollector::HostResolverRequestTracker::kMaxGraveyardSize = 20;
-
-PassiveLogCollector::HostResolverRequestTracker::HostResolverRequestTracker()
- : SourceTracker(kMaxNumSources, kMaxGraveyardSize, NULL) {
-}
-
-PassiveLogCollector::SourceTracker::Action
-PassiveLogCollector::HostResolverRequestTracker::DoAddEntry(
- const ChromeNetLog::Entry& entry, SourceInfo* out_info) {
- AddEntryToSourceInfo(entry, out_info);
- if (entry.type == net::NetLog::TYPE_HOST_RESOLVER_IMPL_REQUEST &&
- entry.phase == net::NetLog::PHASE_END) {
- return ACTION_MOVE_TO_GRAVEYARD;
- }
- return ACTION_NONE;
-}
-
-//----------------------------------------------------------------------------
-// HostResolverJobTracker
-//----------------------------------------------------------------------------
-
-const size_t
-PassiveLogCollector::HostResolverJobTracker::kMaxNumSources = 100;
-const size_t
-PassiveLogCollector::HostResolverJobTracker::kMaxGraveyardSize = 15;
-
-PassiveLogCollector::HostResolverJobTracker::HostResolverJobTracker()
- : SourceTracker(kMaxNumSources, kMaxGraveyardSize, NULL) {
-}
-
-PassiveLogCollector::SourceTracker::Action
-PassiveLogCollector::HostResolverJobTracker::DoAddEntry(
- const ChromeNetLog::Entry& entry, SourceInfo* out_info) {
- AddEntryToSourceInfo(entry, out_info);
- if (entry.type == net::NetLog::TYPE_HOST_RESOLVER_IMPL_JOB &&
- entry.phase == net::NetLog::PHASE_END) {
- return ACTION_MOVE_TO_GRAVEYARD;
- }
- return ACTION_NONE;
-}
-
-//----------------------------------------------------------------------------
-// HostResolverProcTaskTracker
-//----------------------------------------------------------------------------
-
-const size_t
-PassiveLogCollector::HostResolverProcTaskTracker::kMaxNumSources = 100;
-const size_t
-PassiveLogCollector::HostResolverProcTaskTracker::kMaxGraveyardSize = 15;
-
-PassiveLogCollector::HostResolverProcTaskTracker::HostResolverProcTaskTracker()
- : SourceTracker(kMaxNumSources, kMaxGraveyardSize, NULL) {
-}
-
-PassiveLogCollector::SourceTracker::Action
-PassiveLogCollector::HostResolverProcTaskTracker::DoAddEntry(
- const ChromeNetLog::Entry& entry, SourceInfo* out_info) {
- AddEntryToSourceInfo(entry, out_info);
- if (entry.type == net::NetLog::TYPE_HOST_RESOLVER_IMPL_PROC_TASK &&
- entry.phase == net::NetLog::PHASE_END) {
- return ACTION_MOVE_TO_GRAVEYARD;
- }
- return ACTION_NONE;
-}
-
-//----------------------------------------------------------------------------
-// DiskCacheEntryTracker
-//----------------------------------------------------------------------------
-
-const size_t PassiveLogCollector::DiskCacheEntryTracker::kMaxNumSources = 100;
-const size_t PassiveLogCollector::DiskCacheEntryTracker::kMaxGraveyardSize = 25;
-
-PassiveLogCollector::DiskCacheEntryTracker::DiskCacheEntryTracker()
- : SourceTracker(kMaxNumSources, kMaxGraveyardSize, NULL) {
-}
-
-PassiveLogCollector::SourceTracker::Action
-PassiveLogCollector::DiskCacheEntryTracker::DoAddEntry(
- const ChromeNetLog::Entry& entry, SourceInfo* out_info) {
- AddEntryToSourceInfo(entry, out_info);
-
- // If the request has ended, move it to the graveyard.
- if (entry.type == net::NetLog::TYPE_DISK_CACHE_ENTRY_IMPL &&
- entry.phase == net::NetLog::PHASE_END) {
- return ACTION_MOVE_TO_GRAVEYARD;
- }
- return ACTION_NONE;
-}
-
-//----------------------------------------------------------------------------
-// MemCacheEntryTracker
-//----------------------------------------------------------------------------
-
-const size_t PassiveLogCollector::MemCacheEntryTracker::kMaxNumSources = 100;
-const size_t PassiveLogCollector::MemCacheEntryTracker::kMaxGraveyardSize = 25;
-
-PassiveLogCollector::MemCacheEntryTracker::MemCacheEntryTracker()
- : SourceTracker(kMaxNumSources, kMaxGraveyardSize, NULL) {
-}
-
-PassiveLogCollector::SourceTracker::Action
-PassiveLogCollector::MemCacheEntryTracker::DoAddEntry(
- const ChromeNetLog::Entry& entry, SourceInfo* out_info) {
- AddEntryToSourceInfo(entry, out_info);
-
- // If the request has ended, move it to the graveyard.
- if (entry.type == net::NetLog::TYPE_DISK_CACHE_MEM_ENTRY_IMPL &&
- entry.phase == net::NetLog::PHASE_END) {
- return ACTION_MOVE_TO_GRAVEYARD;
- }
- return ACTION_NONE;
-}
-
-//----------------------------------------------------------------------------
-// HttpStreamJobTracker
-//----------------------------------------------------------------------------
-
-const size_t PassiveLogCollector::HttpStreamJobTracker::kMaxNumSources = 100;
-const size_t PassiveLogCollector::HttpStreamJobTracker::kMaxGraveyardSize = 25;
-
-PassiveLogCollector::HttpStreamJobTracker::HttpStreamJobTracker(
- PassiveLogCollector* parent)
- : SourceTracker(kMaxNumSources, kMaxGraveyardSize, parent) {
-}
-
-PassiveLogCollector::SourceTracker::Action
-PassiveLogCollector::HttpStreamJobTracker::DoAddEntry(
- const ChromeNetLog::Entry& entry, SourceInfo* out_info) {
- if (entry.type == net::NetLog::TYPE_SOCKET_POOL_BOUND_TO_CONNECT_JOB ||
- entry.type == net::NetLog::TYPE_SOCKET_POOL_BOUND_TO_SOCKET) {
- const net::NetLog::Source& source_dependency =
- static_cast<net::NetLogSourceParameter*>(entry.params.get())->value();
- AddReferenceToSourceDependency(source_dependency, out_info);
- }
-
- AddEntryToSourceInfo(entry, out_info);
-
- // If the request has ended, move it to the graveyard.
- if (entry.type == net::NetLog::TYPE_HTTP_STREAM_JOB &&
- entry.phase == net::NetLog::PHASE_END) {
- return ACTION_MOVE_TO_GRAVEYARD;
- }
- return ACTION_NONE;
-}
-
-//----------------------------------------------------------------------------
-// ExponentialBackoffThrottlingTracker
-//----------------------------------------------------------------------------
-
-const size_t PassiveLogCollector::
- ExponentialBackoffThrottlingTracker::kMaxNumSources = 100;
-const size_t PassiveLogCollector::
- ExponentialBackoffThrottlingTracker::kMaxGraveyardSize = 25;
-
-PassiveLogCollector::
- ExponentialBackoffThrottlingTracker::ExponentialBackoffThrottlingTracker(
- PassiveLogCollector* parent)
- : SourceTracker(kMaxNumSources, kMaxGraveyardSize, parent) {
-}
-
-PassiveLogCollector::SourceTracker::Action
-PassiveLogCollector::ExponentialBackoffThrottlingTracker::DoAddEntry(
- const ChromeNetLog::Entry& entry, SourceInfo* out_info) {
- AddEntryToSourceInfo(entry, out_info);
- return ACTION_NONE;
-}
-
-//----------------------------------------------------------------------------
-// DnsTransactionTracker
-//----------------------------------------------------------------------------
-
-const size_t PassiveLogCollector::DnsTransactionTracker::kMaxNumSources = 100;
-const size_t PassiveLogCollector::DnsTransactionTracker::kMaxGraveyardSize = 15;
-
-PassiveLogCollector::DnsTransactionTracker::DnsTransactionTracker()
- : SourceTracker(kMaxNumSources, kMaxGraveyardSize, NULL) {
-}
-
-PassiveLogCollector::SourceTracker::Action
-PassiveLogCollector::DnsTransactionTracker::DoAddEntry(
- const ChromeNetLog::Entry& entry,
- SourceInfo* out_info) {
- AddEntryToSourceInfo(entry, out_info);
- if (entry.type == net::NetLog::TYPE_DNS_TRANSACTION &&
- entry.phase == net::NetLog::PHASE_END) {
- return ACTION_MOVE_TO_GRAVEYARD;
- }
- return ACTION_NONE;
-}
-
-//----------------------------------------------------------------------------
-// AsyncHostResolverRequestTracker
-//----------------------------------------------------------------------------
-
-const size_t
-PassiveLogCollector::AsyncHostResolverRequestTracker::kMaxNumSources = 100;
-
-const size_t
-PassiveLogCollector::AsyncHostResolverRequestTracker::kMaxGraveyardSize = 15;
-
-PassiveLogCollector::
- AsyncHostResolverRequestTracker::AsyncHostResolverRequestTracker()
- : SourceTracker(kMaxNumSources, kMaxGraveyardSize, NULL) {
-}
-
-PassiveLogCollector::SourceTracker::Action
-PassiveLogCollector::AsyncHostResolverRequestTracker::DoAddEntry(
- const ChromeNetLog::Entry& entry,
- SourceInfo* out_info) {
- AddEntryToSourceInfo(entry, out_info);
- if (entry.type == net::NetLog::TYPE_ASYNC_HOST_RESOLVER_REQUEST &&
- entry.phase == net::NetLog::PHASE_END) {
- return ACTION_MOVE_TO_GRAVEYARD;
- }
- return ACTION_NONE;
-}
-
-//----------------------------------------------------------------------------
-// UDPSocketTracker
-//----------------------------------------------------------------------------
-
-const size_t PassiveLogCollector::UDPSocketTracker::kMaxNumSources = 200;
-const size_t PassiveLogCollector::UDPSocketTracker::kMaxGraveyardSize = 15;
-
-PassiveLogCollector::UDPSocketTracker::UDPSocketTracker()
- : SourceTracker(kMaxNumSources, kMaxGraveyardSize, NULL) {
-}
-
-PassiveLogCollector::UDPSocketTracker::Action
-PassiveLogCollector::UDPSocketTracker::DoAddEntry(
- const ChromeNetLog::Entry& entry,
- SourceInfo* out_info) {
- if (entry.type == net::NetLog::TYPE_UDP_BYTES_SENT ||
- entry.type == net::NetLog::TYPE_UDP_BYTES_RECEIVED) {
- return ACTION_NONE;
- }
-
- AddEntryToSourceInfo(entry, out_info);
-
- if (entry.type == net::NetLog::TYPE_SOCKET_ALIVE &&
- entry.phase == net::NetLog::PHASE_END) {
- return ACTION_MOVE_TO_GRAVEYARD;
- }
-
- return ACTION_NONE;
-}
-
-//----------------------------------------------------------------------------
-// CertVerifierJobTracker
-//----------------------------------------------------------------------------
-
-const size_t
-PassiveLogCollector::CertVerifierJobTracker::kMaxNumSources = 100;
-
-const size_t
-PassiveLogCollector::CertVerifierJobTracker::kMaxGraveyardSize = 15;
-
-PassiveLogCollector::
- CertVerifierJobTracker::CertVerifierJobTracker()
- : SourceTracker(kMaxNumSources, kMaxGraveyardSize, NULL) {
-}
-
-PassiveLogCollector::SourceTracker::Action
-PassiveLogCollector::CertVerifierJobTracker::DoAddEntry(
- const ChromeNetLog::Entry& entry,
- SourceInfo* out_info) {
- AddEntryToSourceInfo(entry, out_info);
- if (entry.type == net::NetLog::TYPE_CERT_VERIFIER_JOB &&
- entry.phase == net::NetLog::PHASE_END) {
- return ACTION_MOVE_TO_GRAVEYARD;
- }
- return ACTION_NONE;
-}
-
-//----------------------------------------------------------------------------
-// HttpPipelinedConnectionTracker
-//----------------------------------------------------------------------------
-
-const size_t
-PassiveLogCollector::HttpPipelinedConnectionTracker::kMaxNumSources = 100;
-
-const size_t
-PassiveLogCollector::HttpPipelinedConnectionTracker::kMaxGraveyardSize = 25;
-
-PassiveLogCollector::
- HttpPipelinedConnectionTracker::HttpPipelinedConnectionTracker()
- : SourceTracker(kMaxNumSources, kMaxGraveyardSize, NULL) {
-}
-
-PassiveLogCollector::SourceTracker::Action
-PassiveLogCollector::HttpPipelinedConnectionTracker::DoAddEntry(
- const ChromeNetLog::Entry& entry,
- SourceInfo* out_info) {
- AddEntryToSourceInfo(entry, out_info);
- if (entry.type == net::NetLog::TYPE_HTTP_PIPELINED_CONNECTION &&
- entry.phase == net::NetLog::PHASE_END) {
- return ACTION_MOVE_TO_GRAVEYARD;
- }
- return ACTION_NONE;
-}
-
-//----------------------------------------------------------------------------
-// DownloadTracker
-//----------------------------------------------------------------------------
-
-const size_t PassiveLogCollector::DownloadTracker::kMaxNumSources = 200;
-
-const size_t PassiveLogCollector::DownloadTracker::kMaxGraveyardSize = 50;
-
-PassiveLogCollector::DownloadTracker::DownloadTracker()
- : SourceTracker(kMaxNumSources, kMaxGraveyardSize, NULL) {
-}
-
-PassiveLogCollector::SourceTracker::Action
-PassiveLogCollector::DownloadTracker::DoAddEntry(
- const ChromeNetLog::Entry& entry,
- SourceInfo* out_info) {
- if ((entry.type == net::NetLog::TYPE_DOWNLOAD_FILE_WRITTEN) ||
- (entry.type == net::NetLog::TYPE_DOWNLOAD_ITEM_UPDATED)) {
- return ACTION_NONE; // Don't passively log these (too many).
- }
- AddEntryToSourceInfo(entry, out_info);
- if (entry.type == net::NetLog::TYPE_DOWNLOAD_ITEM_ACTIVE &&
- entry.phase == net::NetLog::PHASE_END) {
- return ACTION_MOVE_TO_GRAVEYARD;
- }
- return ACTION_NONE;
-}
-
-//----------------------------------------------------------------------------
-// FileStreamTracker
-//----------------------------------------------------------------------------
-
-const size_t PassiveLogCollector::FileStreamTracker::kMaxNumSources = 100;
-
-const size_t PassiveLogCollector::FileStreamTracker::kMaxGraveyardSize = 25;
-
-PassiveLogCollector::FileStreamTracker::FileStreamTracker()
- : SourceTracker(kMaxNumSources, kMaxGraveyardSize, NULL) {
-}
-
-PassiveLogCollector::SourceTracker::Action
-PassiveLogCollector::FileStreamTracker::DoAddEntry(
- const ChromeNetLog::Entry& entry,
- SourceInfo* out_info) {
- AddEntryToSourceInfo(entry, out_info);
- if (entry.type == net::NetLog::TYPE_FILE_STREAM_ALIVE &&
- entry.phase == net::NetLog::PHASE_END) {
- return ACTION_MOVE_TO_GRAVEYARD;
- }
- return ACTION_NONE;
-}
diff --git a/chrome/browser/net/passive_log_collector.h b/chrome/browser/net/passive_log_collector.h
deleted file mode 100644
index 701c5c3..0000000
--- a/chrome/browser/net/passive_log_collector.h
+++ /dev/null
@@ -1,540 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef CHROME_BROWSER_NET_PASSIVE_LOG_COLLECTOR_H_
-#define CHROME_BROWSER_NET_PASSIVE_LOG_COLLECTOR_H_
-#pragma once
-
-#include <deque>
-#include <string>
-#include <vector>
-
-#include "base/gtest_prod_util.h"
-#include "base/hash_tables.h"
-#include "base/memory/ref_counted.h"
-#include "base/time.h"
-#include "chrome/browser/net/chrome_net_log.h"
-#include "net/base/net_log.h"
-
-// PassiveLogCollector watches the NetLog event stream, and saves the network
-// events for recent requests, in a circular buffer.
-//
-// This is done so that when a network problem is encountered (performance
-// problem, or error), about:net-internals can be opened shortly after the
-// problem and it will contain a trace for the problem request.
-//
-// (This is in contrast to the "active logging" which captures every single
-// network event, but requires capturing to have been enabled *prior* to
-// encountering the problem. Active capturing is enabled as long as
-// about:net-internals is open).
-//
-// The data captured by PassiveLogCollector is grouped by NetLog::Source, into
-// a SourceInfo structure. These in turn are grouped by NetLog::SourceType, and
-// owned by a SourceTracker instance for the specific source type.
-//
-// The PassiveLogCollector is owned by the ChromeNetLog itself, and is not
-// thread safe. The ChromeNetLog is responsible for calling it in a thread safe
-// manner.
-class PassiveLogCollector : public ChromeNetLog::ThreadSafeObserverImpl {
- public:
- typedef std::vector<net::NetLog::Source> SourceDependencyList;
-
- struct SourceInfo {
- SourceInfo();
- ~SourceInfo();
-
- // Returns the URL that corresponds with this source. This is
- // only meaningful for certain source types (URL_REQUEST, SOCKET_STREAM).
- // For the rest, it will return an empty string.
- std::string GetURL() const;
-
- uint32 source_id;
- ChromeNetLog::EntryList entries;
- size_t num_entries_truncated;
-
- // List of other sources which contain information relevant to this
- // source (for example, a url request might depend on the log items
- // for a connect job and for a socket that were bound to it.)
- SourceDependencyList dependencies;
-
- // Holds the count of how many other sources have added this as a
- // dependent source. When it is 0, it means noone has referenced it so it
- // can be deleted normally.
- int reference_count;
-
- // |is_alive| is set to false once the source has been added to the
- // tracker's graveyard (it may still be kept around due to a non-zero
- // reference_count, but it is still considered "dead").
- bool is_alive;
- };
-
- typedef std::vector<SourceInfo> SourceInfoList;
-
- // Interface for consuming a NetLog entry.
- class SourceTrackerInterface {
- public:
- virtual ~SourceTrackerInterface() {}
-
- virtual void OnAddEntry(const ChromeNetLog::Entry& entry) = 0;
-
- // Clears all the passively logged data from this tracker.
- virtual void Clear() = 0;
-
- // Appends all the captured entries to |out|. The ordering is undefined.
- virtual void AppendAllEntries(ChromeNetLog::EntryList* out) const = 0;
- };
-
- // This source tracker is intended for TYPE_NONE. All entries go into a
- // circular buffer, and there is no concept of live/dead requests.
- class GlobalSourceTracker : public SourceTrackerInterface {
- public:
- GlobalSourceTracker();
- virtual ~GlobalSourceTracker();
-
- // SourceTrackerInterface implementation:
- virtual void OnAddEntry(const ChromeNetLog::Entry& entry) OVERRIDE;
- virtual void Clear() OVERRIDE;
- virtual void AppendAllEntries(ChromeNetLog::EntryList* out) const OVERRIDE;
-
- private:
- typedef std::deque<ChromeNetLog::Entry> CircularEntryList;
- CircularEntryList entries_;
- DISALLOW_COPY_AND_ASSIGN(GlobalSourceTracker);
- };
-
- // This class stores and manages the passively logged information for
- // URLRequests/SocketStreams/ConnectJobs.
- class SourceTracker : public SourceTrackerInterface {
- public:
- // Creates a SourceTracker that will track at most |max_num_sources|.
- // Up to |max_graveyard_size| unreferenced sources will be kept around
- // before deleting them for good. |parent| may be NULL, and points to
- // the owning PassiveLogCollector (it is used when adding references
- // to other sources).
- SourceTracker(size_t max_num_sources,
- size_t max_graveyard_size,
- PassiveLogCollector* parent);
-
- virtual ~SourceTracker();
-
- // SourceTrackerInterface implementation:
- virtual void OnAddEntry(const ChromeNetLog::Entry& entry) OVERRIDE;
- virtual void Clear() OVERRIDE;
- virtual void AppendAllEntries(ChromeNetLog::EntryList* out) const OVERRIDE;
-
-#ifdef UNIT_TEST
- // Helper used to inspect the current state by unit-tests.
- // Retuns a copy of the source infos held by the tracker.
- SourceInfoList GetAllDeadOrAliveSources(bool is_alive) const {
- SourceInfoList result;
- for (SourceIDToInfoMap::const_iterator it = sources_.begin();
- it != sources_.end(); ++it) {
- if (it->second.is_alive == is_alive)
- result.push_back(it->second);
- }
- return result;
- }
-#endif
-
- protected:
- enum Action {
- ACTION_NONE,
- ACTION_DELETE,
- ACTION_MOVE_TO_GRAVEYARD,
- };
-
- // Makes |info| hold a reference to |source|. This way |source| will be
- // kept alive at least as long as |info|.
- void AddReferenceToSourceDependency(const net::NetLog::Source& source,
- SourceInfo* info);
-
- private:
- typedef base::hash_map<uint32, SourceInfo> SourceIDToInfoMap;
- typedef std::deque<uint32> DeletionQueue;
-
- // Updates |out_info| with the information from |entry|. Returns an action
- // to perform for this map entry on completion.
- virtual Action DoAddEntry(const ChromeNetLog::Entry& entry,
- SourceInfo* out_info) = 0;
-
- // Removes |source_id| from |sources_|. This also releases any references
- // to dependencies held by this source.
- void DeleteSourceInfo(uint32 source_id);
-
- // Adds |source_id| to the FIFO queue (graveyard) for deletion.
- void AddToDeletionQueue(uint32 source_id);
-
- // Removes |source_id| from the |deletion_queue_| container.
- void EraseFromDeletionQueue(uint32 source_id);
-
- // Adds/Releases a reference from the source with ID |source_id|.
- // Use |offset=-1| to do a release, and |offset=1| for an addref.
- void AdjustReferenceCountForSource(int offset, uint32 source_id);
-
- // Releases all the references to sources held by |info|.
- void ReleaseAllReferencesToDependencies(SourceInfo* info);
-
- // This map contains all of the sources being tracked by this tracker.
- // (It includes both the "live" sources, and the "dead" ones.)
- SourceIDToInfoMap sources_;
-
- size_t max_num_sources_;
- size_t max_graveyard_size_;
-
- // FIFO queue for entries in |sources_| that are no longer alive, and
- // can be deleted. This buffer is also called "graveyard" elsewhere. We
- // queue sources for deletion so they can persist a bit longer.
- DeletionQueue deletion_queue_;
-
- PassiveLogCollector* parent_;
-
- DISALLOW_COPY_AND_ASSIGN(SourceTracker);
- };
-
- // Specialization of SourceTracker for handling ConnectJobs.
- class ConnectJobTracker : public SourceTracker {
- public:
- static const size_t kMaxNumSources;
- static const size_t kMaxGraveyardSize;
-
- explicit ConnectJobTracker(PassiveLogCollector* parent);
-
- private:
- virtual Action DoAddEntry(const ChromeNetLog::Entry& entry,
- SourceInfo* out_info) OVERRIDE;
- DISALLOW_COPY_AND_ASSIGN(ConnectJobTracker);
- };
-
- // Specialization of SourceTracker for handling Sockets.
- class SocketTracker : public SourceTracker {
- public:
- static const size_t kMaxNumSources;
- static const size_t kMaxGraveyardSize;
-
- SocketTracker();
-
- private:
- virtual Action DoAddEntry(const ChromeNetLog::Entry& entry,
- SourceInfo* out_info) OVERRIDE;
-
- DISALLOW_COPY_AND_ASSIGN(SocketTracker);
- };
-
- // Specialization of SourceTracker for handling net::URLRequest/SocketStream.
- class RequestTracker : public SourceTracker {
- public:
- static const size_t kMaxNumSources;
- static const size_t kMaxGraveyardSize;
-
- explicit RequestTracker(PassiveLogCollector* parent);
-
- private:
- virtual Action DoAddEntry(const ChromeNetLog::Entry& entry,
- SourceInfo* out_info) OVERRIDE;
-
- DISALLOW_COPY_AND_ASSIGN(RequestTracker);
- };
-
- // Specialization of SourceTracker for handling
- // SOURCE_PROXY_SCRIPT_DECIDER.
- class ProxyScriptDeciderTracker : public SourceTracker {
- public:
- static const size_t kMaxNumSources;
- static const size_t kMaxGraveyardSize;
-
- ProxyScriptDeciderTracker();
-
- private:
- virtual Action DoAddEntry(const ChromeNetLog::Entry& entry,
- SourceInfo* out_info) OVERRIDE;
-
- DISALLOW_COPY_AND_ASSIGN(ProxyScriptDeciderTracker);
- };
-
- // Tracks the log entries for the last seen SOURCE_SPDY_SESSION.
- class SpdySessionTracker : public SourceTracker {
- public:
- static const size_t kMaxNumSources;
- static const size_t kMaxGraveyardSize;
-
- SpdySessionTracker();
-
- private:
- virtual Action DoAddEntry(const ChromeNetLog::Entry& entry,
- SourceInfo* out_info) OVERRIDE;
-
- DISALLOW_COPY_AND_ASSIGN(SpdySessionTracker);
- };
-
- // Tracks the log entries for the last seen SOURCE_HOST_RESOLVER_IMPL_REQUEST.
- class HostResolverRequestTracker : public SourceTracker {
- public:
- static const size_t kMaxNumSources;
- static const size_t kMaxGraveyardSize;
-
- HostResolverRequestTracker();
-
- private:
- virtual Action DoAddEntry(const ChromeNetLog::Entry& entry,
- SourceInfo* out_info) OVERRIDE;
-
- DISALLOW_COPY_AND_ASSIGN(HostResolverRequestTracker);
- };
-
- // Tracks the log entries for the last seen SOURCE_HOST_RESOLVER_IMPL_JOB.
- class HostResolverJobTracker : public SourceTracker {
- public:
- static const size_t kMaxNumSources;
- static const size_t kMaxGraveyardSize;
-
- HostResolverJobTracker();
-
- private:
- virtual Action DoAddEntry(const ChromeNetLog::Entry& entry,
- SourceInfo* out_info) OVERRIDE;
-
- DISALLOW_COPY_AND_ASSIGN(HostResolverJobTracker);
- };
-
- // Tracks the log entries for the last seen
- // SOURCE_HOST_RESOLVER_IMPL_PROC_TASK.
- class HostResolverProcTaskTracker : public SourceTracker {
- public:
- static const size_t kMaxNumSources;
- static const size_t kMaxGraveyardSize;
-
- HostResolverProcTaskTracker();
-
- private:
- virtual Action DoAddEntry(const ChromeNetLog::Entry& entry,
- SourceInfo* out_info) OVERRIDE;
-
- DISALLOW_COPY_AND_ASSIGN(HostResolverProcTaskTracker);
- };
-
- // Tracks the log entries for the last seen SOURCE_DISK_CACHE_ENTRY.
- class DiskCacheEntryTracker : public SourceTracker {
- public:
- static const size_t kMaxNumSources;
- static const size_t kMaxGraveyardSize;
-
- DiskCacheEntryTracker();
-
- private:
- virtual Action DoAddEntry(const ChromeNetLog::Entry& entry,
- SourceInfo* out_info) OVERRIDE;
-
- DISALLOW_COPY_AND_ASSIGN(DiskCacheEntryTracker);
- };
-
-  // Tracks the log entries for the last seen SOURCE_MEMORY_CACHE_ENTRY.
- class MemCacheEntryTracker : public SourceTracker {
- public:
- static const size_t kMaxNumSources;
- static const size_t kMaxGraveyardSize;
-
- MemCacheEntryTracker();
-
- protected:
- virtual Action DoAddEntry(const ChromeNetLog::Entry& entry,
- SourceInfo* out_info) OVERRIDE;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(MemCacheEntryTracker);
- };
-
- class HttpStreamJobTracker : public SourceTracker {
- public:
- static const size_t kMaxNumSources;
- static const size_t kMaxGraveyardSize;
-
- explicit HttpStreamJobTracker(PassiveLogCollector* parent);
-
- private:
- virtual Action DoAddEntry(const ChromeNetLog::Entry& entry,
- SourceInfo* out_info) OVERRIDE;
- DISALLOW_COPY_AND_ASSIGN(HttpStreamJobTracker);
- };
-
- class ExponentialBackoffThrottlingTracker : public SourceTracker {
- public:
- static const size_t kMaxNumSources;
- static const size_t kMaxGraveyardSize;
-
- explicit ExponentialBackoffThrottlingTracker(PassiveLogCollector* parent);
-
- private:
- virtual Action DoAddEntry(const ChromeNetLog::Entry& entry,
- SourceInfo* out_info) OVERRIDE;
- DISALLOW_COPY_AND_ASSIGN(ExponentialBackoffThrottlingTracker);
- };
-
- // Tracks the log entries for the last seen SOURCE_DNS_TRANSACTION.
- class DnsTransactionTracker : public SourceTracker {
- public:
- static const size_t kMaxNumSources;
- static const size_t kMaxGraveyardSize;
-
- DnsTransactionTracker();
-
- private:
- virtual Action DoAddEntry(const ChromeNetLog::Entry& entry,
- SourceInfo* out_info) OVERRIDE;
-
- DISALLOW_COPY_AND_ASSIGN(DnsTransactionTracker);
- };
-
- // Tracks the log entries for the last seen SOURCE_ASYNC_HOST_RESOLVER_REQUEST
- class AsyncHostResolverRequestTracker : public SourceTracker {
- public:
- static const size_t kMaxNumSources;
- static const size_t kMaxGraveyardSize;
-
- AsyncHostResolverRequestTracker();
-
- private:
- virtual Action DoAddEntry(const ChromeNetLog::Entry& entry,
- SourceInfo* out_info) OVERRIDE;
-
- DISALLOW_COPY_AND_ASSIGN(AsyncHostResolverRequestTracker);
- };
-
- // Tracks the log entries for the last seen SOURCE_UDP_SOCKET.
- class UDPSocketTracker : public SourceTracker {
- public:
- static const size_t kMaxNumSources;
- static const size_t kMaxGraveyardSize;
-
- UDPSocketTracker();
-
- private:
- virtual Action DoAddEntry(const ChromeNetLog::Entry& entry,
- SourceInfo* out_info) OVERRIDE;
-
- DISALLOW_COPY_AND_ASSIGN(UDPSocketTracker);
- };
-
- // Tracks the log entries for the last seen SOURCE_CERT_VERIFIER_JOB.
- class CertVerifierJobTracker : public SourceTracker {
- public:
- static const size_t kMaxNumSources;
- static const size_t kMaxGraveyardSize;
-
- CertVerifierJobTracker();
-
- private:
- virtual Action DoAddEntry(const ChromeNetLog::Entry& entry,
- SourceInfo* out_info) OVERRIDE;
-
- DISALLOW_COPY_AND_ASSIGN(CertVerifierJobTracker);
- };
-
- // Tracks the log entries for the last seen SOURCE_HTTP_PIPELINED_CONNECTION.
- class HttpPipelinedConnectionTracker : public SourceTracker {
- public:
- static const size_t kMaxNumSources;
- static const size_t kMaxGraveyardSize;
-
- HttpPipelinedConnectionTracker();
-
- private:
- virtual Action DoAddEntry(const ChromeNetLog::Entry& entry,
- SourceInfo* out_info) OVERRIDE;
-
- DISALLOW_COPY_AND_ASSIGN(HttpPipelinedConnectionTracker);
- };
-
- // Tracks the log entries for the last seen SOURCE_DOWNLOAD.
- class DownloadTracker : public SourceTracker {
- public:
- static const size_t kMaxNumSources;
- static const size_t kMaxGraveyardSize;
-
- DownloadTracker();
-
- private:
- virtual Action DoAddEntry(const ChromeNetLog::Entry& entry,
- SourceInfo* out_info) OVERRIDE;
-
- DISALLOW_COPY_AND_ASSIGN(DownloadTracker);
- };
-
- // Tracks the log entries for the last seen SOURCE_FILESTREAM.
- class FileStreamTracker : public SourceTracker {
- public:
- static const size_t kMaxNumSources;
- static const size_t kMaxGraveyardSize;
-
- FileStreamTracker();
-
- private:
- virtual Action DoAddEntry(const ChromeNetLog::Entry& entry,
- SourceInfo* out_info) OVERRIDE;
-
- DISALLOW_COPY_AND_ASSIGN(FileStreamTracker);
- };
-
- PassiveLogCollector();
- virtual ~PassiveLogCollector();
-
- // ThreadSafeObserver implementation:
- virtual void OnAddEntry(net::NetLog::EventType type,
- const base::TimeTicks& time,
- const net::NetLog::Source& source,
- net::NetLog::EventPhase phase,
- net::NetLog::EventParameters* params) OVERRIDE;
-
- // Clears all of the passively logged data.
- void Clear();
-
- // Fills |out| with the full list of events that have been passively
- // captured. The list is ordered by capture time.
- void GetAllCapturedEvents(ChromeNetLog::EntryList* out) const;
-
- private:
- // Returns the tracker to use for sources of type |source_type|, or NULL.
- SourceTrackerInterface* GetTrackerForSourceType(
- net::NetLog::SourceType source_type);
-
- FRIEND_TEST_ALL_PREFIXES(PassiveLogCollectorTest,
- HoldReferenceToDependentSource);
- FRIEND_TEST_ALL_PREFIXES(PassiveLogCollectorTest,
- HoldReferenceToDeletedSource);
-
- GlobalSourceTracker global_source_tracker_;
- ConnectJobTracker connect_job_tracker_;
- SocketTracker socket_tracker_;
- RequestTracker url_request_tracker_;
- RequestTracker socket_stream_tracker_;
- ProxyScriptDeciderTracker proxy_script_decider_tracker_;
- SpdySessionTracker spdy_session_tracker_;
- HostResolverRequestTracker dns_request_tracker_;
- HostResolverJobTracker dns_job_tracker_;
- HostResolverProcTaskTracker dns_proc_task_tracker_;
- DiskCacheEntryTracker disk_cache_entry_tracker_;
- MemCacheEntryTracker mem_cache_entry_tracker_;
- HttpStreamJobTracker http_stream_job_tracker_;
- ExponentialBackoffThrottlingTracker exponential_backoff_throttling_tracker_;
- DnsTransactionTracker dns_transaction_tracker_;
- AsyncHostResolverRequestTracker async_host_resolver_request_tracker_;
- UDPSocketTracker udp_socket_tracker_;
- CertVerifierJobTracker cert_verifier_job_tracker_;
- HttpPipelinedConnectionTracker http_pipelined_connection_tracker_;
- DownloadTracker download_tracker_;
- FileStreamTracker file_stream_tracker_;
-
- // This array maps each NetLog::SourceType to one of the tracker instances
-  // defined above. Use of this array avoids duplicating the list of trackers
- // elsewhere.
- SourceTrackerInterface* trackers_[net::NetLog::SOURCE_COUNT];
-
- // The count of how many events have flowed through this log. Used to set the
- // "order" field on captured events.
- uint32 num_events_seen_;
-
- DISALLOW_COPY_AND_ASSIGN(PassiveLogCollector);
-};
-
-#endif // CHROME_BROWSER_NET_PASSIVE_LOG_COLLECTOR_H_
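
For reference, the header comments above describe a "graveyard": sources that end are queued FIFO for deletion and evicted once the queue exceeds a size cap, unless another source still holds a reference to them. Below is a minimal, standalone C++ sketch of that idea written for this review only; MiniSourceTracker, SourceInfo, and the constants are illustrative and are not the deleted Chromium classes.

#include <cstddef>
#include <cstdint>
#include <deque>
#include <iostream>
#include <map>
#include <string>

// Hypothetical stand-in for the per-source bookkeeping the deleted tracker kept.
struct SourceInfo {
  std::string description;
  bool is_alive;
  int reference_count;  // Bumped by sources that list this one as a dependency.
};

class MiniSourceTracker {
 public:
  explicit MiniSourceTracker(std::size_t max_graveyard_size)
      : max_graveyard_size_(max_graveyard_size) {}

  void OnSourceBegin(uint32_t id, const std::string& description) {
    sources_[id] = SourceInfo{description, true, 0};
  }

  // A source that ends is not deleted immediately; it moves to a FIFO
  // "graveyard" so recent history stays inspectable for a while.
  void OnSourceEnd(uint32_t id) {
    auto it = sources_.find(id);
    if (it == sources_.end())
      return;
    it->second.is_alive = false;
    graveyard_.push_back(id);
    EvictIfNeeded();
  }

  std::size_t num_sources() const { return sources_.size(); }

 private:
  void EvictIfNeeded() {
    while (graveyard_.size() > max_graveyard_size_) {
      uint32_t oldest = graveyard_.front();
      graveyard_.pop_front();
      auto it = sources_.find(oldest);
      // A still-referenced source must outlive whatever depends on it, so it
      // survives falling off the graveyard (the real code deleted it later,
      // once its reference count dropped to zero).
      if (it != sources_.end() && it->second.reference_count == 0)
        sources_.erase(it);
    }
  }

  std::map<uint32_t, SourceInfo> sources_;
  std::deque<uint32_t> graveyard_;
  std::size_t max_graveyard_size_;
};

int main() {
  MiniSourceTracker tracker(/*max_graveyard_size=*/2);
  for (uint32_t id = 0; id < 5; ++id) {
    tracker.OnSourceBegin(id, "http://req" + std::to_string(id) + "/");
    tracker.OnSourceEnd(id);
  }
  // Only the two most recently ended sources survive eviction.
  std::cout << "sources kept: " << tracker.num_sources() << "\n";
  return 0;
}
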
diff --git a/chrome/browser/net/passive_log_collector_unittest.cc b/chrome/browser/net/passive_log_collector_unittest.cc
deleted file mode 100644
index 08a97df..0000000
--- a/chrome/browser/net/passive_log_collector_unittest.cc
+++ /dev/null
@@ -1,494 +0,0 @@
-// Copyright (c) 2010 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "chrome/browser/net/passive_log_collector.h"
-
-#include "base/compiler_specific.h"
-#include "base/format_macros.h"
-#include "base/stringprintf.h"
-#include "net/url_request/url_request_netlog_params.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace {
-
-typedef PassiveLogCollector::RequestTracker RequestTracker;
-typedef PassiveLogCollector::SourceInfoList SourceInfoList;
-typedef PassiveLogCollector::SocketTracker SocketTracker;
-typedef PassiveLogCollector::HttpStreamJobTracker HttpStreamJobTracker;
-using net::NetLog;
-
-const NetLog::SourceType kSourceType = NetLog::SOURCE_NONE;
-
-ChromeNetLog::Entry MakeStartLogEntryWithURL(int source_id,
- const std::string& url) {
- return ChromeNetLog::Entry(
- 0,
- NetLog::TYPE_URL_REQUEST_START_JOB,
- base::TimeTicks(),
- NetLog::Source(kSourceType, source_id),
- NetLog::PHASE_BEGIN,
- new net::URLRequestStartEventParameters(GURL(url), "GET", 0, net::LOW));
-}
-
-ChromeNetLog::Entry MakeStartLogEntry(int source_id) {
- return MakeStartLogEntryWithURL(
- source_id, base::StringPrintf("http://req%d", source_id));
-}
-
-ChromeNetLog::Entry MakeEndLogEntry(int source_id) {
- return ChromeNetLog::Entry(
- 0,
- NetLog::TYPE_REQUEST_ALIVE,
- base::TimeTicks(),
- NetLog::Source(kSourceType, source_id),
- NetLog::PHASE_END,
- NULL);
-}
-
-bool OrderBySourceID(const PassiveLogCollector::SourceInfo& a,
- const PassiveLogCollector::SourceInfo& b) {
- return a.source_id < b.source_id;
-}
-
-SourceInfoList GetLiveSources(
- const PassiveLogCollector::SourceTracker& tracker) {
- SourceInfoList result = tracker.GetAllDeadOrAliveSources(true);
- std::sort(result.begin(), result.end(), &OrderBySourceID);
- return result;
-}
-
-SourceInfoList GetDeadSources(
- const PassiveLogCollector::SourceTracker& tracker) {
- SourceInfoList result = tracker.GetAllDeadOrAliveSources(false);
- std::sort(result.begin(), result.end(), &OrderBySourceID);
- return result;
-}
-
-static const int kMaxNumLoadLogEntries = 1;
-
-} // namespace
-
-// Test that once the tracker contains its maximum total amount of data
-// (graveyard + live requests), it resets itself to avoid growing unbounded.
-TEST(RequestTrackerTest, DropsAfterMaximumSize) {
- RequestTracker tracker(NULL);
-
- // Fill the source tracker with as many sources as it can hold.
- for (size_t i = 0; i < RequestTracker::kMaxNumSources; ++i)
- tracker.OnAddEntry(MakeStartLogEntry(i));
-
- EXPECT_EQ(RequestTracker::kMaxNumSources, GetLiveSources(tracker).size());
-
- // Add 1 more -- this should cause it to exceed its expected peak, and
- // therefore reset all of its data.
- tracker.OnAddEntry(
- MakeStartLogEntry(1 + RequestTracker::kMaxNumSources));
-
- EXPECT_EQ(1u, GetLiveSources(tracker).size());
-}
-
-TEST(RequestTrackerTest, BasicBounded) {
- RequestTracker tracker(NULL);
- EXPECT_EQ(0u, GetLiveSources(tracker).size());
- EXPECT_EQ(0u, GetDeadSources(tracker).size());
-
- tracker.OnAddEntry(MakeStartLogEntry(1));
- tracker.OnAddEntry(MakeStartLogEntry(2));
- tracker.OnAddEntry(MakeStartLogEntry(3));
- tracker.OnAddEntry(MakeStartLogEntry(4));
- tracker.OnAddEntry(MakeStartLogEntry(5));
-
- SourceInfoList live_reqs = GetLiveSources(tracker);
-
- ASSERT_EQ(5u, live_reqs.size());
- EXPECT_EQ("http://req1/", live_reqs[0].GetURL());
- EXPECT_EQ("http://req2/", live_reqs[1].GetURL());
- EXPECT_EQ("http://req3/", live_reqs[2].GetURL());
- EXPECT_EQ("http://req4/", live_reqs[3].GetURL());
- EXPECT_EQ("http://req5/", live_reqs[4].GetURL());
-
- tracker.OnAddEntry(MakeEndLogEntry(1));
- tracker.OnAddEntry(MakeEndLogEntry(5));
- tracker.OnAddEntry(MakeEndLogEntry(3));
-
- ASSERT_EQ(3u, GetDeadSources(tracker).size());
-
- live_reqs = GetLiveSources(tracker);
-
- ASSERT_EQ(2u, live_reqs.size());
- EXPECT_EQ("http://req2/", live_reqs[0].GetURL());
- EXPECT_EQ("http://req4/", live_reqs[1].GetURL());
-}
-
-TEST(RequestTrackerTest, GraveyardBounded) {
- RequestTracker tracker(NULL);
- EXPECT_EQ(0u, GetLiveSources(tracker).size());
- EXPECT_EQ(0u, GetDeadSources(tracker).size());
-
- // Add twice as many requests as will fit in the graveyard.
- for (size_t i = 0; i < RequestTracker::kMaxGraveyardSize * 2; ++i) {
- tracker.OnAddEntry(MakeStartLogEntry(i));
- tracker.OnAddEntry(MakeEndLogEntry(i));
- }
-
- EXPECT_EQ(0u, GetLiveSources(tracker).size());
-
- // Check that only the last |kMaxGraveyardSize| requests are in-memory.
-
- SourceInfoList recent = GetDeadSources(tracker);
-
- ASSERT_EQ(RequestTracker::kMaxGraveyardSize, recent.size());
-
- for (size_t i = 0; i < RequestTracker::kMaxGraveyardSize; ++i) {
- size_t req_number = i + RequestTracker::kMaxGraveyardSize;
- std::string url = base::StringPrintf("http://req%" PRIuS "/", req_number);
- EXPECT_EQ(url, recent[i].GetURL());
- }
-}
-
-// Check that we exclude "chrome://" URLs from being saved into the recent
-// requests list (graveyard).
-TEST(RequestTrackerTest, GraveyardIsFiltered) {
- RequestTracker tracker(NULL);
-
- // This will be excluded.
- std::string url1 = "chrome://dontcare/";
- tracker.OnAddEntry(MakeStartLogEntryWithURL(1, url1));
- tracker.OnAddEntry(MakeEndLogEntry(1));
-
-  // This will be added to the graveyard.
- std::string url2 = "chrome2://dontcare/";
- tracker.OnAddEntry(MakeStartLogEntryWithURL(2, url2));
- tracker.OnAddEntry(MakeEndLogEntry(2));
-
-  // This will be added to the graveyard.
- std::string url3 = "http://foo/";
- tracker.OnAddEntry(MakeStartLogEntryWithURL(3, url3));
- tracker.OnAddEntry(MakeEndLogEntry(3));
-
- ASSERT_EQ(2u, GetDeadSources(tracker).size());
- EXPECT_EQ(url2, GetDeadSources(tracker)[0].GetURL());
- EXPECT_EQ(url3, GetDeadSources(tracker)[1].GetURL());
-}
-
-TEST(SpdySessionTracker, MovesToGraveyard) {
- PassiveLogCollector::SpdySessionTracker tracker;
- EXPECT_EQ(0u, GetLiveSources(tracker).size());
- EXPECT_EQ(0u, GetDeadSources(tracker).size());
-
- ChromeNetLog::Entry begin(
- 0u,
- NetLog::TYPE_SPDY_SESSION,
- base::TimeTicks(),
- NetLog::Source(NetLog::SOURCE_SPDY_SESSION, 1),
- NetLog::PHASE_BEGIN,
- NULL);
-
- tracker.OnAddEntry(begin);
- EXPECT_EQ(1u, GetLiveSources(tracker).size());
- EXPECT_EQ(0u, GetDeadSources(tracker).size());
-
- ChromeNetLog::Entry end(
- 0u,
- NetLog::TYPE_SPDY_SESSION,
- base::TimeTicks(),
- NetLog::Source(NetLog::SOURCE_SPDY_SESSION, 1),
- NetLog::PHASE_END,
- NULL);
-
- tracker.OnAddEntry(end);
- EXPECT_EQ(0u, GetLiveSources(tracker).size());
- EXPECT_EQ(1u, GetDeadSources(tracker).size());
-}
-
-// Test that when a SOURCE_HTTP_STREAM_JOB is connected to a SOURCE_URL_REQUEST
-// (via the TYPE_HTTP_STREAM_REQUEST_BOUND_TO_JOB event), the
-// SOURCE_URL_REQUEST holds a reference to the SOURCE_HTTP_STREAM_JOB,
-// preventing it from being deleted as long as the SOURCE_URL_REQUEST is
-// still around.
-TEST(PassiveLogCollectorTest, HoldReferenceToDependentSource) {
- PassiveLogCollector log;
-
- EXPECT_EQ(0u, GetLiveSources(log.url_request_tracker_).size());
- EXPECT_EQ(0u, GetLiveSources(log.http_stream_job_tracker_).size());
-
- uint32 next_id = 0;
- NetLog::Source stream_job_source(NetLog::SOURCE_HTTP_STREAM_JOB, next_id++);
- NetLog::Source url_request_source(NetLog::SOURCE_URL_REQUEST, next_id++);
-
- // Start a SOURCE_HTTP_STREAM_JOB.
- log.OnAddEntry(NetLog::TYPE_HTTP_STREAM_JOB,
- base::TimeTicks(),
- stream_job_source,
- NetLog::PHASE_BEGIN,
- NULL);
-
- EXPECT_EQ(0u, GetLiveSources(log.url_request_tracker_).size());
- EXPECT_EQ(1u, GetLiveSources(log.http_stream_job_tracker_).size());
-
- // Start a SOURCE_URL_REQUEST.
- log.OnAddEntry(NetLog::TYPE_REQUEST_ALIVE,
- base::TimeTicks(),
- url_request_source,
- NetLog::PHASE_BEGIN,
- NULL);
-
- // Check that there is no association between the SOURCE_URL_REQUEST and the
- // SOURCE_HTTP_STREAM_JOB yet.
- ASSERT_EQ(1u, GetLiveSources(log.url_request_tracker_).size());
- {
- PassiveLogCollector::SourceInfo info =
- GetLiveSources(log.url_request_tracker_)[0];
- EXPECT_EQ(0, info.reference_count);
- EXPECT_EQ(0u, info.dependencies.size());
- }
- ASSERT_EQ(1u, GetLiveSources(log.http_stream_job_tracker_).size());
- {
- PassiveLogCollector::SourceInfo info =
- GetLiveSources(log.http_stream_job_tracker_)[0];
- EXPECT_EQ(0, info.reference_count);
- EXPECT_EQ(0u, info.dependencies.size());
- }
-
- // Associate the SOURCE_HTTP_STREAM_JOB with the SOURCE_URL_REQUEST.
- log.OnAddEntry(NetLog::TYPE_HTTP_STREAM_REQUEST_BOUND_TO_JOB,
- base::TimeTicks(),
- url_request_source,
- NetLog::PHASE_NONE,
- new net::NetLogSourceParameter("x", stream_job_source));
-
-  // Check that an association was made -- the SOURCE_URL_REQUEST should have
- // added a reference to the SOURCE_HTTP_STREAM_JOB.
- ASSERT_EQ(1u, GetLiveSources(log.url_request_tracker_).size());
- {
- PassiveLogCollector::SourceInfo info =
- GetLiveSources(log.url_request_tracker_)[0];
- EXPECT_EQ(0, info.reference_count);
- EXPECT_EQ(1u, info.dependencies.size());
- EXPECT_EQ(stream_job_source.id, info.dependencies[0].id);
- }
- ASSERT_EQ(1u, GetLiveSources(log.http_stream_job_tracker_).size());
- {
- PassiveLogCollector::SourceInfo info =
- GetLiveSources(log.http_stream_job_tracker_)[0];
- EXPECT_EQ(1, info.reference_count);
- EXPECT_EQ(0u, info.dependencies.size());
- }
-
- // Now end both |stream_job_source| and |url_request_source|. This sends them
-  // to the deletion queue, and they will be deleted once space runs out.
-
- log.OnAddEntry(NetLog::TYPE_REQUEST_ALIVE,
- base::TimeTicks(),
- url_request_source,
- NetLog::PHASE_END,
- NULL);
-
- log.OnAddEntry(NetLog::TYPE_HTTP_STREAM_JOB,
- base::TimeTicks(),
- stream_job_source,
- NetLog::PHASE_END,
- NULL);
-
- // Verify that both sources are in fact dead, and that |url_request_source|
- // still holds a reference to |stream_job_source|.
- ASSERT_EQ(0u, GetLiveSources(log.url_request_tracker_).size());
- ASSERT_EQ(1u, GetDeadSources(log.url_request_tracker_).size());
- {
- PassiveLogCollector::SourceInfo info =
- GetDeadSources(log.url_request_tracker_)[0];
- EXPECT_EQ(0, info.reference_count);
- EXPECT_EQ(1u, info.dependencies.size());
- EXPECT_EQ(stream_job_source.id, info.dependencies[0].id);
- }
- EXPECT_EQ(0u, GetLiveSources(log.http_stream_job_tracker_).size());
- ASSERT_EQ(1u, GetDeadSources(log.http_stream_job_tracker_).size());
- {
- PassiveLogCollector::SourceInfo info =
- GetDeadSources(log.http_stream_job_tracker_)[0];
- EXPECT_EQ(1, info.reference_count);
- EXPECT_EQ(0u, info.dependencies.size());
- }
-
- // Cycle through a bunch of SOURCE_HTTP_STREAM_JOB -- if it were not
-  // referenced, this loop would have deleted it.
- for (size_t i = 0; i < HttpStreamJobTracker::kMaxGraveyardSize; ++i) {
- log.OnAddEntry(NetLog::TYPE_HTTP_STREAM_JOB,
- base::TimeTicks(),
- NetLog::Source(NetLog::SOURCE_HTTP_STREAM_JOB, next_id++),
- NetLog::PHASE_END,
- NULL);
- }
-
- EXPECT_EQ(0u, GetLiveSources(log.http_stream_job_tracker_).size());
- ASSERT_EQ(HttpStreamJobTracker::kMaxGraveyardSize + 1,
- GetDeadSources(log.http_stream_job_tracker_).size());
- {
- PassiveLogCollector::SourceInfo info =
- GetDeadSources(log.http_stream_job_tracker_)[0];
- EXPECT_EQ(stream_job_source.id, info.source_id);
- EXPECT_EQ(1, info.reference_count);
- EXPECT_EQ(0u, info.dependencies.size());
- }
-
- // Cycle through a bunch of SOURCE_URL_REQUEST -- this will cause
- // |url_request_source| to be freed, which in turn should release the final
-  // reference to |stream_job_source|, causing it to be freed as well.
- for (size_t i = 0; i < RequestTracker::kMaxGraveyardSize; ++i) {
- log.OnAddEntry(NetLog::TYPE_REQUEST_ALIVE,
- base::TimeTicks(),
- NetLog::Source(NetLog::SOURCE_URL_REQUEST, next_id++),
- NetLog::PHASE_END,
- NULL);
- }
-
- EXPECT_EQ(0u, GetLiveSources(log.url_request_tracker_).size());
- EXPECT_EQ(RequestTracker::kMaxGraveyardSize,
- GetDeadSources(log.url_request_tracker_).size());
-
- EXPECT_EQ(0u, GetLiveSources(log.http_stream_job_tracker_).size());
- EXPECT_EQ(HttpStreamJobTracker::kMaxGraveyardSize,
- GetDeadSources(log.http_stream_job_tracker_).size());
-}
-
-// Have an HTTP_STREAM_JOB hold a reference to a SOCKET. Then cause the SOCKET
-// to get evicted (by exceeding the maximum sources limit). Now the
-// HTTP_STREAM_JOB is referencing a non-existent SOCKET. Lastly, evict the
-// HTTP_STREAM_JOB so it tries to drop all of its references. Make sure that in
-// releasing its non-existent reference it doesn't trip any DCHECKs.
-TEST(PassiveLogCollectorTest, HoldReferenceToDeletedSource) {
- PassiveLogCollector log;
-
- EXPECT_EQ(0u, GetLiveSources(log.http_stream_job_tracker_).size());
- EXPECT_EQ(0u, GetLiveSources(log.socket_tracker_).size());
-
- uint32 next_id = 0;
- NetLog::Source socket_source(NetLog::SOURCE_SOCKET, next_id++);
- NetLog::Source stream_job_source(NetLog::SOURCE_HTTP_STREAM_JOB, next_id++);
-
- // Start a SOURCE_SOCKET.
- log.OnAddEntry(NetLog::TYPE_SOCKET_ALIVE,
- base::TimeTicks(),
- socket_source,
- NetLog::PHASE_BEGIN,
- NULL);
-
- EXPECT_EQ(0u, GetLiveSources(log.http_stream_job_tracker_).size());
- EXPECT_EQ(1u, GetLiveSources(log.socket_tracker_).size());
-
- // Start a SOURCE_HTTP_STREAM_JOB.
- log.OnAddEntry(NetLog::TYPE_HTTP_STREAM_JOB,
- base::TimeTicks(),
- stream_job_source,
- NetLog::PHASE_BEGIN,
- NULL);
-
- // Associate the SOURCE_SOCKET with the SOURCE_HTTP_STREAM_JOB.
- log.OnAddEntry(NetLog::TYPE_SOCKET_POOL_BOUND_TO_SOCKET,
- base::TimeTicks(),
- stream_job_source,
- NetLog::PHASE_NONE,
- new net::NetLogSourceParameter("x", socket_source));
-
-  // Check that an association was made -- the SOURCE_HTTP_STREAM_JOB should
-  // have added a reference to the SOURCE_SOCKET.
- ASSERT_EQ(1u, GetLiveSources(log.http_stream_job_tracker_).size());
- {
- PassiveLogCollector::SourceInfo info =
- GetLiveSources(log.http_stream_job_tracker_)[0];
- EXPECT_EQ(0, info.reference_count);
- EXPECT_EQ(1u, info.dependencies.size());
- EXPECT_EQ(socket_source.id, info.dependencies[0].id);
- }
- ASSERT_EQ(1u, GetLiveSources(log.socket_tracker_).size());
- {
- PassiveLogCollector::SourceInfo info =
- GetLiveSources(log.socket_tracker_)[0];
- EXPECT_EQ(1, info.reference_count);
- EXPECT_EQ(0u, info.dependencies.size());
- }
-
- // Add lots of sources to the socket tracker. This is just enough to cause
- // the tracker to reach its peak, and reset all of its data as a safeguard.
- for (size_t i = 0; i < SocketTracker::kMaxNumSources; ++i) {
- log.OnAddEntry(NetLog::TYPE_SOCKET_ALIVE,
- base::TimeTicks(),
- NetLog::Source(NetLog::SOURCE_SOCKET, next_id++),
- NetLog::PHASE_BEGIN,
- NULL);
- }
- ASSERT_EQ(1u, GetLiveSources(log.socket_tracker_).size());
-
- // End the original request. Then saturate the graveyard with enough other
- // requests to cause it to be deleted. Once that source is deleted, it will
-  // try to give up its reference to the SOCKET. However, that socket_id no
-  // longer exists -- this should not DCHECK().
- log.OnAddEntry(NetLog::TYPE_HTTP_STREAM_JOB,
- base::TimeTicks(),
- stream_job_source,
- NetLog::PHASE_END,
- NULL);
- for (size_t i = 0; i < HttpStreamJobTracker::kMaxGraveyardSize; ++i) {
- log.OnAddEntry(NetLog::TYPE_HTTP_STREAM_JOB,
- base::TimeTicks(),
- NetLog::Source(NetLog::SOURCE_HTTP_STREAM_JOB, next_id++),
- NetLog::PHASE_END,
- NULL);
- }
- EXPECT_EQ(HttpStreamJobTracker::kMaxGraveyardSize,
- GetDeadSources(log.http_stream_job_tracker_).size());
-}
-
-// Regression test for http://crbug.com/58847
-TEST(PassiveLogCollectorTest, ReleaseDependencyToUnreferencedSource) {
- PassiveLogCollector log;
-
-  // If these constants don't satisfy this relationship, the test won't be
-  // exercising the intended code path.
- EXPECT_LT(PassiveLogCollector::HttpStreamJobTracker::kMaxGraveyardSize,
- PassiveLogCollector::HttpStreamJobTracker::kMaxNumSources);
-
-  // Add a "reference" to a non-existent source (sourceID=1263 does not exist).
- scoped_refptr<net::NetLog::EventParameters> params =
- new net::NetLogSourceParameter(
- "source_dependency",
- net::NetLog::Source(net::NetLog::SOURCE_SOCKET, 1263));
- log.OnAddEntry(net::NetLog::TYPE_SOCKET_POOL_BOUND_TO_SOCKET,
- base::TimeTicks(),
- net::NetLog::Source(net::NetLog::SOURCE_HTTP_STREAM_JOB, 1706),
- net::NetLog::PHASE_NONE,
- params);
-
-  // At this point source 1706 has noted 1263 as a dependency. However, the
- // reference count for 1263 was not adjusted since it doesn't actually exist.
-
- // Move source 1706 to the graveyard.
- log.OnAddEntry(net::NetLog::TYPE_HTTP_STREAM_JOB,
- base::TimeTicks(),
- net::NetLog::Source(net::NetLog::SOURCE_HTTP_STREAM_JOB, 1706),
- net::NetLog::PHASE_END,
- NULL);
-
- // Now create a source entry for 1263, such that it is unreferenced and
- // waiting to be garbage collected.
- log.OnAddEntry(net::NetLog::TYPE_SOCKET_ALIVE,
- base::TimeTicks(),
- net::NetLog::Source(net::NetLog::SOURCE_SOCKET, 1263),
- net::NetLog::PHASE_END, NULL);
-
-  // Add kMaxGraveyardSize unreferenced HTTP_STREAM_JOBs to the circular buffer
-  // (graveyard) that contains source 1706. After adding kMaxGraveyardSize - 1
-  // of them the buffer is full, so adding one more evicts the oldest item,
-  // which is 1706. In doing so, 1706 will try to release the reference it
-  // *thinks* it has on 1263. However, 1263 has a reference count of 0 and is
-  // already in a graveyard.
- for (size_t i = 0;
- i < PassiveLogCollector::HttpStreamJobTracker::kMaxGraveyardSize; ++i) {
- log.OnAddEntry(net::NetLog::TYPE_HTTP_STREAM_JOB,
- base::TimeTicks(),
- net::NetLog::Source(net::NetLog::SOURCE_HTTP_STREAM_JOB, i),
- net::NetLog::PHASE_END,
- NULL);
- }
-
- // To pass, this should simply not have DCHECK-ed above.
-}
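
The regression tests above boil down to one contract: releasing a reference to a source id that has already been evicted (or was never reference-counted, because it did not exist when the dependency was recorded) must be a silent no-op, not a DCHECK failure. Below is a minimal, standalone C++ sketch of that contract; DependencyTable and its methods are invented for illustration and are not the deleted implementation.

#include <cstdint>
#include <iostream>
#include <map>
#include <vector>

// Illustrative only: records which source ids a given source depends on, and
// bumps a per-source reference count so referenced sources are not evicted.
class DependencyTable {
 public:
  void AddSource(uint32_t id) { ref_counts_[id] = 0; }

  void Evict(uint32_t id) { ref_counts_.erase(id); }

  void AddDependency(uint32_t from, uint32_t to) {
    dependencies_[from].push_back(to);
    auto it = ref_counts_.find(to);
    // The dependency is recorded even if |to| is unknown, but the reference
    // count is only adjusted for sources that actually exist.
    if (it != ref_counts_.end())
      ++it->second;
  }

  // Called when |from| is finally deleted: drop every reference it held.
  // Releasing a reference to an id that no longer exists (or was never
  // counted) must not assert -- this mirrors the crbug.com/58847 scenario.
  void ReleaseDependencies(uint32_t from) {
    for (uint32_t to : dependencies_[from]) {
      auto it = ref_counts_.find(to);
      if (it != ref_counts_.end() && it->second > 0)
        --it->second;
    }
    dependencies_.erase(from);
  }

  int ref_count(uint32_t id) const {
    auto it = ref_counts_.find(id);
    return it == ref_counts_.end() ? -1 : it->second;
  }

 private:
  std::map<uint32_t, int> ref_counts_;
  std::map<uint32_t, std::vector<uint32_t>> dependencies_;
};

int main() {
  DependencyTable table;
  table.AddSource(1706);
  table.AddDependency(1706, 1263);  // 1263 does not exist yet: count untouched.
  table.AddSource(1263);
  table.Evict(1263);                // 1263 disappears before 1706 lets go.
  table.ReleaseDependencies(1706);  // Must be a harmless no-op for 1263.
  std::cout << table.ref_count(1263) << "\n";  // -1: still absent, no crash.
  return 0;
}
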
diff --git a/chrome/browser/resources/net_internals/browser_bridge.js b/chrome/browser/resources/net_internals/browser_bridge.js
index 969df1c..c6822da 100644
--- a/chrome/browser/resources/net_internals/browser_bridge.js
+++ b/chrome/browser/resources/net_internals/browser_bridge.js
@@ -1,4 +1,4 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -295,10 +295,6 @@ var BrowserBridge = (function() {
this.pollableDataHelpers_.serviceProviders.update(serviceProviders);
},
- receivedPassiveLogEntries: function(entries) {
- this.sourceTracker.onReceivedPassiveLogEntries(entries);
- },
-
receivedStartConnectionTestSuite: function() {
for (var i = 0; i < this.connectionTestsObservers_.length; i++)
this.connectionTestsObservers_[i].onStartedConnectionTestSuite();
diff --git a/chrome/browser/resources/net_internals/capture_view.html b/chrome/browser/resources/net_internals/capture_view.html
index 3aaee18..f7c3c50 100644
--- a/chrome/browser/resources/net_internals/capture_view.html
+++ b/chrome/browser/resources/net_internals/capture_view.html
@@ -3,12 +3,8 @@
<b>Network events generated by the browser are being captured to this window ...</b>
<table style="margin: 15px">
<tr>
- <td>Passively captured events:</td>
- <td align=right id=capture-view-passively-captured-count></td>
- </tr>
- <tr>
- <td>Actively captured events:</td>
- <td align=right id=capture-view-actively-captured-count></td>
+ <td>Captured events:</td>
+ <td align=right id=capture-view-captured-events-count></td>
</tr>
</table>
<p>
diff --git a/chrome/browser/resources/net_internals/capture_view.js b/chrome/browser/resources/net_internals/capture_view.js
index 954a441..255c3fa 100644
--- a/chrome/browser/resources/net_internals/capture_view.js
+++ b/chrome/browser/resources/net_internals/capture_view.js
@@ -1,4 +1,4 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -24,9 +24,7 @@ var CaptureView = (function() {
byteLoggingCheckbox.onclick =
this.onSetByteLogging_.bind(this, byteLoggingCheckbox);
- this.activelyCapturedCountBox_ = $(CaptureView.ACTIVELY_CAPTURED_COUNT_ID);
- this.passivelyCapturedCountBox_ =
- $(CaptureView.PASSIVELY_CAPTURED_COUNT_ID);
+ this.capturedEventsCountBox_ = $(CaptureView.CAPTURED_EVENTS_COUNT_ID);
$(CaptureView.DELETE_ALL_ID).onclick =
g_browser.sourceTracker.deleteAllSourceEntries.bind(
g_browser.sourceTracker);
@@ -45,10 +43,7 @@ var CaptureView = (function() {
// IDs for special HTML elements in capture_view.html
CaptureView.MAIN_BOX_ID = 'capture-view-tab-content';
CaptureView.BYTE_LOGGING_CHECKBOX_ID = 'capture-view-byte-logging-checkbox';
- CaptureView.PASSIVELY_CAPTURED_COUNT_ID =
- 'capture-view-passively-captured-count';
- CaptureView.ACTIVELY_CAPTURED_COUNT_ID =
- 'capture-view-actively-captured-count';
+ CaptureView.CAPTURED_EVENTS_COUNT_ID = 'capture-view-captured-events-count';
CaptureView.DELETE_ALL_ID = 'capture-view-delete-all';
CaptureView.TIP_ANCHOR_ID = 'capture-view-tip-anchor';
CaptureView.TIP_DIV_ID = 'capture-view-tip-div';
@@ -104,10 +99,8 @@ var CaptureView = (function() {
* Updates the counters showing how many events have been captured.
*/
updateEventCounts_: function() {
- this.activelyCapturedCountBox_.textContent =
- g_browser.sourceTracker.getNumActivelyCapturedEvents();
- this.passivelyCapturedCountBox_.textContent =
- g_browser.sourceTracker.getNumPassivelyCapturedEvents();
+ this.capturedEventsCountBox_.textContent =
+ g_browser.sourceTracker.getNumCapturedEvents();
},
/**
diff --git a/chrome/browser/resources/net_internals/help.html b/chrome/browser/resources/net_internals/help.html
index 54be2c7..9fb5c1c 100644
--- a/chrome/browser/resources/net_internals/help.html
+++ b/chrome/browser/resources/net_internals/help.html
@@ -1,6 +1,6 @@
<!DOCTYPE HTML>
<html>
-<!-- Copyright (c) 2011 The Chromium Authors. All rights reserved.
+<!-- Copyright (c) 2012 The Chromium Authors. All rights reserved.
Use of this source code is governed by a BSD-style license that can be
found in the LICENSE file.
-->
@@ -49,13 +49,6 @@
the <i>Events</i> tab.</li>
<li>Once you close the net-internals window, the data it had captured will be
discarded.</li>
-<li>Chrome keeps around a small buffer of the most recent network events
- even when the net-internals window is not open. That way if you open
- chrome://net-internals/ <b>shortly after</b> encountering a problem,
- you may still find the relevant data.
- These <i>passively captured</i> events are less accurate however, and will
- be prefixed in the log with <span style="font-family: monospace;">(P)</span>.
-</li>
</ul>
<button id="popup-close-button">Close this popup</button>
diff --git a/chrome/browser/resources/net_internals/log_util.js b/chrome/browser/resources/net_internals/log_util.js
index 267f56d..d094dda 100644
--- a/chrome/browser/resources/net_internals/log_util.js
+++ b/chrome/browser/resources/net_internals/log_util.js
@@ -187,8 +187,8 @@ log_util = (function() {
// Check for validity of each log entry, and then add the ones that pass.
// Since the events are kept around, and we can't just hide a single view
// on a bad event, we have more error checking for them than other data.
- var validPassiveEvents = [];
- var validActiveEvents = [];
+ var validEvents = [];
+ var numDeprecatedPassiveEvents = 0;
for (var eventIndex = 0; eventIndex < logDump.events.length; ++eventIndex) {
var event = logDump.events[eventIndex];
if (typeof(event) == 'object' && typeof(event.source) == 'object' &&
@@ -197,23 +197,30 @@ log_util = (function() {
getKeyWithValue(LogSourceType, event.source.type) != '?' &&
getKeyWithValue(LogEventPhase, event.phase) != '?') {
if (event.wasPassivelyCaptured) {
- validPassiveEvents.push(event);
- } else {
- validActiveEvents.push(event);
+ // NOTE: Up until Chrome 18, log dumps included "passively captured"
+ // events. These are no longer supported, so skip past them
+ // to avoid confusing the rest of the code.
+ numDeprecatedPassiveEvents++;
+ continue;
}
+ validEvents.push(event);
}
}
- g_browser.sourceTracker.onReceivedPassiveLogEntries(validPassiveEvents);
- g_browser.sourceTracker.onReceivedLogEntries(validActiveEvents);
+ g_browser.sourceTracker.onReceivedLogEntries(validEvents);
- var numInvalidEvents = logDump.events.length
- - validPassiveEvents.length
- - validActiveEvents.length;
+ var numInvalidEvents = logDump.events.length -
+ (validEvents.length + numDeprecatedPassiveEvents);
if (numInvalidEvents > 0) {
errorString += 'Unable to load ' + numInvalidEvents +
' events, due to invalid data.\n\n';
}
+ if (numDeprecatedPassiveEvents > 0) {
+ errorString += 'Discarded ' + numDeprecatedPassiveEvents +
+ ' passively collected events. Use an older version of Chrome to' +
+ ' load this dump if you want to see them.\n\n';
+ }
+
// Update all views with data from the file. Show only those views which
// successfully load the data.
for (var i = 0; i < tabIds.length; ++i) {
diff --git a/chrome/browser/resources/net_internals/log_view_painter.js b/chrome/browser/resources/net_internals/log_view_painter.js
index f380dce..7ff9578 100644
--- a/chrome/browser/resources/net_internals/log_view_painter.js
+++ b/chrome/browser/resources/net_internals/log_view_painter.js
@@ -21,9 +21,7 @@ function canCollapseBeginWithEnd(beginEntry) {
beginEntry.isBegin() &&
beginEntry.end &&
beginEntry.end.index == beginEntry.index + 1 &&
- (!beginEntry.orig.params || !beginEntry.end.orig.params) &&
- beginEntry.orig.wasPassivelyCaptured ==
- beginEntry.end.orig.wasPassivelyCaptured;
+ (!beginEntry.orig.params || !beginEntry.end.orig.params);
}
/**
@@ -48,9 +46,6 @@ printLogEntriesAsText = function(logEntries, parent) {
if (!entry.isEnd() || !canCollapseBeginWithEnd(entry.begin)) {
tablePrinter.addRow();
- // Annotate this entry with "(P)" if it was passively captured.
- tablePrinter.addCell(entry.orig.wasPassivelyCaptured ? '(P) ' : '');
-
tablePrinter.addCell('t=');
var date = timeutil.convertTimeTicksToDate(entry.orig.time) ;
var tCell = tablePrinter.addCell(date.getTime());
@@ -80,9 +75,8 @@ printLogEntriesAsText = function(logEntries, parent) {
// Output the extra parameters.
if (entry.orig.params != undefined) {
- // Those 6 skipped cells are: passive annotation, two for "t=", and
- // three for "st=".
- tablePrinter.setNewRowCellIndent(6 + entry.getDepth());
+ // Those 5 skipped cells are: two for "t=", and three for "st=".
+ tablePrinter.setNewRowCellIndent(5 + entry.getDepth());
addRowsForExtraParams(tablePrinter,
entry.orig,
g_browser.sourceTracker.getSecurityStripping());
diff --git a/chrome/browser/resources/net_internals/proxy_view.html b/chrome/browser/resources/net_internals/proxy_view.html
index 59969e4..b5d9bca 100644
--- a/chrome/browser/resources/net_internals/proxy_view.html
+++ b/chrome/browser/resources/net_internals/proxy_view.html
@@ -31,17 +31,6 @@
</tr></table>
- <h4>Proxy auto-config initialization</h4>
- <ul>
- <li>
- <a href='#events&q=type:PROXY_SCRIPT_DECIDER'>View all events</a>
- </li>
- <li>
- Latest proxy resolver event:
- <div id=proxy-view-resolver-log class=event-log></div>
- </li>
- </ul>
-
<h4>
Proxies which have failed recently, and are marked as bad
<input type=button value="Clear bad proxies" id=proxy-view-clear-bad-proxies class="hideOnLoadLog" />
diff --git a/chrome/browser/resources/net_internals/proxy_view.js b/chrome/browser/resources/net_internals/proxy_view.js
index e33589a..a97d36b 100644
--- a/chrome/browser/resources/net_internals/proxy_view.js
+++ b/chrome/browser/resources/net_internals/proxy_view.js
@@ -1,4 +1,4 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -7,7 +7,6 @@
*
* - Shows the current proxy settings.
* - Has a button to reload these settings.
- * - Shows the log entries for the most recent PROXY_SCRIPT_DECIDER source
* - Shows the list of proxy hostnames that are cached as "bad".
* - Has a button to clear the cached bad proxies.
*/
@@ -26,8 +25,6 @@ var ProxyView = (function() {
// Call superclass's constructor.
superClass.call(this, ProxyView.MAIN_BOX_ID);
- this.latestProxySourceId_ = 0;
-
// Hook up the UI components.
$(ProxyView.RELOAD_SETTINGS_BUTTON_ID).onclick =
g_browser.sendReloadProxySettings.bind(g_browser);
@@ -37,7 +34,6 @@ var ProxyView = (function() {
// Register to receive proxy information as it changes.
g_browser.addProxySettingsObserver(this, true);
g_browser.addBadProxiesObserver(this, true);
- g_browser.sourceTracker.addSourceEntryObserver(this);
}
// ID for special HTML element in category_tabs.html
@@ -50,7 +46,6 @@ var ProxyView = (function() {
ProxyView.RELOAD_SETTINGS_BUTTON_ID = 'proxy-view-reload-settings';
ProxyView.BAD_PROXIES_TBODY_ID = 'proxy-view-bad-proxies-tbody';
ProxyView.CLEAR_BAD_PROXIES_BUTTON_ID = 'proxy-view-clear-bad-proxies';
- ProxyView.PROXY_RESOLVER_LOG_DIV_ID = 'proxy-view-resolver-log';
cr.addSingletonGetter(ProxyView);
@@ -58,32 +53,11 @@ var ProxyView = (function() {
// Inherit the superclass's methods.
__proto__: superClass.prototype,
- onLoadLogStart: function(data) {
- // Need to reset this so the latest proxy source from the dump can be
- // identified when the log entries are loaded.
- this.latestProxySourceId_ = 0;
- },
-
- onLoadLogFinish: function(data, tabData) {
- // It's possible that the last PROXY_SCRIPT_DECIDER source was deleted
- // from the log, but earlier sources remain. When that happens, clear the
- // list of entries here, to avoid displaying misleading information.
- if (tabData != this.latestProxySourceId_)
- this.clearLog_();
+ onLoadLogFinish: function(data) {
return this.onProxySettingsChanged(data.proxySettings) &&
this.onBadProxiesChanged(data.badProxies);
},
- /**
- * Save view-specific state.
- *
- * Save the greatest seen proxy source id, so we will not incorrectly
- * identify the log source associated with the current proxy configuration.
- */
- saveState: function() {
- return this.latestProxySourceId_;
- },
-
onProxySettingsChanged: function(proxySettings) {
// Both |original| and |effective| are dictionaries describing the
// settings.
@@ -123,47 +97,6 @@ var ProxyView = (function() {
addTextNode(badUntilCell, badUntilDate.toLocaleString());
}
return true;
- },
-
- /**
- * Called whenever SourceEntries are updated with new log entries. Updates
- * |proxyResolverLogPre_| with the log entries of the PROXY_SCRIPT_DECIDER
- * SourceEntry with the greatest id.
- */
- onSourceEntriesUpdated: function(sourceEntries) {
- for (var i = sourceEntries.length - 1; i >= 0; --i) {
- var sourceEntry = sourceEntries[i];
-
- if (sourceEntry.getSourceType() != LogSourceType.PROXY_SCRIPT_DECIDER ||
- this.latestProxySourceId_ > sourceEntry.getSourceId()) {
- continue;
- }
-
- this.latestProxySourceId_ = sourceEntry.getSourceId();
-
- $(ProxyView.PROXY_RESOLVER_LOG_DIV_ID).innerHTML = '';
- sourceEntry.printAsText($(ProxyView.PROXY_RESOLVER_LOG_DIV_ID));
- }
- },
-
- /**
- * Clears the display of and log entries for the last proxy lookup.
- */
- clearLog_: function() {
- // Prevents display of partial logs.
- ++this.latestProxySourceId_;
-
- $(ProxyView.PROXY_RESOLVER_LOG_DIV_ID).innerHTML = '';
- $(ProxyView.PROXY_RESOLVER_LOG_DIV_ID).innerText = 'Deleted.';
- },
-
- onSourceEntriesDeleted: function(sourceIds) {
- if (sourceIds.indexOf(this.latestProxySourceId_) != -1)
- this.clearLog_();
- },
-
- onAllSourceEntriesDeleted: function() {
- this.clearLog_();
}
};
diff --git a/chrome/browser/resources/net_internals/source_tracker.js b/chrome/browser/resources/net_internals/source_tracker.js
index 153f529..d257f25 100644
--- a/chrome/browser/resources/net_internals/source_tracker.js
+++ b/chrome/browser/resources/net_internals/source_tracker.js
@@ -1,4 +1,4 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -40,8 +40,6 @@ var SourceTracker = (function() {
// Needed to simplify deletion, identify associated GUI elements, etc.
this.nextSourcelessEventId_ = -1;
- this.numPassivelyCapturedEvents_ = 0;
-
// Ordered list of log entries. Needed to maintain original order when
// generating log dumps
this.capturedEvents_ = [];
@@ -67,16 +65,8 @@ var SourceTracker = (function() {
* Returns the number of events that were captured while we were
* listening for events.
*/
- getNumActivelyCapturedEvents: function() {
- return this.capturedEvents_.length - this.numPassivelyCapturedEvents_;
- },
-
- /**
- * Returns the number of events that were captured passively by the
- * browser prior to when the net-internals page was started.
- */
- getNumPassivelyCapturedEvents: function() {
- return this.numPassivelyCapturedEvents_;
+ getNumCapturedEvents: function() {
+ return this.capturedEvents_.length;
},
/**
@@ -97,28 +87,6 @@ var SourceTracker = (function() {
return this.sourceEntries_[id];
},
- onReceivedPassiveLogEntries: function(logEntries) {
- // Due to an expected race condition, it is possible to receive actively
- // captured log entries before the passively logged entries are received.
- //
- // When that happens, we create a copy of the actively logged entries,
- // delete all entries, and, after handling all the passively logged
- // entries, add back the deleted actively logged entries.
- var earlyActivelyCapturedEvents = this.capturedEvents_.slice(0);
- if (earlyActivelyCapturedEvents.length > 0)
- this.deleteAllSourceEntries();
-
- this.numPassivelyCapturedEvents_ = logEntries.length;
- for (var i = 0; i < logEntries.length; ++i)
- logEntries[i].wasPassivelyCaptured = true;
-
- this.onReceivedLogEntries(logEntries);
-
- // Add back early actively captured events, if any.
- if (earlyActivelyCapturedEvents.length)
- this.onReceivedLogEntries(earlyActivelyCapturedEvents);
- },
-
/**
* Sends each entry to all observers and updates |capturedEvents_|.
* Also assigns unique ids to log entries without a source.
@@ -179,11 +147,8 @@ var SourceTracker = (function() {
var newEventList = [];
for (var i = 0; i < this.capturedEvents_.length; ++i) {
var id = this.capturedEvents_[i].source.id;
- if (id in sourceIdDict) {
- if (this.capturedEvents_[i].wasPassivelyCaptured)
- --this.numPassivelyCapturedEvents_;
+ if (id in sourceIdDict)
continue;
- }
newEventList.push(this.capturedEvents_[i]);
}
this.capturedEvents_ = newEventList;
diff --git a/chrome/browser/ui/webui/net_internals/net_internals_ui.cc b/chrome/browser/ui/webui/net_internals/net_internals_ui.cc
index a730d91..211973f 100644
--- a/chrome/browser/ui/webui/net_internals/net_internals_ui.cc
+++ b/chrome/browser/ui/webui/net_internals/net_internals_ui.cc
@@ -29,7 +29,6 @@
#include "chrome/browser/io_thread.h"
#include "chrome/browser/net/chrome_net_log.h"
#include "chrome/browser/net/connection_tester.h"
-#include "chrome/browser/net/passive_log_collector.h"
#include "chrome/browser/net/url_fixer_upper.h"
#include "chrome/browser/prefs/pref_member.h"
#include "chrome/browser/prerender/prerender_manager.h"
@@ -304,10 +303,6 @@ class NetInternalsMessageHandler::IOThreadImpl
// IO thread.
void Detach();
- // Sends all passive log entries in |passive_entries| to the Javascript
- // handler, called on the IO thread.
- void SendPassiveLogEntries(const ChromeNetLog::EntryList& passive_entries);
-
// Called when the WebUI is deleted. Prevents calling Javascript functions
// afterwards. Called on UI thread.
void OnWebUIDeleted();
@@ -807,23 +802,6 @@ void NetInternalsMessageHandler::IOThreadImpl::Detach() {
connection_tester_.reset();
}
-void NetInternalsMessageHandler::IOThreadImpl::SendPassiveLogEntries(
- const ChromeNetLog::EntryList& passive_entries) {
- DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO));
- ListValue* dict_list = new ListValue();
- for (size_t i = 0; i < passive_entries.size(); ++i) {
- const ChromeNetLog::Entry& e = passive_entries[i];
- dict_list->Append(net::NetLog::EntryToDictionaryValue(e.type,
- e.time,
- e.source,
- e.phase,
- e.params,
- false));
- }
-
- SendJavascriptCommand("receivedPassiveLogEntries", dict_list);
-}
-
void NetInternalsMessageHandler::IOThreadImpl::OnWebUIDeleted() {
DCHECK(BrowserThread::CurrentlyOn(BrowserThread::UI));
was_webui_deleted_ = true;
@@ -839,10 +817,7 @@ void NetInternalsMessageHandler::IOThreadImpl::OnRendererReady(
// Register with network stack to observe events.
is_observing_log_ = true;
- ChromeNetLog::EntryList entries;
- AddAsObserverAndGetAllPassivelyCapturedEvents(io_thread_->net_log(),
- &entries);
- SendPassiveLogEntries(entries);
+ AddAsObserver(io_thread_->net_log());
}
void NetInternalsMessageHandler::IOThreadImpl::OnGetProxySettings(
diff --git a/chrome/chrome_browser.gypi b/chrome/chrome_browser.gypi
index a73af7c..d0cf916 100644
--- a/chrome/chrome_browser.gypi
+++ b/chrome/chrome_browser.gypi
@@ -1688,8 +1688,6 @@
'browser/net/net_pref_observer.h',
'browser/net/network_stats.cc',
'browser/net/network_stats.h',
- 'browser/net/passive_log_collector.cc',
- 'browser/net/passive_log_collector.h',
'browser/net/preconnect.cc',
'browser/net/preconnect.h',
'browser/net/predictor.cc',
diff --git a/chrome/chrome_tests.gypi b/chrome/chrome_tests.gypi
index d97b136..6ad74c0 100644
--- a/chrome/chrome_tests.gypi
+++ b/chrome/chrome_tests.gypi
@@ -1539,7 +1539,6 @@
'browser/net/http_server_properties_manager_unittest.cc',
'browser/net/load_timing_observer_unittest.cc',
'browser/net/network_stats_unittest.cc',
- 'browser/net/passive_log_collector_unittest.cc',
'browser/net/predictor_unittest.cc',
'browser/net/pref_proxy_config_tracker_impl_unittest.cc',
'browser/net/quoted_printable_unittest.cc',