-rw-r--r--  chrome/browser/browser_about_handler.cc  5
-rw-r--r--  chrome/browser/dom_ui/chrome_url_data_manager.cc  8
-rw-r--r--  chrome/browser/dom_ui/net_internals_ui.cc  25
-rw-r--r--  chrome/browser/net/view_http_cache_job_factory.cc  62
-rw-r--r--  chrome/browser/net/view_http_cache_job_factory.h (renamed from chrome/browser/net/view_net_internals_job_factory.h)  8
-rw-r--r--  chrome/browser/net/view_net_internals_job_factory.cc  875
-rw-r--r--  chrome/browser/resources/net_internals/httpcacheview.js  2
-rw-r--r--  chrome/chrome_browser.gypi  4
-rw-r--r--  chrome/common/url_constants.cc  5
-rw-r--r--  net/base/host_resolver_impl.cc  170
-rw-r--r--  net/base/host_resolver_impl.h  14
-rw-r--r--  net/base/net_log.h  4
-rw-r--r--  net/base/net_log_util.cc  235
-rw-r--r--  net/base/net_log_util.h  76
-rw-r--r--  net/base/net_log_util_unittest.cc  132
-rw-r--r--  net/net.gyp  3
-rwxr-xr-x  net/tools/dns_trace_formatter/formatter.py  310
-rwxr-xr-x  net/tools/dns_trace_formatter/parser.py  139
-rwxr-xr-x  net/tools/dns_trace_formatter/parser_test.py  73
19 files changed, 102 insertions, 2048 deletions
diff --git a/chrome/browser/browser_about_handler.cc b/chrome/browser/browser_about_handler.cc
index e3f8dbd6..ec99a98 100644
--- a/chrome/browser/browser_about_handler.cc
+++ b/chrome/browser/browser_about_handler.cc
@@ -910,10 +910,9 @@ bool WillHandleBrowserAboutURL(GURL* url, Profile* profile) {
if (LowerCaseEqualsASCII(url->spec(), chrome::kAboutBlankURL))
return false;
- // Rewrite about:cache/* URLs to chrome://net-internals/view-cache/*
+ // Rewrite about:cache/* URLs to chrome://view-http-cache/*
if (StartsWithAboutSpecifier(*url, chrome::kAboutCacheURL)) {
- *url = RemapAboutURL(chrome::kNetworkViewCacheURL + std::string("/"),
- *url);
+ *url = RemapAboutURL(chrome::kNetworkViewCacheURL, *url);
return true;
}
diff --git a/chrome/browser/dom_ui/chrome_url_data_manager.cc b/chrome/browser/dom_ui/chrome_url_data_manager.cc
index ea22d76..e21e849 100644
--- a/chrome/browser/dom_ui/chrome_url_data_manager.cc
+++ b/chrome/browser/dom_ui/chrome_url_data_manager.cc
@@ -20,7 +20,7 @@
#include "chrome/browser/browser_process.h"
#include "chrome/browser/chrome_thread.h"
#include "chrome/browser/net/chrome_url_request_context.h"
-#include "chrome/browser/net/view_net_internals_job_factory.h"
+#include "chrome/browser/net/view_http_cache_job_factory.h"
#include "chrome/common/chrome_paths.h"
#include "chrome/common/ref_counted_util.h"
#include "chrome/common/url_constants.h"
@@ -325,9 +325,9 @@ URLRequestJob* ChromeURLDataManager::Factory(URLRequest* request,
if (ChromeURLDataManager::URLToFilePath(request->url(), &path))
return new URLRequestChromeFileJob(request, path);
- // Next check for chrome://net-internals/, which uses its own job type.
- if (ViewNetInternalsJobFactory::IsSupportedURL(request->url()))
- return ViewNetInternalsJobFactory::CreateJobForRequest(request);
+ // Next check for chrome://view-http-cache/*, which uses its own job type.
+ if (ViewHttpCacheJobFactory::IsSupportedURL(request->url()))
+ return ViewHttpCacheJobFactory::CreateJobForRequest(request);
// Next check for chrome://appcache-internals/, which uses its own job type.
if (ViewAppCacheInternalsJobFactory::IsSupportedURL(request->url()))
diff --git a/chrome/browser/dom_ui/net_internals_ui.cc b/chrome/browser/dom_ui/net_internals_ui.cc
index 80281ea..8ddad85 100644
--- a/chrome/browser/dom_ui/net_internals_ui.cc
+++ b/chrome/browser/dom_ui/net_internals_ui.cc
@@ -323,9 +323,28 @@ void NetInternalsHTMLSource::StartDataRequest(const std::string& path,
if (!file_util::ReadFileToString(file_path, &data_string)) {
LOG(WARNING) << "Could not read resource: " << file_path.value();
- data_string = StringPrintf(
- "Failed to read file RESOURCES/net_internals/%s",
- filename.c_str());
+ data_string = StringPrintf("<p style='color:red'>Failed to read file "
+ "RESOURCES/net_internals/%s</p>",
+ EscapeForHTML(filename).c_str());
+
+ // During the transition from old implementation to new implementation,
+ // users may be entering the URLs for the old frontend.
+ data_string.append(
+ "<p>Note that the URL scheme for net-internals has changed because of "
+ "its new implementation (bug 37421):</p>"
+ "<ul>"
+ "<li>chrome://net-internals/proxyservice.* &rarr; "
+ "<a href='chrome://net-internals#proxy'>chrome://net-internals#proxy"
+ "</a></li>"
+ "<li>chrome://net-internals/hostresolver.* &rarr; <a href='chrome://net"
+ "-internals#dns'>chrome://net-internals#dns</a></li>"
+ "<li>chrome://net-internals/urlrequest.* &rarr; <a href='chrome://net-"
+ "internals#requests'>chrome://net-internals#requests</a></li>"
+ "<li>chrome://net-internals/ (overview for copy-pasting) &rarr; <a href"
+ "='chrome://net-internals#data'>chrome://net-internals#data</a></li>"
+ "<li>chrome://net-internals/view-cache/* &rarr; <a href="
+ "'chrome://view-http-cache'>chrome://view-http-cache</a></li>"
+ "</ul>");
}
scoped_refptr<RefCountedBytes> bytes(new RefCountedBytes);
diff --git a/chrome/browser/net/view_http_cache_job_factory.cc b/chrome/browser/net/view_http_cache_job_factory.cc
new file mode 100644
index 0000000..ce10ff1
--- /dev/null
+++ b/chrome/browser/net/view_http_cache_job_factory.cc
@@ -0,0 +1,62 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/browser/net/view_http_cache_job_factory.h"
+
+#include "chrome/common/url_constants.h"
+#include "net/url_request/url_request.h"
+#include "net/url_request/url_request_context.h"
+#include "net/url_request/url_request_simple_job.h"
+#include "net/url_request/view_cache_helper.h"
+
+namespace {
+
+// A job subclass that dumps an HTTP cache entry.
+class ViewHttpCacheJob : public URLRequestSimpleJob {
+ public:
+ explicit ViewHttpCacheJob(URLRequest* request)
+ : URLRequestSimpleJob(request) {}
+
+ // URLRequestSimpleJob methods:
+ virtual bool GetData(std::string* mime_type,
+ std::string* charset,
+ std::string* data) const;
+
+ private:
+ ~ViewHttpCacheJob() {}
+};
+
+bool ViewHttpCacheJob::GetData(std::string* mime_type,
+ std::string* charset,
+ std::string* data) const {
+ mime_type->assign("text/html");
+ charset->assign("UTF-8");
+
+ data->clear();
+
+ std::string cache_key;
+ cache_key =
+ request_->url().spec().substr(strlen(chrome::kNetworkViewCacheURL));
+ ViewCacheHelper::GetEntryInfoHTML(cache_key,
+ request_->context(),
+ chrome::kNetworkViewCacheURL,
+ data);
+
+ return true;
+}
+
+} // namespace
+
+// static
+bool ViewHttpCacheJobFactory::IsSupportedURL(const GURL& url) {
+ return StartsWithASCII(url.spec(),
+ chrome::kNetworkViewCacheURL,
+ true /*case_sensitive*/);
+}
+
+// static
+URLRequestJob* ViewHttpCacheJobFactory::CreateJobForRequest(
+ URLRequest* request) {
+ return new ViewHttpCacheJob(request);
+}
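
For reference, a minimal standalone sketch (not Chromium code) of the prefix check and cache-key extraction that the new ViewHttpCacheJobFactory and ViewHttpCacheJob perform, assuming chrome::kNetworkViewCacheURL is "chrome://view-http-cache/" as defined in url_constants.cc later in this change:

#include <iostream>
#include <string>

int main() {
  // Assumed value of chrome::kNetworkViewCacheURL after this change.
  const std::string kNetworkViewCacheURL = "chrome://view-http-cache/";
  const std::string url = "chrome://view-http-cache/http://example.com/";

  // IsSupportedURL(): case-sensitive prefix match on the URL spec.
  bool supported =
      url.compare(0, kNetworkViewCacheURL.size(), kNetworkViewCacheURL) == 0;

  // ViewHttpCacheJob::GetData(): everything after the prefix is the
  // disk-cache key handed to ViewCacheHelper::GetEntryInfoHTML().
  std::string cache_key = url.substr(kNetworkViewCacheURL.size());

  std::cout << supported << " " << cache_key << std::endl;
  // Prints: 1 http://example.com/
  return 0;
}

So a request for chrome://view-http-cache/http://example.com/ is rendered as the cache entry keyed by http://example.com/.
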
diff --git a/chrome/browser/net/view_net_internals_job_factory.h b/chrome/browser/net/view_http_cache_job_factory.h
index a694c93..e3fa152 100644
--- a/chrome/browser/net/view_net_internals_job_factory.h
+++ b/chrome/browser/net/view_http_cache_job_factory.h
@@ -2,17 +2,17 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef CHROME_BROWSER_NET_VIEW_NET_INTERNALS_JOB_FACTORY_H_
-#define CHROME_BROWSER_NET_VIEW_NET_INTERNALS_JOB_FACTORY_H_
+#ifndef CHROME_BROWSER_NET_VIEW_HTTP_CACHE_JOB_FACTORY_H_
+#define CHROME_BROWSER_NET_VIEW_HTTP_CACHE_JOB_FACTORY_H_
class GURL;
class URLRequest;
class URLRequestJob;
-class ViewNetInternalsJobFactory {
+class ViewHttpCacheJobFactory {
public:
static bool IsSupportedURL(const GURL& url);
static URLRequestJob* CreateJobForRequest(URLRequest* request);
};
-#endif // CHROME_BROWSER_NET_VIEW_NET_INTERNALS_JOB_FACTORY_H_
+#endif // CHROME_BROWSER_NET_VIEW_HTTP_CACHE_JOB_FACTORY_H_
diff --git a/chrome/browser/net/view_net_internals_job_factory.cc b/chrome/browser/net/view_net_internals_job_factory.cc
deleted file mode 100644
index 213ae5f..0000000
--- a/chrome/browser/net/view_net_internals_job_factory.cc
+++ /dev/null
@@ -1,875 +0,0 @@
-// Copyright (c) 2010 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "chrome/browser/net/view_net_internals_job_factory.h"
-
-#include <sstream>
-
-#include "base/format_macros.h"
-#include "base/stl_util-inl.h"
-#include "base/string_util.h"
-#include "chrome/browser/net/chrome_net_log.h"
-#include "chrome/browser/net/chrome_url_request_context.h"
-#include "chrome/browser/net/passive_log_collector.h"
-#include "chrome/common/url_constants.h"
-#include "net/base/escape.h"
-#include "net/base/host_resolver_impl.h"
-#include "net/base/net_errors.h"
-#include "net/base/net_log_util.h"
-#include "net/base/net_util.h"
-#include "net/base/sys_addrinfo.h"
-#include "net/proxy/proxy_service.h"
-#include "net/socket_stream/socket_stream.h"
-#include "net/url_request/url_request.h"
-#include "net/url_request/url_request_context.h"
-#include "net/url_request/url_request_simple_job.h"
-#include "net/url_request/view_cache_helper.h"
-
-namespace {
-
-const char kViewHttpCacheSubPath[] = "view-cache";
-
-// TODO(eroman): Delete this file. It should be replaced by
-// chrome/browser/dom_ui/net_internals_ui.cc once the porting is
-// complete.
-
-PassiveLogCollector* GetPassiveLogCollector(URLRequestContext* context) {
- // Really this is the same as:
- // g_browser_process->io_thread()->globals()->
- // net_log.get()
- // (But we can't access g_browser_process from the IO thread).
- ChromeNetLog* chrome_net_log = static_cast<ChromeNetLog*>(
- static_cast<ChromeURLRequestContext*>(context)->net_log());
- return chrome_net_log->passive_collector();
-}
-
-PassiveLogCollector::RequestTracker* GetURLRequestTracker(
- URLRequestContext* context) {
- return GetPassiveLogCollector(context)->url_request_tracker();
-}
-
-PassiveLogCollector::RequestTracker* GetSocketStreamTracker(
- URLRequestContext* context) {
- return GetPassiveLogCollector(context)->socket_stream_tracker();
-}
-
-PassiveLogCollector::InitProxyResolverTracker* GetInitProxyResolverTracker(
- URLRequestContext* context) {
- return GetPassiveLogCollector(context)->init_proxy_resolver_tracker();
-}
-
-std::string GetDetails(const GURL& url) {
- DCHECK(ViewNetInternalsJobFactory::IsSupportedURL(url));
- size_t start = strlen(chrome::kNetworkViewInternalsURL);
- if (start >= url.spec().size())
- return std::string();
- return url.spec().substr(start);
-}
-
-GURL MakeURL(const std::string& details) {
- return GURL(std::string(chrome::kNetworkViewInternalsURL) + details);
-}
-
-// Converts a PassiveLogCollector::EntryList to a CapturingNetLog::EntryList.
-//
-// They are basically the same thing, except PassiveLogCollector has an extra
-// "order" field which we will drop.
-net::CapturingNetLog::EntryList ConvertEntryList(
- const PassiveLogCollector::EntryList& input) {
- net::CapturingNetLog::EntryList result;
- for (size_t i = 0; i < input.size(); ++i) {
- result.push_back(
- net::CapturingNetLog::Entry(
- input[i].type,
- input[i].time,
- input[i].source,
- input[i].phase,
- input[i].extra_parameters));
- }
- return result;
-}
-
-// A job subclass that implements a protocol to inspect the internal
-// state of the network stack.
-class ViewNetInternalsJob : public URLRequestSimpleJob {
- public:
-
- explicit ViewNetInternalsJob(URLRequest* request)
- : URLRequestSimpleJob(request) {}
-
- // URLRequestSimpleJob methods:
- virtual bool GetData(std::string* mime_type,
- std::string* charset,
- std::string* data) const;
-
- // Overridden methods from URLRequestJob:
- virtual bool IsRedirectResponse(GURL* location, int* http_status_code);
-
- private:
- ~ViewNetInternalsJob() {}
-
- // Returns true if the current request is for a "view-cache" URL.
- // If it is, then |key| is assigned the particular cache URL of the request.
- bool GetViewCacheKeyForRequest(std::string* key) const;
-};
-
-//------------------------------------------------------------------------------
-// Format helpers.
-//------------------------------------------------------------------------------
-
-void OutputTextInPre(const std::string& text, std::string* out) {
- out->append("<pre>");
- out->append(EscapeForHTML(text));
- out->append("</pre>");
-}
-
-// Appends an input button to |data| with text |title| that sends the command
-// string |command| back to the browser, and then refreshes the page.
-void DrawCommandButton(const std::string& title,
- const std::string& command,
- std::string* data) {
- StringAppendF(data, "<input type=\"button\" value=\"%s\" "
- "onclick=\"DoCommand('%s')\" />",
- title.c_str(),
- command.c_str());
-}
-
-//------------------------------------------------------------------------------
-// URLRequestContext helpers.
-//------------------------------------------------------------------------------
-
-net::HostResolverImpl* GetHostResolverImpl(URLRequestContext* context) {
- return context->host_resolver()->GetAsHostResolverImpl();
-}
-
-net::HostCache* GetHostCache(URLRequestContext* context) {
- if (GetHostResolverImpl(context))
- return GetHostResolverImpl(context)->cache();
- return NULL;
-}
-
-//------------------------------------------------------------------------------
-// Subsection definitions.
-//------------------------------------------------------------------------------
-
-class SubSection {
- public:
- // |name| is the URL path component for this subsection.
- // |title| is the textual description.
- SubSection(SubSection* parent, const char* name, const char* title)
- : parent_(parent),
- name_(name),
- title_(title) {
- }
-
- virtual ~SubSection() {
- STLDeleteContainerPointers(children_.begin(), children_.end());
- }
-
- // Outputs the subsection's contents to |out|.
- virtual void OutputBody(URLRequestContext* context, std::string* out) {}
-
- // Outputs this subsection, and all of its children.
- void OutputRecursive(URLRequestContext* context, std::string* out) {
- if (!is_root()) {
- // Canonicalizing the URL escapes characters which cause problems in HTML.
- std::string section_url = MakeURL(GetFullyQualifiedName()).spec();
-
- // Print the heading.
- StringAppendF(
- out,
- "<div>"
- "<span class=subsection_title>%s</span> "
- "<span class=subsection_name>(<a href='%s'>%s</a>)<span>"
- "</div>",
- EscapeForHTML(title_).c_str(),
- section_url.c_str(),
- EscapeForHTML(section_url).c_str());
-
- out->append("<div class=subsection_body>");
- }
-
- OutputBody(context, out);
-
- for (size_t i = 0; i < children_.size(); ++i)
- children_[i]->OutputRecursive(context, out);
-
- if (!is_root())
- out->append("</div>");
- }
-
- // Returns the SubSection contained by |this| with fully qualified name
- // |dotted_name|, or NULL if none was found.
- SubSection* FindSubSectionByName(const std::string& dotted_name) {
- if (dotted_name == "")
- return this;
-
- std::string child_name;
- std::string child_sub_name;
-
- size_t split_pos = dotted_name.find('.');
- if (split_pos == std::string::npos) {
- child_name = dotted_name;
- child_sub_name = std::string();
- } else {
- child_name = dotted_name.substr(0, split_pos);
- child_sub_name = dotted_name.substr(split_pos + 1);
- }
-
- for (size_t i = 0; i < children_.size(); ++i) {
- if (children_[i]->name_ == child_name)
- return children_[i]->FindSubSectionByName(child_sub_name);
- }
-
- return NULL; // Not found.
- }
-
- std::string GetFullyQualifiedName() {
- if (!parent_)
- return name_;
-
- std::string parent_name = parent_->GetFullyQualifiedName();
- if (parent_name.empty())
- return name_;
-
- return parent_name + "." + name_;
- }
-
- bool is_root() const {
- return parent_ == NULL;
- }
-
- protected:
- typedef std::vector<SubSection*> SubSectionList;
-
- void AddSubSection(SubSection* subsection) {
- children_.push_back(subsection);
- }
-
- SubSection* parent_;
- std::string name_;
- std::string title_;
-
- SubSectionList children_;
-};
-
-class ProxyServiceCurrentConfigSubSection : public SubSection {
- public:
- explicit ProxyServiceCurrentConfigSubSection(SubSection* parent)
- : SubSection(parent, "config", "Current configuration") {
- }
-
- virtual void OutputBody(URLRequestContext* context, std::string* out) {
- DrawCommandButton("Force reload", "reload-proxy-config", out);
-
- net::ProxyService* proxy_service = context->proxy_service();
- if (proxy_service->config_has_been_initialized()) {
- // net::ProxyConfig defines an operator<<.
- std::ostringstream stream;
- stream << proxy_service->config();
- OutputTextInPre(stream.str(), out);
- } else {
- out->append("<i>Not yet initialized</i>");
- }
- }
-};
-
-class ProxyServiceLastInitLogSubSection : public SubSection {
- public:
- explicit ProxyServiceLastInitLogSubSection(SubSection* parent)
- : SubSection(parent, "init_log", "Last initialized load log") {
- }
-
- virtual void OutputBody(URLRequestContext* context, std::string* out) {
- OutputTextInPre(
- net::NetLogUtil::PrettyPrintAsEventTree(
- ConvertEntryList(GetInitProxyResolverTracker(context)->entries()),
- 0),
- out);
- }
-};
-
-class ProxyServiceBadProxiesSubSection : public SubSection {
- public:
- explicit ProxyServiceBadProxiesSubSection(SubSection* parent)
- : SubSection(parent, "bad_proxies", "Bad Proxies") {
- }
-
- virtual void OutputBody(URLRequestContext* context, std::string* out) {
- net::ProxyService* proxy_service = context->proxy_service();
- const net::ProxyRetryInfoMap& bad_proxies_map =
- proxy_service->proxy_retry_info();
-
- DrawCommandButton("Clear", "clear-badproxies", out);
-
- out->append("<table border=1>");
- out->append("<tr><th>Bad proxy server</th>"
- "<th>Remaining time until retry (ms)</th></tr>");
-
- for (net::ProxyRetryInfoMap::const_iterator it = bad_proxies_map.begin();
- it != bad_proxies_map.end(); ++it) {
- const std::string& proxy_uri = it->first;
- const net::ProxyRetryInfo& retry_info = it->second;
-
- // Note that ttl_ms may be negative, for the cases where entries have
- // expired but not been garbage collected yet.
- int ttl_ms = static_cast<int>(
- (retry_info.bad_until - base::TimeTicks::Now()).InMilliseconds());
-
- // Color expired entries blue.
- if (ttl_ms > 0)
- out->append("<tr>");
- else
- out->append("<tr style='color:blue'>");
-
- StringAppendF(out, "<td>%s</td><td>%d</td>",
- EscapeForHTML(proxy_uri).c_str(),
- ttl_ms);
-
- out->append("</tr>");
- }
- out->append("</table>");
- }
-};
-
-class ProxyServiceSubSection : public SubSection {
- public:
- explicit ProxyServiceSubSection(SubSection* parent)
- : SubSection(parent, "proxyservice", "ProxyService") {
- AddSubSection(new ProxyServiceCurrentConfigSubSection(this));
- AddSubSection(new ProxyServiceLastInitLogSubSection(this));
- AddSubSection(new ProxyServiceBadProxiesSubSection(this));
- }
-};
-
-class HostResolverCacheSubSection : public SubSection {
- public:
- explicit HostResolverCacheSubSection(SubSection* parent)
- : SubSection(parent, "hostcache", "HostCache") {
- }
-
- virtual void OutputBody(URLRequestContext* context, std::string* out) {
- const net::HostCache* host_cache = GetHostCache(context);
-
- if (!host_cache || host_cache->caching_is_disabled()) {
- out->append("<i>Caching is disabled.</i>");
- return;
- }
-
- DrawCommandButton("Clear", "clear-hostcache", out);
-
- StringAppendF(
- out,
- "<ul><li>Size: %" PRIuS "</li>"
- "<li>Capacity: %" PRIuS "</li>"
- "<li>Time to live (ms) for success entries: %" PRId64"</li>"
- "<li>Time to live (ms) for failure entries: %" PRId64"</li></ul>",
- host_cache->size(),
- host_cache->max_entries(),
- host_cache->success_entry_ttl().InMilliseconds(),
- host_cache->failure_entry_ttl().InMilliseconds());
-
- out->append("<table border=1>"
- "<tr>"
- "<th>Host</th>"
- "<th>Address family</th>"
- "<th>Address list</th>"
- "<th>Canonical name</th>"
- "<th>Time to live (ms)</th>"
- "</tr>");
-
- for (net::HostCache::EntryMap::const_iterator it =
- host_cache->entries().begin();
- it != host_cache->entries().end();
- ++it) {
- const net::HostCache::Key& key = it->first;
- const net::HostCache::Entry* entry = it->second.get();
-
- std::string address_family_str =
- AddressFamilyToString(key.address_family);
-
- // Note that ttl_ms may be negative, for the cases where entries have
- // expired but not been garbage collected yet.
- int ttl_ms = static_cast<int>(
- (entry->expiration - base::TimeTicks::Now()).InMilliseconds());
-
- // Color expired entries blue.
- if (ttl_ms > 0) {
- out->append("<tr>");
- } else {
- out->append("<tr style='color:blue'>");
- }
-
- // Stringify all of the addresses in the address list, separated
- // by newlines (br).
- std::string address_list_html;
- std::string canonical_name_html;
-
- if (entry->error != net::OK) {
- address_list_html = "<span style='font-weight: bold; color:red'>" +
- EscapeForHTML(net::ErrorToString(entry->error)) +
- "</span>";
- } else {
- const struct addrinfo* current_address = entry->addrlist.head();
- while (current_address) {
- if (!address_list_html.empty())
- address_list_html += "<br>";
- address_list_html += EscapeForHTML(
- net::NetAddressToString(current_address));
- current_address = current_address->ai_next;
- }
- std::string canonical_name;
- if (entry->addrlist.GetCanonicalName(&canonical_name))
- canonical_name_html = EscapeForHTML(canonical_name);
- }
-
- StringAppendF(out,
- "<td>%s</td><td>%s</td><td>%s</td>"
- "<td>%s</td><td>%d</td></tr>",
- EscapeForHTML(key.hostname).c_str(),
- EscapeForHTML(address_family_str).c_str(),
- address_list_html.c_str(),
- canonical_name_html.c_str(),
- ttl_ms);
- }
-
- out->append("</table>");
- }
-
- static std::string AddressFamilyToString(net::AddressFamily address_family) {
- switch (address_family) {
- case net::ADDRESS_FAMILY_IPV4:
- return "IPV4";
- case net::ADDRESS_FAMILY_IPV6:
- return "IPV6";
- case net::ADDRESS_FAMILY_UNSPECIFIED:
- return "UNSPECIFIED";
- default:
- NOTREACHED();
- return "???";
- }
- }
-};
-
-class HostResolverTraceSubSection : public SubSection {
- public:
- explicit HostResolverTraceSubSection(SubSection* parent)
- : SubSection(parent, "trace", "Trace of requests") {
- }
-
- virtual void OutputBody(URLRequestContext* context, std::string* out) {
- net::HostResolverImpl* resolver = GetHostResolverImpl(context);
- if (!resolver) {
- out->append("<i>Tracing is not supported by this resolver.</i>");
- return;
- }
-
- DrawCommandButton("Clear", "clear-hostresolver-trace", out);
-
- if (resolver->IsRequestsTracingEnabled()) {
- DrawCommandButton("Disable tracing", "hostresolver-trace-disable", out);
- } else {
- DrawCommandButton("Enable tracing", "hostresolver-trace-enable", out);
- }
-
- net::CapturingNetLog::EntryList entries;
- if (resolver->GetRequestsTrace(&entries)) {
- out->append(
- "<p>To make sense of this trace, process it with the Python script "
- "formatter.py at "
- "<a href='http://src.chromium.org/viewvc/chrome/trunk/src/net/tools/"
- "dns_trace_formatter/'>net/tools/dns_trace_formatter</a></p>");
- OutputTextInPre(net::NetLogUtil::PrettyPrintAsEventTree(entries, 0),
- out);
- } else {
- out->append("<p><i>No trace information, must enable tracing.</i></p>");
- }
- }
-};
-
-class HostResolverSubSection : public SubSection {
- public:
- explicit HostResolverSubSection(SubSection* parent)
- : SubSection(parent, "hostresolver", "HostResolver") {
- AddSubSection(new HostResolverCacheSubSection(this));
- AddSubSection(new HostResolverTraceSubSection(this));
- }
-};
-
-// Helper for the URLRequest "outstanding" and "live" sections.
-void OutputURLAndLoadLog(const PassiveLogCollector::RequestInfo& request,
- std::string* out) {
- out->append("<li>");
- out->append("<nobr>");
- out->append(EscapeForHTML(request.url));
- out->append("</nobr>");
- OutputTextInPre(
- net::NetLogUtil::PrettyPrintAsEventTree(
- ConvertEntryList(request.entries),
- request.num_entries_truncated),
- out);
- out->append("</li>");
-}
-
-class URLRequestLiveSubSection : public SubSection {
- public:
- explicit URLRequestLiveSubSection(SubSection* parent)
- : SubSection(parent, "outstanding", "Outstanding requests") {
- }
-
- virtual void OutputBody(URLRequestContext* context, std::string* out) {
- PassiveLogCollector::RequestInfoList requests =
- GetURLRequestTracker(context)->GetLiveRequests();
-
- out->append("<ol>");
- for (size_t i = 0; i < requests.size(); ++i) {
- // Reverse the list order, so we display from most recent to oldest.
- size_t index = requests.size() - i - 1;
- OutputURLAndLoadLog(requests[index], out);
- }
- out->append("</ol>");
- }
-};
-
-class URLRequestRecentSubSection : public SubSection {
- public:
- explicit URLRequestRecentSubSection(SubSection* parent)
- : SubSection(parent, "recent", "Recently completed requests") {
- }
-
- virtual void OutputBody(URLRequestContext* context, std::string* out) {
- PassiveLogCollector::RequestInfoList recent =
- GetURLRequestTracker(context)->GetRecentlyDeceased();
-
- DrawCommandButton("Clear", "clear-urlrequest-graveyard", out);
-
- out->append("<ol>");
- for (size_t i = 0; i < recent.size(); ++i) {
- // Reverse the list order, so we dispay from most recent to oldest.
- size_t index = recent.size() - i - 1;
- OutputURLAndLoadLog(recent[index], out);
- }
- out->append("</ol>");
- }
-};
-
-class URLRequestSubSection : public SubSection {
- public:
- explicit URLRequestSubSection(SubSection* parent)
- : SubSection(parent, "urlrequest", "URLRequest") {
- AddSubSection(new URLRequestLiveSubSection(this));
- AddSubSection(new URLRequestRecentSubSection(this));
- }
-};
-
-class HttpCacheStatsSubSection : public SubSection {
- public:
- explicit HttpCacheStatsSubSection(SubSection* parent)
- : SubSection(parent, "stats", "Statistics") {
- }
-
- virtual void OutputBody(URLRequestContext* context, std::string* out) {
- ViewCacheHelper::GetStatisticsHTML(context, out);
- }
-};
-
-class HttpCacheSection : public SubSection {
- public:
- explicit HttpCacheSection(SubSection* parent)
- : SubSection(parent, "httpcache", "HttpCache") {
- AddSubSection(new HttpCacheStatsSubSection(this));
- }
-
- virtual void OutputBody(URLRequestContext* context, std::string* out) {
- // Advertise the view-cache URL (too much data to inline it).
- out->append("<p><a href='/");
- out->append(kViewHttpCacheSubPath);
- out->append("'>View all cache entries</a></p>");
- }
-};
-
-class SocketStreamLiveSubSection : public SubSection {
- public:
- explicit SocketStreamLiveSubSection(SubSection* parent)
- : SubSection(parent, "live", "Live SocketStreams") {
- }
-
- virtual void OutputBody(URLRequestContext* context, std::string* out) {
- PassiveLogCollector::RequestInfoList sockets =
- GetSocketStreamTracker(context)->GetLiveRequests();
-
- out->append("<ol>");
- for (size_t i = 0; i < sockets.size(); ++i) {
- // Reverse the list order, so we dispay from most recent to oldest.
- size_t index = sockets.size() - i - 1;
- OutputURLAndLoadLog(sockets[index], out);
- }
- out->append("</ol>");
- }
-};
-
-class SocketStreamRecentSubSection : public SubSection {
- public:
- explicit SocketStreamRecentSubSection(SubSection* parent)
- : SubSection(parent, "recent", "Recently completed SocketStreams") {
- }
-
- virtual void OutputBody(URLRequestContext* context, std::string* out) {
- PassiveLogCollector::RequestInfoList recent =
- GetSocketStreamTracker(context)->GetRecentlyDeceased();
-
- DrawCommandButton("Clear", "clear-socketstream-graveyard", out);
-
- out->append("<ol>");
- for (size_t i = 0; i < recent.size(); ++i) {
- // Reverse the list order, so we dispay from most recent to oldest.
- size_t index = recent.size() - i - 1;
- OutputURLAndLoadLog(recent[index], out);
- }
- out->append("</ol>");
- }
-};
-
-class SocketStreamSubSection : public SubSection {
- public:
- explicit SocketStreamSubSection(SubSection* parent)
- : SubSection(parent, "socketstream", "SocketStream") {
- AddSubSection(new SocketStreamLiveSubSection(this));
- AddSubSection(new SocketStreamRecentSubSection(this));
- }
-};
-
-class AllSubSections : public SubSection {
- public:
- AllSubSections() : SubSection(NULL, "", "") {
- AddSubSection(new ProxyServiceSubSection(this));
- AddSubSection(new HostResolverSubSection(this));
- AddSubSection(new URLRequestSubSection(this));
- AddSubSection(new HttpCacheSection(this));
- AddSubSection(new SocketStreamSubSection(this));
- }
-};
-
-bool HandleCommand(const std::string& command,
- URLRequestContext* context) {
- if (StartsWithASCII(command, "full-logging-", true)) {
- bool enable_full_logging = (command == "full-logging-enable");
- GetURLRequestTracker(context)->SetUnbounded(enable_full_logging);
- GetSocketStreamTracker(context)->SetUnbounded(enable_full_logging);
- return true;
- }
-
- if (StartsWithASCII(command, "hostresolver-trace-", true)) {
- bool enable_tracing = (command == "hostresolver-trace-enable");
- if (GetHostResolverImpl(context)) {
- GetHostResolverImpl(context)->EnableRequestsTracing(enable_tracing);
- }
- }
-
- if (command == "clear-urlrequest-graveyard") {
- GetURLRequestTracker(context)->ClearRecentlyDeceased();
- return true;
- }
-
- if (command == "clear-socketstream-graveyard") {
- GetSocketStreamTracker(context)->ClearRecentlyDeceased();
- return true;
- }
-
- if (command == "clear-hostcache") {
- net::HostCache* host_cache = GetHostCache(context);
- if (host_cache)
- host_cache->clear();
- return true;
- }
-
- if (command == "clear-badproxies") {
- context->proxy_service()->ClearBadProxiesCache();
- return true;
- }
-
- if (command == "clear-hostresolver-trace") {
- if (GetHostResolverImpl(context))
- GetHostResolverImpl(context)->ClearRequestsTrace();
- }
-
- if (command == "reload-proxy-config") {
- context->proxy_service()->ForceReloadProxyConfig();
- return true;
- }
-
- return false;
-}
-
-// Process any query strings in the request (for actions like toggling
-// full logging.
-void ProcessQueryStringCommands(URLRequestContext* context,
- const std::string& query) {
- if (!StartsWithASCII(query, "commands=", true)) {
- // Not a recognized format.
- return;
- }
-
- std::string commands_str = query.substr(strlen("commands="));
- commands_str = UnescapeURLComponent(commands_str, UnescapeRule::NORMAL);
-
- // The command list is comma-separated.
- std::vector<std::string> commands;
- SplitString(commands_str, ',', &commands);
-
- for (size_t i = 0; i < commands.size(); ++i)
- HandleCommand(commands[i], context);
-}
-
-// Appends some HTML controls to |data| that allow the user to enable full
-// logging, and clear some of the already logged data.
-void DrawControlsHeader(URLRequestContext* context, std::string* data) {
- bool is_full_logging_enabled =
- GetURLRequestTracker(context)->is_unbounded() &&
- GetSocketStreamTracker(context)->is_unbounded();
-
- data->append("<div style='margin-bottom: 10px'>");
-
- if (is_full_logging_enabled) {
- DrawCommandButton("Disable full logging", "full-logging-disable", data);
- } else {
- DrawCommandButton("Enable full logging", "full-logging-enable", data);
- }
-
- DrawCommandButton("Clear all data",
- // Send a list of comma separated commands:
- "clear-badproxies,"
- "clear-hostcache,"
- "clear-urlrequest-graveyard,"
- "clear-socketstream-graveyard,"
- "clear-hostresolver-trace",
- data);
-
- data->append("</div>");
-}
-
-bool ViewNetInternalsJob::GetData(std::string* mime_type,
- std::string* charset,
- std::string* data) const {
- mime_type->assign("text/html");
- charset->assign("UTF-8");
-
- URLRequestContext* context =
- static_cast<URLRequestContext*>(request_->context());
-
- data->clear();
-
- // Use a different handler for "view-cache/*" subpaths.
- std::string cache_key;
- if (GetViewCacheKeyForRequest(&cache_key)) {
- GURL url = MakeURL(kViewHttpCacheSubPath + std::string("/"));
- ViewCacheHelper::GetEntryInfoHTML(cache_key, context, url.spec(), data);
- return true;
- }
-
- // Handle any query arguments as a command request, then redirect back to
- // the same URL stripped of query parameters. The redirect happens as part
- // of IsRedirectResponse().
- if (request_->url().has_query()) {
- ProcessQueryStringCommands(context, request_->url().query());
- return true;
- }
-
- std::string details = GetDetails(request_->url());
-
- data->append("<!DOCTYPE HTML>"
- "<html><head><title>Network internals</title>"
- "<style>"
- "body { font-family: sans-serif; font-size: 0.8em; }\n"
- "tt, code, pre { font-family: WebKitHack, monospace; }\n"
- ".subsection_body { margin: 10px 0 10px 2em; }\n"
- ".subsection_title { font-weight: bold; }\n"
- "</style>"
- "<script>\n"
-
- // Unfortunately we can't do XHR from chrome://net-internals
- // because the chrome:// protocol restricts access.
- //
- // So instead, we will send commands by doing a form
- // submission (which as a side effect will reload the page).
- "function DoCommand(command) {\n"
- " document.getElementById('cmd').value = command;\n"
- " document.getElementById('cmdsender').submit();\n"
- "}\n"
-
- "</script>\n"
- "</head><body>"
- "<form action='' method=GET id=cmdsender>"
- "<input type='hidden' id=cmd name='commands'>"
- "</form>"
- "<p><a href='http://dev.chromium.org/"
- "developers/design-documents/view-net-internals'>"
- "Help: how do I use this?</a></p>");
-
- DrawControlsHeader(context, data);
-
- SubSection* all = Singleton<AllSubSections>::get();
- SubSection* section = all;
-
- // Display only the subsection tree asked for.
- if (!details.empty())
- section = all->FindSubSectionByName(details);
-
- if (section) {
- section->OutputRecursive(context, data);
- } else {
- data->append("<i>Nothing found for \"");
- data->append(EscapeForHTML(details));
- data->append("\"</i>");
- }
-
- data->append("</body></html>");
-
- return true;
-}
-
-bool ViewNetInternalsJob::IsRedirectResponse(GURL* location,
- int* http_status_code) {
- if (request_->url().has_query() && !GetViewCacheKeyForRequest(NULL)) {
- // Strip the query parameters.
- GURL::Replacements replacements;
- replacements.ClearQuery();
- *location = request_->url().ReplaceComponents(replacements);
- *http_status_code = 307;
- return true;
- }
- return false;
-}
-
-bool ViewNetInternalsJob::GetViewCacheKeyForRequest(
- std::string* key) const {
- std::string path = GetDetails(request_->url());
- if (!StartsWithASCII(path, kViewHttpCacheSubPath, true))
- return false;
-
- if (path.size() > strlen(kViewHttpCacheSubPath) &&
- path[strlen(kViewHttpCacheSubPath)] != '/')
- return false;
-
- if (key && path.size() > strlen(kViewHttpCacheSubPath) + 1)
- *key = path.substr(strlen(kViewHttpCacheSubPath) + 1);
-
- return true;
-}
-
-} // namespace
-
-// static
-bool ViewNetInternalsJobFactory::IsSupportedURL(const GURL& url) {
- // Note that kNetworkViewInternalsURL is terminated by a '/'.
- return StartsWithASCII(url.spec(),
- chrome::kNetworkViewInternalsURL,
- true /*case_sensitive*/);
-}
-
-// static
-URLRequestJob* ViewNetInternalsJobFactory::CreateJobForRequest(
- URLRequest* request) {
- return new ViewNetInternalsJob(request);
-}
diff --git a/chrome/browser/resources/net_internals/httpcacheview.js b/chrome/browser/resources/net_internals/httpcacheview.js
index 43b1e0a..809f66f 100644
--- a/chrome/browser/resources/net_internals/httpcacheview.js
+++ b/chrome/browser/resources/net_internals/httpcacheview.js
@@ -40,7 +40,7 @@ HttpCacheView.prototype.onHttpCacheInfoReceived = function(info) {
var li = addNode(keysOl, 'li');
var a = addNode(li, 'a');
addTextNode(a, key);
- a.href = 'chrome://net-internals/view-cache/' + key;
+ a.href = 'chrome://view-http-cache/' + key;
a.target = '_blank';
}
};
diff --git a/chrome/chrome_browser.gypi b/chrome/chrome_browser.gypi
index 8f9c7c4..ea9d3eb 100644
--- a/chrome/chrome_browser.gypi
+++ b/chrome/chrome_browser.gypi
@@ -1639,8 +1639,8 @@
'browser/net/url_request_slow_http_job.h',
'browser/net/url_request_tracking.cc',
'browser/net/url_request_tracking.h',
- 'browser/net/view_net_internals_job_factory.cc',
- 'browser/net/view_net_internals_job_factory.h',
+ 'browser/net/view_http_cache_job_factory.cc',
+ 'browser/net/view_http_cache_job_factory.h',
'browser/net/websocket_experiment/websocket_experiment_runner.cc',
'browser/net/websocket_experiment/websocket_experiment_runner.h',
'browser/net/websocket_experiment/websocket_experiment_task.cc',
diff --git a/chrome/common/url_constants.cc b/chrome/common/url_constants.cc
index 4177d92..879b2f7 100644
--- a/chrome/common/url_constants.cc
+++ b/chrome/common/url_constants.cc
@@ -79,8 +79,7 @@ const char kChromeUIResourcesHost[] = "resources";
const char kChromeUIFileBrowseHost[] = "filebrowse";
const char kChromeUIMediaplayerHost[] = "mediaplayer";
const char kChromeUIInspectorHost[] = "inspector";
-// TODO(eroman): This value is temporary, while the page is being implemented.
-const char kChromeUINetInternalsHost[] = "net2";
+const char kChromeUINetInternalsHost[] = "net-internals";
const char kChromeUINewTabHost[] = "newtab";
const char kChromeUIThumbnailPath[] = "thumb";
const char kChromeUIThemePath[] = "theme";
@@ -95,7 +94,7 @@ const char kSyncSetupDonePath[] = "setupdone";
const char kAppCacheViewInternalsURL[] = "chrome://appcache-internals/";
const char kNetworkViewInternalsURL[] = "chrome://net-internals/";
-const char kNetworkViewCacheURL[] = "chrome://net-internals/view-cache";
+const char kNetworkViewCacheURL[] = "chrome://view-http-cache/";
void RegisterChromeSchemes() {
// Don't need "chrome-internal" which was used in old versions of Chrome for
diff --git a/net/base/host_resolver_impl.cc b/net/base/host_resolver_impl.cc
index 1b5eb78..ffaada3 100644
--- a/net/base/host_resolver_impl.cc
+++ b/net/base/host_resolver_impl.cc
@@ -155,70 +155,24 @@ class HostResolverImpl::Request {
//-----------------------------------------------------------------------------
-// Threadsafe log.
-class HostResolverImpl::RequestsTrace
- : public base::RefCountedThreadSafe<HostResolverImpl::RequestsTrace> {
- public:
- RequestsTrace() {}
-
- void Add(const std::string& msg) {
- CapturingNetLog::Entry entry(NetLog::TYPE_TODO_STRING,
- base::TimeTicks::Now(),
- NetLog::Source(),
- NetLog::PHASE_NONE,
- new NetLogStringParameter("todo", msg));
- AutoLock l(lock_);
- entries_.push_back(entry);
- }
-
- void Get(CapturingNetLog::EntryList* entries) {
- AutoLock l(lock_);
- *entries = entries_;
- }
-
- void Clear() {
- AutoLock l(lock_);
- entries_.clear();
- }
-
- private:
- Lock lock_;
- CapturingNetLog::EntryList entries_;
-};
-
-//-----------------------------------------------------------------------------
-
// This class represents a request to the worker pool for a "getaddrinfo()"
// call.
class HostResolverImpl::Job
: public base::RefCountedThreadSafe<HostResolverImpl::Job> {
public:
- Job(int id, HostResolverImpl* resolver, const Key& key,
- RequestsTrace* requests_trace)
+ Job(int id, HostResolverImpl* resolver, const Key& key)
: id_(id),
key_(key),
resolver_(resolver),
origin_loop_(MessageLoop::current()),
resolver_proc_(resolver->effective_resolver_proc()),
- requests_trace_(requests_trace),
error_(OK),
had_non_speculative_request_(false) {
- if (requests_trace_) {
- requests_trace_->Add(StringPrintf(
- "Created job j%d for {hostname='%s', address_family=%d}",
- id_, key.hostname.c_str(),
- static_cast<int>(key.address_family)));
- }
}
// Attaches a request to this job. The job takes ownership of |req| and will
// take care to delete it.
void AddRequest(Request* req) {
- if (requests_trace_) {
- requests_trace_->Add(StringPrintf(
- "Attached request r%d to job j%d", req->id(), id_));
- }
-
req->set_job(this);
requests_.push_back(req);
@@ -228,9 +182,6 @@ class HostResolverImpl::Job
// Called from origin loop.
void Start() {
- if (requests_trace_)
- requests_trace_->Add(StringPrintf("Starting job j%d", id_));
-
start_time_ = base::TimeTicks::Now();
// Dispatch the job to a worker thread.
@@ -252,9 +203,6 @@ class HostResolverImpl::Job
HostResolver* resolver = resolver_;
resolver_ = NULL;
- if (requests_trace_)
- requests_trace_->Add(StringPrintf("Cancelled job j%d", id_));
-
// Mark the job as cancelled, so when worker thread completes it will
// not try to post completion to origin loop.
{
@@ -320,11 +268,6 @@ class HostResolverImpl::Job
// objects (like MessageLoops, Singletons, etc). During shutdown these objects
// may no longer exist.
void DoLookup() {
- if (requests_trace_) {
- requests_trace_->Add(StringPrintf(
- "[resolver thread] Running job j%d", id_));
- }
-
// Running on the worker thread
error_ = ResolveAddrInfo(resolver_proc_,
key_.hostname,
@@ -332,11 +275,6 @@ class HostResolverImpl::Job
key_.host_resolver_flags,
&results_);
- if (requests_trace_) {
- requests_trace_->Add(StringPrintf(
- "[resolver thread] Completed job j%d", id_));
- }
-
// The origin loop could go away while we are trying to post to it, so we
// need to call its PostTask method inside a lock. See ~HostResolver.
{
@@ -358,13 +296,6 @@ class HostResolverImpl::Job
base::TimeDelta job_duration = base::TimeTicks::Now() - start_time_;
- if (requests_trace_) {
- requests_trace_->Add(StringPrintf(
- "Completing job j%d (took %d milliseconds)",
- id_,
- static_cast<int>(job_duration.InMilliseconds())));
- }
-
if (had_non_speculative_request_) {
// TODO(eroman): Add histogram for job times of non-speculative
// requests.
@@ -403,9 +334,6 @@ class HostResolverImpl::Job
// reference ensures that it remains valid until we are done.
scoped_refptr<HostResolverProc> resolver_proc_;
- // Thread safe log to write details into, or NULL.
- scoped_refptr<RequestsTrace> requests_trace_;
-
// Assigned on the worker thread, read on the origin thread.
int error_;
@@ -868,72 +796,6 @@ void HostResolverImpl::Shutdown() {
DiscardIPv6ProbeJob();
}
-void HostResolverImpl::ClearRequestsTrace() {
- if (requests_trace_)
- requests_trace_->Clear();
-}
-
-void HostResolverImpl::EnableRequestsTracing(bool enable) {
- requests_trace_ = enable ? new RequestsTrace : NULL;
- if (enable) {
- // Print the state of the world when logging was started.
- requests_trace_->Add("Enabled tracing");
- requests_trace_->Add(StringPrintf(
- "Current num outstanding jobs: %d",
- static_cast<int>(jobs_.size())));
-
- // Dump all of the outstanding jobs.
- if (!jobs_.empty()) {
- for (JobMap::iterator job_it = jobs_.begin();
- job_it != jobs_.end(); ++job_it) {
- Job* job = job_it->second;
-
- requests_trace_->Add(StringPrintf(
- "Outstanding job j%d for {host='%s', address_family=%d}, "
- "which was started at t=%d",
- job->id(),
- job->key().hostname.c_str(),
- static_cast<int>(job->key().address_family),
- static_cast<int>((job->start_time() - base::TimeTicks())
- .InMilliseconds())));
-
- // Dump all of the requests attached to this job.
- for (RequestsList::const_iterator req_it = job->requests().begin();
- req_it != job->requests().end(); ++req_it) {
- Request* req = *req_it;
- requests_trace_->Add(StringPrintf(
- " %sOutstanding request r%d is attached to job j%d "
- "{priority=%d, speculative=%d, referrer='%s'}",
- req->was_cancelled() ? "[CANCELLED] " : "",
- req->id(),
- job->id(),
- static_cast<int>(req->info().priority()),
- static_cast<int>(req->info().is_speculative()),
- req->info().referrer().spec().c_str()));
- }
- }
- }
-
- size_t total = 0u;
- for (size_t i = 0; i < arraysize(job_pools_); ++i)
- total += job_pools_[i]->GetNumPendingRequests();
-
- requests_trace_->Add(StringPrintf(
- "Number of queued requests: %d", static_cast<int>(total)));
- }
-}
-
-bool HostResolverImpl::IsRequestsTracingEnabled() const {
- return !!requests_trace_; // Cast to bool.
-}
-
-bool HostResolverImpl::GetRequestsTrace(CapturingNetLog::EntryList* entries) {
- if (!requests_trace_)
- return false;
- requests_trace_->Get(entries);
- return true;
-}
-
void HostResolverImpl::SetPoolConstraints(JobPoolIndex pool_index,
size_t max_outstanding_jobs,
size_t max_pending_requests) {
@@ -1014,20 +876,6 @@ void HostResolverImpl::OnStartRequest(const BoundNetLog& net_log,
const RequestInfo& info) {
net_log.BeginEvent(NetLog::TYPE_HOST_RESOLVER_IMPL, NULL);
- if (requests_trace_) {
- requests_trace_->Add(StringPrintf(
- "Received request r%d for {hostname='%s', port=%d, priority=%d, "
- "speculative=%d, address_family=%d, allow_cached=%d, referrer='%s'}",
- request_id,
- info.hostname().c_str(),
- info.port(),
- static_cast<int>(info.priority()),
- static_cast<int>(info.is_speculative()),
- static_cast<int>(info.address_family()),
- static_cast<int>(info.allow_cached_response()),
- info.referrer().spec().c_str()));
- }
-
// Notify the observers of the start.
if (!observers_.empty()) {
net_log.BeginEvent(NetLog::TYPE_HOST_RESOLVER_IMPL_OBSERVER_ONSTART, NULL);
@@ -1045,11 +893,6 @@ void HostResolverImpl::OnFinishRequest(const BoundNetLog& net_log,
int request_id,
const RequestInfo& info,
int error) {
- if (requests_trace_) {
- requests_trace_->Add(StringPrintf(
- "Finished request r%d with error=%d", request_id, error));
- }
-
// Notify the observers of the completion.
if (!observers_.empty()) {
net_log.BeginEvent(NetLog::TYPE_HOST_RESOLVER_IMPL_OBSERVER_ONFINISH, NULL);
@@ -1071,9 +914,6 @@ void HostResolverImpl::OnCancelRequest(const BoundNetLog& net_log,
const RequestInfo& info) {
net_log.AddEvent(NetLog::TYPE_CANCELLED, NULL);
- if (requests_trace_)
- requests_trace_->Add(StringPrintf("Cancelled request r%d", request_id));
-
// Notify the observers of the cancellation.
if (!observers_.empty()) {
net_log.BeginEvent(NetLog::TYPE_HOST_RESOLVER_IMPL_OBSERVER_ONCANCEL, NULL);
@@ -1176,7 +1016,7 @@ HostResolverImpl::Key HostResolverImpl::GetEffectiveKeyForRequest(
HostResolverImpl::Job* HostResolverImpl::CreateAndStartJob(Request* req) {
DCHECK(CanCreateJobForPool(*GetPoolForRequest(req)));
Key key = GetEffectiveKeyForRequest(req->info());
- scoped_refptr<Job> job = new Job(next_job_id_++, this, key, requests_trace_);
+ scoped_refptr<Job> job = new Job(next_job_id_++, this, key);
job->AddRequest(req);
AddOutstandingJob(job);
job->Start();
@@ -1184,9 +1024,6 @@ HostResolverImpl::Job* HostResolverImpl::CreateAndStartJob(Request* req) {
}
int HostResolverImpl::EnqueueRequest(JobPool* pool, Request* req) {
- if (requests_trace_)
- requests_trace_->Add(StringPrintf("Queued request r%d", req->id()));
-
scoped_ptr<Request> req_evicted_from_queue(
pool->InsertPendingRequest(req));
@@ -1195,9 +1032,6 @@ int HostResolverImpl::EnqueueRequest(JobPool* pool, Request* req) {
Request* r = req_evicted_from_queue.get();
int error = ERR_HOST_RESOLVER_QUEUE_TOO_LARGE;
- if (requests_trace_)
- requests_trace_->Add(StringPrintf("Evicted request r%d", r->id()));
-
OnFinishRequest(r->net_log(), r->id(), r->info(), error);
if (r == req)
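
For context on what this file loses: the removed RequestsTrace was a lock-protected event list that the resolver's worker threads appended to and the origin thread read. A minimal standalone sketch of that pattern, using std::mutex in place of the Chromium Lock/AutoLock seen above (class and method names are illustrative only):

#include <mutex>
#include <string>
#include <vector>

// Illustrative stand-in for the removed HostResolverImpl::RequestsTrace:
// worker threads call Add(), the origin thread copies entries out with Get()
// and can Clear() them, all under a single mutex.
class RequestsTraceSketch {
 public:
  void Add(const std::string& message) {
    std::lock_guard<std::mutex> lock(lock_);
    entries_.push_back(message);
  }

  void Get(std::vector<std::string>* entries) const {
    std::lock_guard<std::mutex> lock(lock_);
    *entries = entries_;  // Copy taken while holding the lock.
  }

  void Clear() {
    std::lock_guard<std::mutex> lock(lock_);
    entries_.clear();
  }

 private:
  mutable std::mutex lock_;
  std::vector<std::string> entries_;
};

The change drops this side channel in favour of emitting host-resolver events into the NetLog, per the updated TODO in net_log.h below.
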
diff --git a/net/base/host_resolver_impl.h b/net/base/host_resolver_impl.h
index 80ca2ca..938e52f 100644
--- a/net/base/host_resolver_impl.h
+++ b/net/base/host_resolver_impl.h
@@ -105,17 +105,6 @@ class HostResolverImpl : public HostResolver,
// Returns the cache this resolver uses, or NULL if caching is disabled.
HostCache* cache() { return cache_.get(); }
- // Clears the request trace log.
- void ClearRequestsTrace();
-
- // Starts/ends capturing requests to a trace log.
- void EnableRequestsTracing(bool enable);
-
- bool IsRequestsTracingEnabled() const;
-
- // Gets a copy of the requests trace log.
- bool GetRequestsTrace(CapturingNetLog::EntryList* entries);
-
// Applies a set of constraints for requests that belong to the specified
// pool. NOTE: Don't call this after requests have been already been started.
//
@@ -136,7 +125,6 @@ class HostResolverImpl : public HostResolver,
class JobPool;
class IPv6ProbeJob;
class Request;
- class RequestsTrace;
typedef std::vector<Request*> RequestsList;
typedef HostCache::Key Key;
typedef std::map<Key, scoped_refptr<Job> > JobMap;
@@ -256,8 +244,6 @@ class HostResolverImpl : public HostResolver,
NetworkChangeNotifier* const network_change_notifier_;
- scoped_refptr<RequestsTrace> requests_trace_;
-
// Indicate if probing is done after each network change event to set address
// family.
// When false, explicit setting of address family is used.
diff --git a/net/base/net_log.h b/net/base/net_log.h
index ba271c5..69676b7 100644
--- a/net/base/net_log.h
+++ b/net/base/net_log.h
@@ -31,9 +31,7 @@ namespace net {
// ******** The NetLog (and associated logging) is a work in progress ********
//
// TODO(eroman): Remove the 'const' qualitifer from the BoundNetLog methods.
-// TODO(eroman): Remove NetLogUtil. Pretty printing should only be done from
-// javascript, and should be very context-aware.
-// TODO(eroman): Make the DNS jobs emit directly into the NetLog.
+// TODO(eroman): Make the DNS jobs emit into the NetLog.
// TODO(eroman): Start a new Source each time URLRequest redirects
// (simpler to reason about each as a separate entity).
// TODO(eroman): Add the URLRequest load flags to the start entry.
diff --git a/net/base/net_log_util.cc b/net/base/net_log_util.cc
deleted file mode 100644
index b5339b9..0000000
--- a/net/base/net_log_util.cc
+++ /dev/null
@@ -1,235 +0,0 @@
-// Copyright (c) 2009 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "net/base/net_log_util.h"
-
-#include "base/format_macros.h"
-#include "base/json/json_writer.h"
-#include "base/string_util.h"
-#include "base/values.h"
-#include "net/base/net_errors.h"
-
-namespace net {
-namespace {
-
-class FormatHelper {
- public:
- std::string ToString(const std::vector<CapturingNetLog::Entry>& entries,
- size_t num_entries_truncated) {
- entries_.clear();
-
- // Pass 1: Match the start/end of indentation blocks. Fills |entries_|
- // with the results.
- PopulateEntries(entries);
-
- // Pass 2: Figure out the maximum width of each column. This allows us
- // to right-justify text within each column.
- size_t max_time_width, max_indentation, max_type_width, max_dt_width;
- GetMaxExtent(
- &max_time_width, &max_indentation, &max_type_width, &max_dt_width);
-
- // Pass 3: Assemble the string.
- std::string result;
-
- const int kSpacesPerIndentation = 2;
-
- for (size_t i = 0; i < entries_.size(); ++i) {
- if (num_entries_truncated > 0 && i + 1 == entries_.size()) {
- StringAppendF(&result, " ... Truncated %" PRIuS " entries ...\n",
- num_entries_truncated);
- }
-
- if (entries_[i].block_index != -1 &&
- static_cast<size_t>(entries_[i].block_index + 1) == i) {
- // If there were no entries in between the START/END block then don't
- // bother printing a line for END (it just adds noise, and we already
- // show the time delta besides START anyway).
- continue;
- }
-
- int indentation_spaces = entries_[i].indentation * kSpacesPerIndentation;
- std::string entry_str = GetEntryString(i);
-
- StringAppendF(&result, "t=%s: %s%s",
- PadStringLeft(GetTimeString(i), max_time_width).c_str(),
- PadStringLeft("", indentation_spaces).c_str(),
- entry_str.c_str());
-
- if (entries_[i].IsBeginEvent()) {
- // Summarize how long this block lasted.
- int padding = ((max_indentation - entries_[i].indentation) *
- kSpacesPerIndentation) + (max_type_width - entry_str.size());
- StringAppendF(&result, "%s [dt=%s]",
- PadStringLeft("", padding).c_str(),
- PadStringLeft(GetBlockDtString(i), max_dt_width).c_str());
- }
-
- // Append any custom parameters.
- NetLog::EventParameters* extra_params =
- entries_[i].log_entry->extra_parameters;
-
- if (extra_params) {
- std::string extra_details;
- scoped_ptr<Value> extra_details_value(extra_params->ToValue());
- base::JSONWriter::Write(extra_details_value.get(), true,
- &extra_details);
- // JSON writer uses CR LF in its pretty-printer. Normalize to newlines.
- ReplaceSubstringsAfterOffset(&extra_details, 0, "\r\n", "\n");
- result.append("\n");
- result.append(extra_details);
- }
-
- if (!extra_params && i + 1 != entries_.size())
- result += "\n";
- }
-
- return result;
- }
-
- private:
- struct Entry {
- explicit Entry(const CapturingNetLog::Entry* log_entry)
- : log_entry(log_entry), indentation(0), block_index(-1) {}
-
- bool IsBeginEvent() const {
- return log_entry->phase == NetLog::PHASE_BEGIN;
- }
-
- bool IsEndEvent() const {
- return log_entry->phase == NetLog::PHASE_END;
- }
-
- const CapturingNetLog::Entry* log_entry;
- size_t indentation;
- int block_index; // The index of the matching start / end of block.
- };
-
- void PopulateEntries(const std::vector<CapturingNetLog::Entry>& entries) {
- int current_indentation = 0;
-
- for (size_t i = 0; i < entries.size(); ++i) {
- Entry entry(&entries[i]);
-
- entry.indentation = current_indentation;
-
- if (entry.IsBeginEvent()) {
- // Indent everything contained in this block.
- current_indentation++;
- }
-
- if (entry.IsEndEvent()) {
- int start_index = FindStartOfBlockIndex(entry);
- if (start_index != -1) {
- // Point the start / end of block at each other.
- entry.block_index = start_index;
- entries_[start_index].block_index = i;
-
- // Restore the indentation prior to the block.
- // (Could be more than 1 level if close of blocks are missing).
- current_indentation = entries_[start_index].indentation;
- entry.indentation = current_indentation;
- }
- }
-
- entries_.push_back(entry);
- }
- }
-
- int FindStartOfBlockIndex(const Entry& entry) {
- DCHECK(entry.IsEndEvent());
-
- // Find the matching start of block by scanning backwards.
- for (int i = entries_.size() - 1; i >= 0; --i) {
- if (entries_[i].IsBeginEvent() &&
- entries_[i].log_entry->type == entry.log_entry->type) {
- return i;
- }
- }
- return -1; // Start not found.
- }
-
- void GetMaxExtent(size_t* max_time_width,
- size_t* max_indentation,
- size_t* max_type_width,
- size_t* max_dt_width) {
- *max_time_width = *max_indentation = *max_type_width = *max_dt_width = 0;
- for (size_t i = 0; i < entries_.size(); ++i) {
- *max_time_width = std::max(*max_time_width, GetTimeString(i).size());
- if (entries_[i].log_entry->phase != NetLog::PHASE_NONE)
- *max_type_width = std::max(*max_type_width, GetEntryString(i).size());
- *max_indentation = std::max(*max_indentation, entries_[i].indentation);
-
- if (entries_[i].IsBeginEvent())
- *max_dt_width = std::max(*max_dt_width, GetBlockDtString(i).size());
- }
- }
-
- std::string GetBlockDtString(size_t start_index) {
- int end_index = entries_[start_index].block_index;
- if (end_index == -1) {
- // Block is not closed, implicitly close it at EOF.
- end_index = entries_.size() - 1;
- }
- int64 dt_ms = (entries_[end_index].log_entry->time -
- entries_[start_index].log_entry->time).InMilliseconds();
-
- return Int64ToString(dt_ms);
- }
-
- std::string GetTimeString(size_t index) {
- int64 t_ms = (entries_[index].log_entry->time -
- base::TimeTicks()).InMilliseconds();
- return Int64ToString(t_ms);
- }
-
- std::string GetEntryString(size_t index) {
- const CapturingNetLog::Entry* entry = entries_[index].log_entry;
-
- std::string entry_str;
- NetLog::EventPhase phase = entry->phase;
-
- entry_str = NetLog::EventTypeToString(entry->type);
-
- if (phase == NetLog::PHASE_BEGIN &&
- index + 1 < entries_.size() &&
- static_cast<size_t>(entries_[index + 1].block_index) == index) {
- // If this starts an empty block, we will pretend it is a PHASE_NONE
- // so we don't print the "+" prefix.
- phase = NetLog::PHASE_NONE;
- }
-
- switch (phase) {
- case NetLog::PHASE_BEGIN:
- return std::string("+") + entry_str;
- case NetLog::PHASE_END:
- return std::string("-") + entry_str;
- case NetLog::PHASE_NONE:
- return std::string(" ") + entry_str;
- default:
- NOTREACHED();
- return std::string();
- }
- }
-
- static std::string PadStringLeft(const std::string& str, size_t width) {
- DCHECK_LE(str.size(), width);
- std::string padding;
- padding.resize(width - str.size(), ' ');
- return padding + str;
- }
-
- std::vector<Entry> entries_;
-};
-
-} // namespace
-
-// static
-std::string NetLogUtil::PrettyPrintAsEventTree(
- const std::vector<CapturingNetLog::Entry>& entries,
- size_t num_entries_truncated) {
- FormatHelper helper;
- return helper.ToString(entries, num_entries_truncated);
-}
-
-} // namespace net
diff --git a/net/base/net_log_util.h b/net/base/net_log_util.h
deleted file mode 100644
index 0d603e8..0000000
--- a/net/base/net_log_util.h
+++ /dev/null
@@ -1,76 +0,0 @@
-// Copyright (c) 2009 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef NET_BASE_NET_LOG_UTIL_H_
-#define NET_BASE_NET_LOG_UTIL_H_
-
-#include <string>
-#include <vector>
-
-#include "base/basictypes.h"
-#include "net/base/capturing_net_log.h"
-
-namespace net {
-
-// The NetLogUtil utility class contains methods to analyze and visualize
-// NetLog entries.
-
-class NetLogUtil {
- public:
- struct EventDuration {
- NetLog::EventType event;
- base::TimeDelta duration;
- };
- typedef std::vector<EventDuration> EventDurationList;
-
- // Builds a pretty printed ASCII tree showing the chronological order
- // of events.
- //
- // The indentation reflects hiearchy, with the duration of each indented
- // block noted on the right. The timestamp (tick count in milliseconds)
- // is noted in the left column.
- //
- // This detailed view of what went into servicing a request can be used
- // in logs, and is copy-pastable by users, for attaching to bug reports.
- //
- // Example A:
- //
- // t=0: +Event1 [dt = 8]
- // t=1: +Event2 [dt = 0]
- // t=1: EventX
- // t=1: -Event2
- // t=4: +Event3 [dt = 2]
- // t=6: -Event3
- // t=6: +Event2 [dt = 1]
- // t=7: -Event2
- // t=8: EventY
- // t=8: -Event1
- //
- // Here we can see that:
- // - Event1 started at t=0 and ended at t=8; the duration was 8 time units.
- // - Event2 took place while Event1 was in progress, and was repeated
- // at t=1 and t=6.
- // - EventX took place while (the first) Event2 was in progress.
-  //   - Event3 took 2 time units, whereas the first Event2 took 0 time.
-  //   - EventY took place right before Event1 finished, at t=8.
- //
- // In general the rules are:
- // - Log entries added by BeginEvent() are prefixed with a '+' and
- // start an indentation block.
- // - Log entries added by EndEvent() are prefixed with a '-' and
- // finish an indentation block.
- // - Log entries added by AddEvent() have no prefix.
- // - Time units are given as milliseconds.
- //
- static std::string PrettyPrintAsEventTree(
- const std::vector<CapturingNetLog::Entry>& entries,
- size_t num_entries_truncated);
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(NetLogUtil);
-};
-
-} // namespace net
-
-#endif // NET_BASE_NET_LOG_UTIL_H_
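The header comment above fully specifies the rendering rules. As an illustration only (this is not the deleted C++ implementation), a minimal Python sketch of those rules might look like the following; it assumes well-nested begin/end pairs and omits the column alignment and empty-block collapsing the real printer performed.

# Illustrative sketch of the event-tree rules documented in net_log_util.h.
# Simplified: assumes well-nested begin/end pairs; no column alignment.
def pretty_print(events):
  """events: list of (t_ms, phase, name); phase is 'begin', 'end' or None."""
  # Pass 1: match each 'begin' with its 'end' to compute block durations.
  # An unmatched 'begin' is implicitly closed at the last event.
  dt = {}
  stack = []  # indices of 'begin' events that are still open
  for i, (t, phase, name) in enumerate(events):
    if phase == 'begin':
      stack.append(i)
    elif phase == 'end' and stack:
      j = stack.pop()
      dt[j] = t - events[j][0]
  for j in stack:
    dt[j] = events[-1][0] - events[j][0]

  # Pass 2: print with '+'/'-' prefixes and two-space indentation per level.
  depth = 0
  lines = []
  for i, (t, phase, name) in enumerate(events):
    if phase == 'end':
      depth -= 1
    prefix = {'begin': '+', 'end': '-'}.get(phase, ' ')
    suffix = ''
    if phase == 'begin':
      suffix = ' [dt=%d]' % dt[i]
    lines.append('t=%d: %s%s%s%s' % (t, '  ' * depth, prefix, name, suffix))
    if phase == 'begin':
      depth += 1
  return '\n'.join(lines)

print pretty_print([(0, 'begin', 'EVENT1'),
                    (1, None, 'EVENT_X'),
                    (4, 'begin', 'EVENT2'),
                    (6, 'end', 'EVENT2'),
                    (8, 'end', 'EVENT1')])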
diff --git a/net/base/net_log_util_unittest.cc b/net/base/net_log_util_unittest.cc
deleted file mode 100644
index 98de4bc..0000000
--- a/net/base/net_log_util_unittest.cc
+++ /dev/null
@@ -1,132 +0,0 @@
-// Copyright (c) 2009 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "net/base/net_log_unittest.h"
-#include "net/base/net_log_util.h"
-#include "net/base/net_errors.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace net {
-namespace {
-
-CapturingNetLog::Entry MakeEventEntry(int t,
- NetLog::EventType event_type,
- NetLog::EventPhase event_phase) {
- return CapturingNetLog::Entry(event_type,
- MakeTime(t),
- NetLog::Source(),
- event_phase,
- NULL);
-}
-
-TEST(NetLogUtilTest, Basic) {
- CapturingNetLog::EntryList log;
-
- log.push_back(MakeEventEntry(1, NetLog::TYPE_HOST_RESOLVER_IMPL,
- NetLog::PHASE_BEGIN));
- log.push_back(
- MakeEventEntry(5, NetLog::TYPE_HOST_RESOLVER_IMPL_OBSERVER_ONSTART,
- NetLog::PHASE_BEGIN));
- log.push_back(
- MakeEventEntry(8, NetLog::TYPE_HOST_RESOLVER_IMPL_OBSERVER_ONSTART,
- NetLog::PHASE_END));
-
- log.push_back(MakeEventEntry(12, NetLog::TYPE_CANCELLED,
- NetLog::PHASE_NONE));
-
- log.push_back(MakeEventEntry(131, NetLog::TYPE_HOST_RESOLVER_IMPL,
- NetLog::PHASE_END));
-
- EXPECT_EQ(
- "t= 1: +HOST_RESOLVER_IMPL [dt=130]\n"
- "t= 5: HOST_RESOLVER_IMPL_OBSERVER_ONSTART [dt= 3]\n"
- "t= 12: CANCELLED\n"
- "t=131: -HOST_RESOLVER_IMPL",
- NetLogUtil::PrettyPrintAsEventTree(log, 0));
-}
-
-TEST(NetLogUtilTest, Basic2) {
- CapturingNetLog::EntryList log;
-
- log.push_back(MakeEventEntry(1, NetLog::TYPE_HOST_RESOLVER_IMPL,
- NetLog::PHASE_BEGIN));
-
- // Attach a string parameter to a CANCELLED event.
- CapturingNetLog::Entry e =
- MakeEventEntry(12, NetLog::TYPE_CANCELLED, NetLog::PHASE_NONE);
- e.extra_parameters =
- new NetLogStringParameter("string_name", "string_value");
- log.push_back(e);
-
- log.push_back(MakeEventEntry(131, NetLog::TYPE_HOST_RESOLVER_IMPL,
- NetLog::PHASE_END));
-
- EXPECT_EQ(
- "t= 1: +HOST_RESOLVER_IMPL [dt=130]\n"
- "t= 12: CANCELLED\n"
- "{\n"
- " \"string_name\": \"string_value\"\n"
- "}\n"
- "t=131: -HOST_RESOLVER_IMPL",
- NetLogUtil::PrettyPrintAsEventTree(log, 0));
-}
-
-TEST(NetLogUtilTest, UnmatchedOpen) {
- CapturingNetLog::EntryList log;
-
- log.push_back(MakeEventEntry(3, NetLog::TYPE_HOST_RESOLVER_IMPL,
- NetLog::PHASE_BEGIN));
-  // Note that there is no matching PHASE_END call for any of the following.
- log.push_back(
- MakeEventEntry(
- 6, NetLog::TYPE_HOST_RESOLVER_IMPL_OBSERVER_ONSTART,
- NetLog::PHASE_BEGIN));
- log.push_back(
- MakeEventEntry(7, NetLog::TYPE_HOST_RESOLVER_IMPL_OBSERVER_ONSTART,
- NetLog::PHASE_BEGIN));
- log.push_back(
- MakeEventEntry(8, NetLog::TYPE_HOST_RESOLVER_IMPL_OBSERVER_ONSTART,
- NetLog::PHASE_BEGIN));
- log.push_back(MakeEventEntry(10, NetLog::TYPE_CANCELLED,
- NetLog::PHASE_NONE));
- log.push_back(MakeEventEntry(16, NetLog::TYPE_HOST_RESOLVER_IMPL,
- NetLog::PHASE_END));
-
- EXPECT_EQ(
- "t= 3: +HOST_RESOLVER_IMPL [dt=13]\n"
- "t= 6: +HOST_RESOLVER_IMPL_OBSERVER_ONSTART [dt=10]\n"
- "t= 7: +HOST_RESOLVER_IMPL_OBSERVER_ONSTART [dt= 9]\n"
- "t= 8: +HOST_RESOLVER_IMPL_OBSERVER_ONSTART [dt= 8]\n"
- "t=10: CANCELLED\n"
- "t=16: -HOST_RESOLVER_IMPL",
- NetLogUtil::PrettyPrintAsEventTree(log, 0));
-}
-
-TEST(NetLogUtilTest, DisplayOfTruncated) {
- CapturingNetLog::EntryList log;
-
- log.push_back(MakeEventEntry(0,
- NetLog::TYPE_TCP_CONNECT,
- NetLog::PHASE_BEGIN));
- for (size_t i = 1; i <= 3; ++i) {
- log.push_back(MakeEventEntry(i,
- NetLog::TYPE_CANCELLED,
- NetLog::PHASE_NONE));
- }
- log.push_back(MakeEventEntry(9,
- NetLog::TYPE_TCP_CONNECT,
- NetLog::PHASE_END));
-
- EXPECT_EQ(
- "t=0: +TCP_CONNECT [dt=9]\n"
- "t=1: CANCELLED\n"
- "t=2: CANCELLED\n"
- "t=3: CANCELLED\n"
- " ... Truncated 4 entries ...\n"
- "t=9: -TCP_CONNECT",
- NetLogUtil::PrettyPrintAsEventTree(log, 4));
-}
-
-} // namespace
-} // namespace net
diff --git a/net/net.gyp b/net/net.gyp
index 976b3da..d9c85f9 100644
--- a/net/net.gyp
+++ b/net/net.gyp
@@ -102,8 +102,6 @@
'base/net_log.cc',
'base/net_log.h',
'base/net_log_event_type_list.h',
- 'base/net_log_util.cc',
- 'base/net_log_util.h',
'base/net_module.cc',
'base/net_module.h',
'base/net_util.cc',
@@ -633,7 +631,6 @@
'base/mime_util_unittest.cc',
'base/mock_network_change_notifier.h',
'base/net_log_unittest.h',
- 'base/net_log_util_unittest.cc',
'base/net_test_constants.h',
'base/net_util_unittest.cc',
'base/registry_controlled_domain_unittest.cc',
diff --git a/net/tools/dns_trace_formatter/formatter.py b/net/tools/dns_trace_formatter/formatter.py
deleted file mode 100755
index 2db8095..0000000
--- a/net/tools/dns_trace_formatter/formatter.py
+++ /dev/null
@@ -1,310 +0,0 @@
-#!/usr/bin/python2.4
-# Copyright (c) 2010 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-"""Application to format Chromium's host resolver traces to something readable.
-
-To use it, start by enabling tracing in Chromium:
- chrome://net-internals/hostresolver
-
-Next, copy-paste the trace results (those lines starting with "t=") and save
-them to a text file.
-
-Lastly, run:
- python formatter.py <file-containing-trace>
-"""
-
-import sys
-
-import parser
-
-# The log lines of the host resolver trace either describe a "request" or a
-# "job". The "request" is the actual DNS resolve request, whereas the "job" is
-# a helper that was spawned to service requests on a particular thread.
-#
-# In the log lines, each request or job is identified using a unique numeric ID.
-#
-# Since DNS resolving is multi-threaded, the log lines may appear jumbled.
-#
-# The first thing this code does is to group the log lines by id. This
-# collection of log lines is called an "entry", and represented by either
-# RequestEntry or JobEntry.
-#
-# So for example, say we had a log resembling:
-#
-# t=1: starting request r1
-# t=1: starting job j1
-# t=2: starting request r2
-# t=3: completed job j1
-# t=4: finished request r2
-# t=5: cancelled request r1
-#
-# We would start by building three entries:
-#
-# (id='r1')
-# t=1: starting request r1
-# t=5: cancelled request r1
-#
-# (id='j1')
-# t=1: starting job j1
-# t=3: completed job j1
-#
-# (id='r2')
-# t=2: starting request r2
-# t=4: finished request r2
-
-(ENTRY_TYPE_JOB, ENTRY_TYPE_REQUEST) = (0, 1)
-
-
-class EntryBase(object):
- """Helper structure that describes an individual hostresolver request/job."""
-
- def __init__(self, entries, entity_id, parsed_lines):
- """Constructor for EntryBase.
-
- Args:
- entries: The full list of EntryBase that this new instance belongs to.
- entity_id: The ID of this request/job.
- parsed_lines: An ordered list of ParsedLine for the log lines that apply
- to this request/job.
- """
- self._entries = entries
- self._entity_id = entity_id
- self._parsed_lines = parsed_lines
-
- def HasParsedLineOfType(self, line_type):
-    """Returns true if this entry contains a log line of type |line_type|."""
- for line in self._parsed_lines:
- if line.line_type == line_type:
- return True
- return False
-
- def GetElapsedTime(self):
- """Returns the start to finish duration of this request/job."""
- return self.GetEndTime() - self.GetStartTime()
-
- def GetStartTime(self):
- """Returns the start time for this request/job."""
- return self._parsed_lines[0].t
-
- def GetEndTime(self):
- """Returns the end time of this request/job."""
- return self._parsed_lines[-1].t
-
- def HasUnknownStart(self):
- """Returns true if the exact start of this request/job is unknown."""
- return 'is_fake' in self._parsed_lines[0].details
-
- def HasUnknownEnd(self):
- """Returns true if the exact end of this request/job is unknown."""
- return 'is_fake' in self._parsed_lines[-1].details
-
- def WasAliveAt(self, t):
- """Returns true if this request/job was running at time |t|."""
- return t >= self.GetStartTime() and t <= self.GetEndTime()
-
- def GetEntryType(self):
- """Returns whether this entry represents a request or a job.
- Should be one of the enums ENTRY_TYPE_*"""
- return None
-
- def GetLiveEntriesAtStart(self, entry_type):
- return [entry for entry in self._entries
- if (entry != self and entry.WasAliveAt(self.GetStartTime()) and
- entry.GetEntryType() == entry_type)]
-
- def Print(self):
- """Outputs a text description of this request/job."""
- print '------------------------'
- print self.GetCaption()
- print '------------------------'
- print self.GetDetails()
-
- def GetElapsedTimeString(self):
- """Returns a string that describes how long this request/job took."""
- if self.HasUnknownStart() or self.HasUnknownEnd():
- return '%d+ millis' % self.GetElapsedTime()
- return '%d millis' % self.GetElapsedTime()
-
-
-class RequestEntry(EntryBase):
-  """Specialization of EntryBase that describes a hostresolver request."""
-
- def __init__(self, entries, entity_id, parsed_lines, min_time, max_time):
- """Constructor for RequestEntry.
-
- Args:
- entries: The full list of EntryBase that this new instance belongs to.
- entity_id: The ID of this request.
- parsed_lines: An ordered list of ParsedLine for the log lines that apply
- to this request.
- min_time: The start time of the log.
- max_time: The end time of the log.
- """
- EntryBase.__init__(self, entries, entity_id, parsed_lines)
-
- # Scan the parsed lines for this request to find the details on the request.
- self.request_details = '???'
- for line in parsed_lines:
- if 'request_details' in line.details:
- self.request_details = line.details['request_details']
- break
-
- # If the log didn't capture when the request was received, manufacture a log
- # entry for the start, at t = min_time - 1
- if not self.HasParsedLineOfType(parser.LINE_TYPE_RECEIVED_REQUEST):
- fake_line = parser.ParsedLine()
- fake_line.t = min_time - 1
- fake_line.line_type = parser.LINE_TYPE_RECEIVED_REQUEST
- fake_line.raw_line = 'Unknown start of request'
- fake_line.details['is_fake'] = True
- self._parsed_lines.insert(0, fake_line)
-
-    # If the log didn't capture when the request ended, manufacture a log
-    # entry for the end, at t = max_time + 1
- if not (self.HasParsedLineOfType(parser.LINE_TYPE_FINISHED_REQUEST) or
- self.HasParsedLineOfType(parser.LINE_TYPE_CANCELLED_REQUEST)):
- fake_line = parser.ParsedLine()
- fake_line.t = max_time + 1
- fake_line.line_type = parser.LINE_TYPE_FINISHED_REQUEST
- fake_line.raw_line = 'Unknown end of request'
- fake_line.details['is_fake'] = True
- self._parsed_lines.append(fake_line)
-
- def GetEntryType(self):
- return ENTRY_TYPE_REQUEST
-
- def GetCaption(self):
- return 'Request %s (took %s) for %s ' % (self._entity_id,
- self.GetElapsedTimeString(),
- self.request_details)
-
- def GetDetails(self):
- reqs = self.GetLiveEntriesAtStart(ENTRY_TYPE_REQUEST)
- out = [('There were %d requests already in progress when this '
- 'started:') % len(reqs)]
- out.extend([' ' + r.GetCaption() for r in reqs])
-
- out.append('Log lines:')
- out.extend([' ' + line.raw_line for line in self._parsed_lines])
-
- return '\n'.join(out)
-
-
-class JobEntry(EntryBase):
-  """Specialization of EntryBase that describes a hostresolver job."""
-
- def __init__(self, entries, entity_id, parsed_lines, min_time, max_time):
- """Constructor for JobEntry.
-
- Args:
- entries: The full list of EntryBase that this new instance belongs to.
- entity_id: The ID of this job.
- parsed_lines: An ordered list of ParsedLine for the log lines that apply
- to this job.
- min_time: The start time of the log.
- max_time: The end time of the log.
- """
- EntryBase.__init__(self, entries, entity_id, parsed_lines)
-
- # Find the hostname/address_family of the job
- self.hostname = '???'
- self.address_family = '???'
-
- for line in parsed_lines:
- if 'hostname' in line.details and 'address_family' in line.details:
- self.hostname = line.details['hostname']
- self.address_family = line.details['address_family']
- break
-
- # If the log didn't capture when the job started, manufacture a start time.
- if not (self.HasParsedLineOfType(parser.LINE_TYPE_CREATED_JOB) or
- self.HasParsedLineOfType(parser.LINE_TYPE_OUTSTANDING_JOB) or
- self.HasParsedLineOfType(parser.LINE_TYPE_STARTING_JOB)):
- fake_line = parser.ParsedLine()
- fake_line.t = min_time - 1
- fake_line.line_type = parser.LINE_TYPE_OUTSTANDING_JOB
- fake_line.raw_line = 'Unknown start of job'
- fake_line.details['is_fake'] = True
- self._parsed_lines.insert(0, fake_line)
-
- # If the log didn't capture when the job ended, manufacture an end time.
- if not self.HasParsedLineOfType(parser.LINE_TYPE_COMPLETING_JOB):
- fake_line = parser.ParsedLine()
- fake_line.t = max_time + 1
- fake_line.line_type = parser.LINE_TYPE_COMPLETING_JOB
- fake_line.raw_line = 'Unknown end of job'
- fake_line.details['is_fake'] = True
- self._parsed_lines.append(fake_line)
-
- def GetEntryType(self):
- return ENTRY_TYPE_JOB
-
- def GetCaption(self):
- return 'Job %s (took %s) for "%s" ' % (self._entity_id,
- self.GetElapsedTimeString(),
- self.hostname)
-
- def GetDetails(self):
- jobs = self.GetLiveEntriesAtStart(ENTRY_TYPE_JOB)
- out = [('There were %d jobs already in progress when '
- 'this started:' % len(jobs))]
- out.extend([' ' + j.GetCaption() for j in jobs])
-
- out.append('Log lines:')
- out.extend([' ' + line.raw_line for line in self._parsed_lines])
-
- return '\n'.join(out)
-
-
-def BuildEntries(parsed_lines, min_time, max_time):
-  """Returns a list of EntryBase objects built from |parsed_lines|."""
-
-  # In this loop we aggregate all of the parsed lines with a common entity_id,
-  # and also determine the order in which entity_ids are first seen.
- id_to_line_list = {}
- entity_ids = []
- for line in parsed_lines:
- entity_id = line.entity_id
- if not entity_id in entity_ids:
- entity_ids.append(entity_id)
- lines = id_to_line_list.setdefault(entity_id, [])
- lines.append(line)
-
- # Create an entry (either JobEntry or RequestEntry) for each unique entity_id
- # in the trace. Ordered by their first appearance in the trace.
-
- entries = []
- for entity_id in entity_ids:
- if entity_id.startswith('j'):
- entries.append(JobEntry(entries,
- entity_id, id_to_line_list[entity_id],
- min_time, max_time))
- if entity_id.startswith('r'):
- entries.append(RequestEntry(entries,
- entity_id, id_to_line_list[entity_id],
- min_time, max_time))
-
- return entries
-
-
-def main():
- if len(sys.argv) != 2:
- print 'Usage: %s <logfile_path>' % sys.argv[0]
- sys.exit(1)
-
- parsed_lines = parser.ParseFile(sys.argv[1])
-
- min_time = parsed_lines[0].t
- max_time = parsed_lines[-1].t
-
- entries = BuildEntries(parsed_lines, min_time, max_time)
-
- for entry in entries:
- entry.Print()
-
-
-if __name__ == '__main__':
- main()
-
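For reference, the deleted formatter could also be driven programmatically rather than through main(). A small sketch under that assumption follows; the trace lines are made up, but they match the formats parser.ParseLine recognizes.

# Sketch: driving the deleted formatter programmatically. The trace lines
# are hypothetical but match the regular expressions in parser.ParseLine.
import formatter
import parser

raw_lines = [
    't=1: "Received request r1 for {hostname=\'example.com\', port=80}"',
    't=2: "Created job j1 for {hostname=\'example.com\', address_family=0}"',
    't=3: "Attached request r1 to job j1"',
    't=9: "Completing job j1"',
    't=10: "Finished request r1 with error=0"',
]

parsed_lines = [parser.ParseLine(l) for l in raw_lines]
parsed_lines = [p for p in parsed_lines if p]  # drop unparseable lines

min_time = parsed_lines[0].t
max_time = parsed_lines[-1].t
entries = formatter.BuildEntries(parsed_lines, min_time, max_time)

for entry in entries:
  entry.Print()  # prints the same captions/details as running formatter.py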
diff --git a/net/tools/dns_trace_formatter/parser.py b/net/tools/dns_trace_formatter/parser.py
deleted file mode 100755
index a19b9d7..0000000
--- a/net/tools/dns_trace_formatter/parser.py
+++ /dev/null
@@ -1,139 +0,0 @@
-#!/usr/bin/python2.4
-# Copyright (c) 2010 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-"""Package for parsing the log lines from a Chromium DNS trace capture."""
-
-import re
-
-# The type of log line.
-(LINE_TYPE_OUTSTANDING_JOB,
- LINE_TYPE_CREATED_JOB,
- LINE_TYPE_STARTING_JOB,
- LINE_TYPE_RUNNING_JOB,
- LINE_TYPE_COMPLETED_JOB,
- LINE_TYPE_COMPLETING_JOB,
- LINE_TYPE_RECEIVED_REQUEST,
- LINE_TYPE_ATTACHED_REQUEST,
- LINE_TYPE_FINISHED_REQUEST,
- LINE_TYPE_CANCELLED_REQUEST) = range(10)
-
-class ParsedLine(object):
- """Structure that represents a parsed line from the trace log.
-
- Attributes:
- t: The time (in milliseconds) when the line was logged.
- line_type: The type of event that this line was logging. One of LINE_TYPE_*.
- raw_line: The full unparsed line.
- details: Dictionary containing additional properties that were parsed from
- this line.
- """
-
- def __init__(self):
- self.t = None
- self.line_type = None
- self.raw_line = None
- self.details = {}
-
-
-def ParseLine(line):
- """Parses |line| into a ParsedLine. Returns None on failure."""
-
- m = re.search(r'^t=(\d+): "(.*)"\s*$', line)
- if not m:
- return None
-
- parsed = ParsedLine()
- parsed.t = int(m.group(1))
- parsed.raw_line = line.strip()
-
- msg = m.group(2)
-
- m = re.search(r"^Received request (r\d+) for (.*)$", msg)
- if m:
- parsed.line_type = LINE_TYPE_RECEIVED_REQUEST
- parsed.entity_id = m.group(1)
- parsed.details['request_details'] = m.group(2)
- return parsed
-
- m = re.search(r"^Created job (j\d+) for {hostname='([^']*)', "
- "address_family=(\d+)}$", msg)
- if m:
- parsed.line_type = LINE_TYPE_CREATED_JOB
- parsed.entity_id = m.group(1)
- parsed.details['hostname'] = m.group(2)
- parsed.details['address_family'] = m.group(3)
- return parsed
-
- m = re.search(r"^Outstanding job (j\d+) for {hostname='([^']*)', address_"
- "family=(\d+)}, which was started at t=(\d+)$", msg)
- if m:
- parsed.line_type = LINE_TYPE_OUTSTANDING_JOB
- parsed.t = int(m.group(4))
- parsed.entity_id = m.group(1)
- parsed.details['hostname'] = m.group(2)
- parsed.details['address_family'] = m.group(3)
- return parsed
-
- m = re.search(r"^Attached request (r\d+) to job (j\d+)$", msg)
- if m:
- parsed.line_type = LINE_TYPE_ATTACHED_REQUEST
- parsed.entity_id = m.group(1)
- parsed.details['job_id'] = m.group(2)
- return parsed
-
- m = re.search(r'^Finished request (r\d+) (.*)$', msg)
- if m:
- parsed.line_type = LINE_TYPE_FINISHED_REQUEST
- parsed.entity_id = m.group(1)
- parsed.details['extra'] = m.group(2)
- return parsed
-
- m = re.search(r'^Cancelled request (r\d+)$', msg)
- if m:
- parsed.line_type = LINE_TYPE_CANCELLED_REQUEST
- parsed.entity_id = m.group(1)
- return parsed
-
- m = re.search(r'^Starting job (j\d+)$', msg)
- if m:
- parsed.line_type = LINE_TYPE_STARTING_JOB
- parsed.entity_id = m.group(1)
- return parsed
-
- m = re.search(r'\[resolver thread\] Running job (j\d+)$', msg)
- if m:
- parsed.line_type = LINE_TYPE_RUNNING_JOB
- parsed.entity_id = m.group(1)
- return parsed
-
- m = re.search(r'\[resolver thread\] Completed job (j\d+)$', msg)
- if m:
- parsed.line_type = LINE_TYPE_COMPLETED_JOB
- parsed.entity_id = m.group(1)
- return parsed
-
- m = re.search(r'Completing job (j\d+).*$', msg)
- if m:
- parsed.line_type = LINE_TYPE_COMPLETING_JOB
- parsed.entity_id = m.group(1)
- return parsed
-
- return None
-
-
-def ParseFile(path):
- """Parses the file at |path| and returns a list of ParsedLines."""
- f = open(path, 'r')
-
- entries = []
-
- for line in f:
- parsed = ParseLine(line)
- if parsed:
- entries.append(parsed)
-
- f.close()
- return entries
-
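Besides line_type, t, and entity_id (which the tests below exercise), ParseLine also fills the details dictionary for several line types. A brief sketch, using a made-up line that follows the documented 'Created job' format:

# Sketch: the |details| dict that ParseLine fills in for a 'Created job'
# line (the line itself is hypothetical, but matches the documented format).
import parser

l = parser.ParseLine(
    't=42: "Created job j3 for {hostname=\'example.org\', address_family=2}"')
print l.line_type == parser.LINE_TYPE_CREATED_JOB   # True
print l.entity_id                                   # j3
print l.details['hostname']                         # example.org
print l.details['address_family']                   # '2' (kept as a string)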
diff --git a/net/tools/dns_trace_formatter/parser_test.py b/net/tools/dns_trace_formatter/parser_test.py
deleted file mode 100755
index 4c3cff9..0000000
--- a/net/tools/dns_trace_formatter/parser_test.py
+++ /dev/null
@@ -1,73 +0,0 @@
-#!/usr/bin/python2.4
-# Copyright (c) 2010 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-"""Unittests for the parser module."""
-
-import parser
-import unittest
-
-class TestParser(unittest.TestCase):
-
- def testParseInvalidLine(self):
- """Should return None when fails to parse a line."""
- l = parser.ParseLine('''This is not a valid line!''')
- self.assertEqual(None, l)
-
-
- def testParseLine(self):
- """Tests parsing some valid lines"""
- l = parser.ParseLine(
- '''t=170099840: "Received request r17 for {hostname='clients1.goog'''
- '''le.lv', port=80, priority=3, speculative=0, address_family=0, '''
- '''allow_cached=1, referrer=''}"''')
- self.assertEqual(parser.LINE_TYPE_RECEIVED_REQUEST, l.line_type)
- self.assertEqual(170099840, l.t)
- self.assertEqual('r17', l.entity_id)
-
- l = parser.ParseLine(
- '''t=170099840: "Created job j15 for {hostname='clients1.go'''
- '''ogle.lv', address_family=0}"''')
- self.assertEqual(parser.LINE_TYPE_CREATED_JOB, l.line_type)
- self.assertEqual(170099840, l.t)
- self.assertEqual('j15', l.entity_id)
-
- l = parser.ParseLine('t=170099840: "Attached request r17 to job j15"')
- self.assertEqual(parser.LINE_TYPE_ATTACHED_REQUEST, l.line_type)
- self.assertEqual(170099840, l.t)
- self.assertEqual('r17', l.entity_id)
-
- l = parser.ParseLine('t=170103144: "Finished request r18 with error=0"')
- self.assertEqual(parser.LINE_TYPE_FINISHED_REQUEST, l.line_type)
- self.assertEqual(170103144, l.t)
- self.assertEqual('r18', l.entity_id)
-
- l = parser.ParseLine('t=170103461: "Starting job j16"')
- self.assertEqual(parser.LINE_TYPE_STARTING_JOB, l.line_type)
- self.assertEqual(170103461, l.t)
- self.assertEqual('j16', l.entity_id)
-
- l = parser.ParseLine('t=170103461: "[resolver thread] Running job j1"')
- self.assertEqual(parser.LINE_TYPE_RUNNING_JOB, l.line_type)
- self.assertEqual(170103461, l.t)
- self.assertEqual('j1', l.entity_id)
-
- l = parser.ParseLine('t=170110496: "[resolver thread] Completed job j6"')
- self.assertEqual(parser.LINE_TYPE_COMPLETED_JOB, l.line_type)
- self.assertEqual(170110496, l.t)
- self.assertEqual('j6', l.entity_id)
-
- l = parser.ParseLine('t=170110496: "Completing job j4"')
- self.assertEqual(parser.LINE_TYPE_COMPLETING_JOB, l.line_type)
- self.assertEqual(170110496, l.t)
- self.assertEqual('j4', l.entity_id)
-
- l = parser.ParseLine('t=170110496: "Cancelled request r9"')
- self.assertEqual(parser.LINE_TYPE_CANCELLED_REQUEST, l.line_type)
- self.assertEqual(170110496, l.t)
- self.assertEqual('r9', l.entity_id)
-
-
-if __name__ == '__main__':
- unittest.main()