author     jam@chromium.org <jam@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>    2009-12-23 00:43:34 +0000
committer  jam@chromium.org <jam@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>    2009-12-23 00:43:34 +0000
commit     4fa642feef61c12f55ec3d75e178aa2d677194de (patch)
tree       32ab1c53f40a525f600ece06f1d0cd359053694b /net/tools
parent     f09bc4e35d0bed570fcc1767bf8e1272913d2590 (diff)
Use a perfect hash map for the registry controlled domain service.
On my very fast machine, building the std::set in release mode on startup blocks the UI thread for 15ms (there are > 3300 entries). It also uses 275KB of memory, not including 50KB of data in the dll. Using a perfect hash map, there's no startup cost. The dll's size increases by 135KB but there's no extra memory consumption, leading to a net memory reduction of 140KB.
Review URL: http://codereview.chromium.org/515001
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@35196 0039d316-1c4b-4281-b951-d872f2087c98
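The consumer side of this change is outside this diff, but a rough sketch shows what "no startup cost" means in practice. Everything below is an assumption based on the gperf flags quoted in the header comment of tld_cleanup.cc (see the diff below), not code from this commit: with -t, -C and -NFindDomain, gperf emits a const table of DomainRule entries plus a lookup function that returns a pointer into that table (or NULL when the key is absent), and with -L "C++" that function is a static member of a class named Perfect_Hash by default. The exact signature varies by gperf version.

    // Hypothetical consumer sketch -- not code from this commit.
    #include <cstddef>
    #include <string>

    struct DomainRule {
      const char* name;
      int type;  // 1: exception, 2: wildcard (mirrors the struct in the diff)
    };

    // Assumed shape of the gperf-generated lookup; the real definition would
    // live in the generated effective_tld_names.c.
    class Perfect_Hash {
     public:
      static const DomainRule* FindDomain(const char* str, unsigned int len);
    };

    // The table is baked into the binary, so the first query does no
    // initialization work at all, unlike building a std::set at startup.
    bool IsWildcardRule(const std::string& host) {
      const DomainRule* rule = Perfect_Hash::FindDomain(
          host.c_str(), static_cast<unsigned int>(host.size()));
      return rule != NULL && rule->type == 2;
    }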
Diffstat (limited to 'net/tools')
-rw-r--r-- | net/tools/tld_cleanup/tld_cleanup.cc | 148
1 file changed, 95 insertions, 53 deletions
diff --git a/net/tools/tld_cleanup/tld_cleanup.cc b/net/tools/tld_cleanup/tld_cleanup.cc
index 71c16c0..df75b41 100644
--- a/net/tools/tld_cleanup/tld_cleanup.cc
+++ b/net/tools/tld_cleanup/tld_cleanup.cc
@@ -3,12 +3,19 @@
 // found in the LICENSE file.
 
 // This command-line program converts an effective-TLD data file in UTF-8 from
-// the format provided by Mozilla to the format expected by Chrome.  Any errors
-// or warnings are recorded in tld_cleanup.log.
+// the format provided by Mozilla to the format expected by Chrome.  This
+// program generates an intermediate file which is then used by gperf to
+// generate a perfect hash map.  The benefit of this approach is that no time is
+// spent on program initialization to generate the map of this data.
+//
+// After running this program, "effective_tld_names.gperf" is generated.  Run
+// gperf using the following command line:
+// gperf -a -L "C++" -C -c -o -t -k '*' -NFindDomain -D -m 5 effective_tld_names.gperf > effective_tld_names.c
+//
+// Any errors or warnings from this program are recorded in tld_cleanup.log.
 //
 // In particular, it
 //  * Strips blank lines and comments, as well as notes for individual rules.
-//  * Changes all line endings to LF.
 //  * Strips a single leading and/or trailing dot from each rule, if present.
 //  * Logs a warning if a rule contains '!' or '*.' other than at the beginning
 //    of the rule.  (This also catches multiple ! or *. at the start of a rule.)
@@ -16,6 +23,7 @@
 //  * Canonicalizes each rule's domain by converting it to a GURL and back.
 //  * Adds explicit rules for true TLDs found in any rule.
 
+#include <map>
 #include <set>
 #include <string>
 
@@ -31,32 +39,50 @@
 #include "googleurl/src/gurl.h"
 #include "googleurl/src/url_parse.h"
 
-typedef std::set<std::string> StringSet;
+namespace {
+struct Rule {
+  bool exception;
+  bool wildcard;
+};
+
+typedef std::map<std::string, Rule> RuleMap;
+typedef std::set<std::string> RuleSet;
+}
 
 // Writes the list of domain rules contained in the 'rules' set to the
 // 'outfile', with each rule terminated by a LF.  The file must already have
 // been created with write access.
-bool WriteRules(const StringSet& rules, FilePath outfile) {
+bool WriteRules(const RuleMap& rules, FilePath outfile) {
   std::string data;
   data.append(
+      "%{\n"
       "// Copyright (c) 2009 The Chromium Authors. All rights reserved.\n"
       "// Use of this source code is governed by a BSD-style license that\n"
       "// can be found in the LICENSE file.\n\n"
       "// This file is generated by net/tools/tld_cleanup/.\n"
       "// DO NOT MANUALLY EDIT!\n"
-      "#include \"net/base/registry_controlled_domain.h\"\n\n"
-      "const char net::RegistryControlledDomainService::kDomainData[] =\n"
+      "%}\n"
+      "struct DomainRule {\n"
+      "  const char *name;\n"
+      "  int type;  // 1: exception, 2: wildcard\n"
+      "};\n"
+      "%%\n"
   );
 
-  for (StringSet::const_iterator iter = rules.begin();
-       iter != rules.end();
-       ++iter) {
-    data.append("    \"");
-    data.append(*iter);
-    data.append("\\n\"\n");
+  for (RuleMap::const_iterator i = rules.begin(); i != rules.end(); ++i) {
+    data.append(i->first);
+    data.append(", ");
+    if (i->second.exception) {
+      data.append("1");
+    } else if (i->second.wildcard) {
+      data.append("2");
+    } else {
+      data.append("0");
+    }
+    data.append("\n");
   }
-  data.append(";\n");
+  data.append("%%\n");
 
   int written = file_util::WriteFile(outfile, data.data(), data.size());
 
@@ -74,67 +100,61 @@ typedef enum {
 // canonicalizes it using GURL.  Returns kSuccess if the rule is interpreted as
 // valid; logs a warning and returns kWarning if it is probably invalid; and
 // logs an error and returns kError if the rule is (almost) certainly invalid.
-NormalizeResult NormalizeRule(std::string* rule) {
+NormalizeResult NormalizeRule(std::string* domain, Rule* rule) {
   NormalizeResult result = kSuccess;
 
   // Strip single leading and trailing dots.
-  if (rule->at(0) == '.')
-    rule->erase(0, 1);
-  if (rule->size() == 0) {
+  if (domain->at(0) == '.')
+    domain->erase(0, 1);
+  if (domain->size() == 0) {
     LOG(WARNING) << "Ignoring empty rule";
     return kWarning;
   }
-  if (rule->at(rule->size() - 1) == '.')
-    rule->erase(rule->size() - 1, 1);
-  if (rule->size() == 0) {
+  if (domain->at(domain->size() - 1) == '.')
+    domain->erase(domain->size() - 1, 1);
+  if (domain->size() == 0) {
    LOG(WARNING) << "Ignoring empty rule";
    return kWarning;
   }
 
   // Allow single leading '*.' or '!', saved here so it's not canonicalized.
-  bool wildcard = false;
-  bool exception = false;
+  rule->wildcard = false;
+  rule->exception = false;
   size_t start_offset = 0;
-  if (rule->at(0) == '!') {
-    rule->erase(0, 1);
-    exception = true;
-  } else if (rule->find("*.") == 0) {
-    rule->erase(0, 2);
-    wildcard = true;
+  if (domain->at(0) == '!') {
+    domain->erase(0, 1);
+    rule->exception = true;
+  } else if (domain->find("*.") == 0) {
+    domain->erase(0, 2);
+    rule->wildcard = true;
   }
-  if (rule->size() == 0) {
+  if (domain->size() == 0) {
    LOG(WARNING) << "Ignoring empty rule";
    return kWarning;
   }
 
   // Warn about additional '*.' or '!'.
-  if (rule->find("*.", start_offset) != std::string::npos ||
-      rule->find('!', start_offset) != std::string::npos) {
-    LOG(WARNING) << "Keeping probably invalid rule: " << *rule;
+  if (domain->find("*.", start_offset) != std::string::npos ||
+      domain->find('!', start_offset) != std::string::npos) {
+    LOG(WARNING) << "Keeping probably invalid rule: " << *domain;
     result = kWarning;
   }
 
   // Make a GURL and normalize it, then get the host back out.
   std::string url = "http://";
-  url.append(*rule);
+  url.append(*domain);
   GURL gurl(url);
   const std::string& spec = gurl.possibly_invalid_spec();
   url_parse::Component host = gurl.parsed_for_possibly_invalid_spec().host;
   if (host.len < 0) {
-    LOG(ERROR) << "Ignoring rule that couldn't be normalized: " << *rule;
+    LOG(ERROR) << "Ignoring rule that couldn't be normalized: " << *domain;
     return kError;
   }
   if (!gurl.is_valid()) {
-    LOG(WARNING) << "Keeping rule that GURL says is invalid: " << *rule;
+    LOG(WARNING) << "Keeping rule that GURL says is invalid: " << *domain;
     result = kWarning;
   }
-  rule->assign(spec.substr(host.begin, host.len));
-
-  // Restore wildcard or exception marker.
-  if (exception)
-    rule->insert(0, 1, '!');
-  else if (wildcard)
-    rule->insert(0, "*.");
+  domain->assign(spec.substr(host.begin, host.len));
 
   return result;
 }
@@ -153,11 +173,12 @@ NormalizeResult NormalizeFile(const FilePath& in_filename,
 
   // We do a lot of string assignment during parsing, but simplicity is more
   // important than performance here.
-  std::string rule;
+  std::string domain;
   NormalizeResult result = kSuccess;
   size_t line_start = 0;
   size_t line_end = 0;
-  StringSet rules;
+  RuleMap rules;
+  RuleSet extra_rules;
   while (line_start < data.size()) {
     // Skip comments.
     if (line_start + 1 < data.size() &&
@@ -171,15 +192,24 @@ NormalizeResult NormalizeFile(const FilePath& in_filename,
     line_end = data.find_first_of("\r\n \t", line_start);
     if (line_end == std::string::npos)
       line_end = data.size();
-    rule.assign(data.data(), line_start, line_end - line_start);
+    domain.assign(data.data(), line_start, line_end - line_start);
 
-    NormalizeResult new_result = NormalizeRule(&rule);
+    Rule rule;
+    NormalizeResult new_result = NormalizeRule(&domain, &rule);
     if (new_result != kError) {
-      rules.insert(rule);
-      // Add true TLD for multi-level rules.
-      size_t tld_start = rule.find_last_of('.');
-      if (tld_start != std::string::npos && tld_start + 1 < rule.size())
-        rules.insert(rule.substr(tld_start + 1));
+      // Check the existing rules to make sure we don't have an exception and
+      // wildcard for the same rule.  If we did, we'd have to update our
+      // parsing code to handle this case.
+      CHECK(rules.find(domain) == rules.end());
+
+      rules[domain] = rule;
+      // Add true TLD for multi-level rules.  We don't add them right now, in
+      // case there's an exception or wild card that either exists or might be
+      // added in a later iteration.  In those cases, there's no need to add
+      // it and it would just slow down parsing the data.
+      size_t tld_start = domain.find_last_of('.');
+      if (tld_start != std::string::npos && tld_start + 1 < domain.size())
+        extra_rules.insert(domain.substr(tld_start + 1));
     }
     result = std::max(result, new_result);
   }
@@ -193,6 +223,17 @@ NormalizeResult NormalizeFile(const FilePath& in_filename,
       line_start = data.size();
   }
 
+  for (RuleSet::const_iterator iter = extra_rules.begin();
+       iter != extra_rules.end();
+       ++iter) {
+    if (rules.find(*iter) == rules.end()) {
+      Rule rule;
+      rule.exception = false;
+      rule.wildcard = false;
+      rules[*iter] = rule;
+    }
+  }
+
   if (!WriteRules(rules, out_filename)) {
     LOG(ERROR) << "Error(s) writing output file";
     result = kError;
@@ -241,7 +282,8 @@ int main(int argc, const char* argv[]) {
   PathService::Get(base::DIR_SOURCE_ROOT, &output_file);
   output_file = output_file.Append(FILE_PATH_LITERAL("net"))
                            .Append(FILE_PATH_LITERAL("base"))
-                           .Append(FILE_PATH_LITERAL("effective_tld_names.cc"));
+                           .Append(FILE_PATH_LITERAL(
+                               "effective_tld_names.gperf"));
   NormalizeResult result = NormalizeFile(input_file, output_file);
   if (result != kSuccess) {
     fprintf(stderr,
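For reference, the effective_tld_names.gperf intermediate file emitted by the new WriteRules looks roughly like the following. The three rules are invented for illustration; the real file holds thousands of entries, one per line, in the "domain, type" form written by the loop above.

    %{
    // Copyright (c) 2009 The Chromium Authors. All rights reserved.
    // Use of this source code is governed by a BSD-style license that
    // can be found in the LICENSE file.

    // This file is generated by net/tools/tld_cleanup/.
    // DO NOT MANUALLY EDIT!
    %}
    struct DomainRule {
      const char *name;
      int type;  // 1: exception, 2: wildcard
    };
    %%
    jp, 0
    metro.tokyo.jp, 1
    tokyo.jp, 2
    %%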
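To make the new NormalizeRule contract concrete, here is a hedged usage sketch; the input rule is invented, and it assumes GURL canonicalization leaves an already-canonical host unchanged.

    std::string domain = "*.tokyo.jp.";  // hypothetical line from the input file
    Rule rule;
    NormalizeResult result = NormalizeRule(&domain, &rule);
    // Afterwards: domain == "tokyo.jp", rule.wildcard == true,
    // rule.exception == false, and result == kSuccess.  Unlike before this
    // change, the "*." marker is not re-inserted into the string; it travels
    // in the Rule struct and is written out as type 2 by WriteRules.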
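Finally, the "true TLD" pass can be illustrated with a small self-contained program. This is only a sketch that mirrors the extra_rules logic (the tool collects the extra rules while parsing rather than in a separate pass over the finished map), and the rule names are hypothetical; running it prints the same "domain, type" lines WriteRules would emit, including the "jp, 0" entry contributed by extra_rules.

    #include <cstdio>
    #include <map>
    #include <set>
    #include <string>

    struct Rule {
      bool exception;
      bool wildcard;
    };

    int main() {
      // Rules as they look after NormalizeRule (markers already stripped).
      std::map<std::string, Rule> rules;
      Rule plain = { false, false };
      Rule exception = { true, false };
      rules["pref.hokkaido.jp"] = plain;    // from "pref.hokkaido.jp"
      rules["metro.tokyo.jp"] = exception;  // from "!metro.tokyo.jp"

      // Queue the last label of each multi-level rule...
      std::set<std::string> extra_rules;
      for (std::map<std::string, Rule>::const_iterator i = rules.begin();
           i != rules.end(); ++i) {
        std::string::size_type tld_start = i->first.find_last_of('.');
        if (tld_start != std::string::npos && tld_start + 1 < i->first.size())
          extra_rules.insert(i->first.substr(tld_start + 1));
      }
      // ...and add it as a plain rule only if no explicit rule exists already.
      for (std::set<std::string>::const_iterator i = extra_rules.begin();
           i != extra_rules.end(); ++i) {
        if (rules.find(*i) == rules.end())
          rules[*i] = plain;  // adds "jp" with type 0 in this example
      }

      // Same encoding WriteRules uses: 1 = exception, 2 = wildcard, 0 = plain.
      for (std::map<std::string, Rule>::const_iterator i = rules.begin();
           i != rules.end(); ++i) {
        int type = i->second.exception ? 1 : (i->second.wildcard ? 2 : 0);
        std::printf("%s, %d\n", i->first.c_str(), type);
      }
      return 0;
    }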