-rw-r--r--  chrome/browser/autocomplete/autocomplete.cc           |  19
-rw-r--r--  chrome/browser/autocomplete/autocomplete_unittest.cc  |  16
-rw-r--r--  chrome/browser/net/url_fixer_upper.cc                 | 138
-rw-r--r--  chrome/browser/net/url_fixer_upper_unittest.cc        |   1
-rw-r--r--  net/base/net_util.cc                                  |   6
-rw-r--r--  net/base/net_util.h                                   |   7
-rw-r--r--  net/base/net_util_unittest.cc                         |  46
7 files changed, 139 insertions(+), 94 deletions(-)
diff --git a/chrome/browser/autocomplete/autocomplete.cc b/chrome/browser/autocomplete/autocomplete.cc
index eca1038..2167ea2 100644
--- a/chrome/browser/autocomplete/autocomplete.cc
+++ b/chrome/browser/autocomplete/autocomplete.cc
@@ -172,8 +172,22 @@ AutocompleteInput::Type AutocompleteInput::Parse(
const std::wstring host(text.substr(parts->host.begin, parts->host.len));
const size_t registry_length =
net::RegistryControlledDomainService::GetRegistryLength(host, false);
- if (registry_length == std::wstring::npos)
+ if (registry_length == std::wstring::npos) {
+ // Try to append the desired_tld.
+ if (!desired_tld.empty()) {
+ std::wstring host_with_tld(host);
+ if (host[host.length() - 1] != '.')
+ host_with_tld += '.';
+ host_with_tld += desired_tld;
+ if (net::RegistryControlledDomainService::GetRegistryLength(
+ host_with_tld, false) != std::wstring::npos)
+ return REQUESTED_URL; // Something like "99999999999" that looks like a
+ // bad IP address, but becomes valid on attaching
+ // a TLD.
+ }
return QUERY; // Could be a broken IP address, etc.
+ }
+
// See if the hostname is valid. While IE and GURL allow hostnames to contain
// many other characters (perhaps for weird intranet machines), it's extremely
@@ -182,7 +196,8 @@ AutocompleteInput::Type AutocompleteInput::Parse(
url_canon::CanonHostInfo host_info;
const std::string canonicalized_host(net::CanonicalizeHost(host, &host_info));
if ((host_info.family == url_canon::CanonHostInfo::NEUTRAL) &&
- !net::IsCanonicalizedHostCompliant(canonicalized_host)) {
+ !net::IsCanonicalizedHostCompliant(canonicalized_host,
+ WideToUTF8(desired_tld))) {
// Invalid hostname. There are several possible cases:
// * Our checker is too strict and the user pasted in a real-world URL
// that's "invalid" but resolves. To catch these, we return UNKNOWN when
diff --git a/chrome/browser/autocomplete/autocomplete_unittest.cc b/chrome/browser/autocomplete/autocomplete_unittest.cc
index 2507310..8042724 100644
--- a/chrome/browser/autocomplete/autocomplete_unittest.cc
+++ b/chrome/browser/autocomplete/autocomplete_unittest.cc
@@ -280,6 +280,22 @@ TEST(AutocompleteTest, InputType) {
}
}
+TEST(AutocompleteTest, InputTypeWithDesiredTLD) {
+ struct test_data {
+ const wchar_t* input;
+ const AutocompleteInput::Type type;
+ } input_cases[] = {
+ { L"401k", AutocompleteInput::REQUESTED_URL },
+ { L"999999999999999", AutocompleteInput::REQUESTED_URL },
+ };
+
+ for (size_t i = 0; i < ARRAYSIZE_UNSAFE(input_cases); ++i) {
+ AutocompleteInput input(input_cases[i].input, L"com", true, false, false);
+ EXPECT_EQ(input_cases[i].type, input.type()) << "Input: " <<
+ input_cases[i].input;
+ }
+}
+
// This tests for a regression where certain input in the omnibox caused us to
// crash. As long as the test completes without crashing, we're fine.
TEST(AutocompleteTest, InputCrash) {
diff --git a/chrome/browser/net/url_fixer_upper.cc b/chrome/browser/net/url_fixer_upper.cc
index e5dc12a..edacefd 100644
--- a/chrome/browser/net/url_fixer_upper.cc
+++ b/chrome/browser/net/url_fixer_upper.cc
@@ -19,9 +19,6 @@
#include "net/base/net_util.h"
#include "net/base/registry_controlled_domain.h"
-using std::string;
-using std::wstring;
-
const char* URLFixerUpper::home_directory_override = NULL;
namespace {
@@ -32,22 +29,23 @@ namespace {
// UTF8 to UTF16. Instead of this after-the-fact adjustment, we should parse it
// in the correct string format to begin with.
url_parse::Component UTF8ComponentToWideComponent(
- const string& text_utf8,
+ const std::string& text_utf8,
const url_parse::Component& component_utf8) {
if (component_utf8.len == -1)
return url_parse::Component();
- string before_component_string = text_utf8.substr(0, component_utf8.begin);
- string component_string = text_utf8.substr(component_utf8.begin,
- component_utf8.len);
- wstring before_component_string_w = UTF8ToWide(before_component_string);
- wstring component_string_w = UTF8ToWide(component_string);
+ std::string before_component_string =
+ text_utf8.substr(0, component_utf8.begin);
+ std::string component_string = text_utf8.substr(component_utf8.begin,
+ component_utf8.len);
+ std::wstring before_component_string_w = UTF8ToWide(before_component_string);
+ std::wstring component_string_w = UTF8ToWide(component_string);
url_parse::Component component_w(before_component_string_w.length(),
component_string_w.length());
return component_w;
}
-void UTF8PartsToWideParts(const string& text_utf8,
+void UTF8PartsToWideParts(const std::string& text_utf8,
const url_parse::Parsed& parts_utf8,
url_parse::Parsed* parts) {
if (IsStringASCII(text_utf8)) {
@@ -119,7 +117,7 @@ static bool ValidPathForFile(const FilePath::StringType& text,
#if defined(OS_POSIX)
// Given a path that starts with ~, return a path that starts with an
// expanded-out /user/foobar directory.
-static string FixupHomedir(const string& text) {
+static std::string FixupHomedir(const std::string& text) {
DCHECK(text.length() > 0 && text[0] == '~');
if (text.length() == 1 || text[1] == '/') {
@@ -151,7 +149,7 @@ static string FixupHomedir(const string& text) {
// (possibly invalid) file: URL in |fixed_up_url| for input beginning
// with a drive specifier or "\\". Returns the unchanged input in other cases
// (including file: URLs: these don't look like filenames).
-static string FixupPath(const string& text) {
+static std::string FixupPath(const std::string& text) {
DCHECK(!text.empty());
FilePath::StringType filename;
@@ -182,21 +180,23 @@ static string FixupPath(const string& text) {
// Checks |domain| to see if a valid TLD is already present. If not, appends
// |desired_tld| to the domain, and prepends "www." unless it's already present.
-static void AddDesiredTLD(const string& desired_tld,
- string* domain) {
+static void AddDesiredTLD(const std::string& desired_tld,
+ std::string* domain) {
if (desired_tld.empty() || domain->empty())
return;
// Check the TLD. If the return value is positive, we already have a TLD, so
- // abort; if the return value is string::npos, there's no valid host (e.g. if
- // the user pasted in garbage for which HistoryURLProvider is trying to
- // suggest an exact match), so adding a TLD makes no sense. The only useful
- // case is where the return value is 0 (there's a valid host with no known
- // TLD). We disallow unknown registries here so users can input "mail.yahoo"
- // and hit ctrl-enter to get "www.mail.yahoo.com".
+ // abort. If the return value is std::string::npos, there's no valid host,
+ // but we can try to append a TLD anyway, since the host may become valid once
+ // the TLD is attached -- for example, "999999999999" is detected as a broken
+ // IP address and marked invalid, but attaching ".com" makes it legal. When
+ // the return value is 0, there's a valid host with no known TLD, so we can
+ // definitely append the user's TLD. We disallow unknown registries here so
+ // users can input "mail.yahoo" and hit ctrl-enter to get
+ // "www.mail.yahoo.com".
const size_t registry_length =
net::RegistryControlledDomainService::GetRegistryLength(*domain, false);
- if (registry_length != 0)
+ if ((registry_length != 0) && (registry_length != std::string::npos))
return;
// Add the suffix at the end of the domain.
@@ -208,16 +208,16 @@ static void AddDesiredTLD(const string& desired_tld,
domain->append(desired_tld);
// Now, if the domain begins with "www.", stop.
- const string prefix("www.");
+ const std::string prefix("www.");
if (domain->compare(0, prefix.length(), prefix) != 0) {
// Otherwise, add www. to the beginning of the URL.
domain->insert(0, prefix);
}
}
-static inline void FixupUsername(const string& text,
+static inline void FixupUsername(const std::string& text,
const url_parse::Component& part,
- string* url) {
+ std::string* url) {
if (!part.is_valid())
return;
@@ -227,9 +227,9 @@ static inline void FixupUsername(const string& text,
// password. FixupURL itself will append the '@' for us.
}
-static inline void FixupPassword(const string& text,
+static inline void FixupPassword(const std::string& text,
const url_parse::Component& part,
- string* url) {
+ std::string* url) {
if (!part.is_valid())
return;
@@ -238,11 +238,11 @@ static inline void FixupPassword(const string& text,
url->append(text, part.begin, part.len);
}
-static void FixupHost(const string& text,
+static void FixupHost(const std::string& text,
const url_parse::Component& part,
bool has_scheme,
- const string& desired_tld,
- string* url) {
+ const std::string& desired_tld,
+ std::string* url) {
if (!part.is_valid())
return;
@@ -250,12 +250,12 @@ static void FixupHost(const string& text,
// Strip all leading dots and all but one trailing dot, unless the user only
// typed dots, in which case their input is totally invalid and we should just
// leave it unchanged.
- string domain(text, part.begin, part.len);
+ std::string domain(text, part.begin, part.len);
const size_t first_nondot(domain.find_first_not_of('.'));
- if (first_nondot != string::npos) {
+ if (first_nondot != std::string::npos) {
domain.erase(0, first_nondot);
size_t last_nondot(domain.find_last_not_of('.'));
- DCHECK(last_nondot != string::npos);
+ DCHECK(last_nondot != std::string::npos);
last_nondot += 2; // Point at second period in ending string
if (last_nondot < domain.length())
domain.erase(last_nondot);
@@ -267,9 +267,9 @@ static void FixupHost(const string& text,
url->append(domain);
}
-static void FixupPort(const string& text,
+static void FixupPort(const std::string& text,
const url_parse::Component& part,
- string* url) {
+ std::string* url) {
if (!part.is_valid())
return;
@@ -278,9 +278,9 @@ static void FixupPort(const string& text,
url->append(text, part.begin, part.len);
}
-static inline void FixupPath(const string& text,
+static inline void FixupPath(const std::string& text,
const url_parse::Component& part,
- string* url) {
+ std::string* url) {
if (!part.is_valid() || part.len == 0) {
// We should always have a path.
url->append("/");
@@ -291,9 +291,9 @@ static inline void FixupPath(const string& text,
url->append(text, part.begin, part.len);
}
-static inline void FixupQuery(const string& text,
+static inline void FixupQuery(const std::string& text,
const url_parse::Component& part,
- string* url) {
+ std::string* url) {
if (!part.is_valid())
return;
@@ -302,9 +302,9 @@ static inline void FixupQuery(const string& text,
url->append(text, part.begin, part.len);
}
-static inline void FixupRef(const string& text,
+static inline void FixupRef(const std::string& text,
const url_parse::Component& part,
- string* url) {
+ std::string* url) {
if (!part.is_valid())
return;
@@ -350,9 +350,9 @@ static bool HasPort(const std::string& original_text,
// If successful, set |scheme_component| to the text range where the scheme
// was located, and fill |canon_scheme| with its canonicalized form.
// Otherwise, return false and leave the outputs in an indeterminate state.
-static bool GetValidScheme(const string &text,
- url_parse::Component *scheme_component,
- string *canon_scheme) {
+static bool GetValidScheme(const std::string &text,
+ url_parse::Component* scheme_component,
+ std::string* canon_scheme) {
// Locate everything up to (but not including) the first ':'
if (!url_parse::ExtractScheme(text.data(), static_cast<int>(text.length()),
scheme_component))
@@ -374,7 +374,7 @@ static bool GetValidScheme(const string &text,
// We need to fix up the segmentation for "www.example.com:/". For this
// case, we guess that schemes with a "." are not actually schemes.
- if (canon_scheme->find('.') != string::npos)
+ if (canon_scheme->find('.') != std::string::npos)
return false;
// We need to fix up the segmentation for "www:123/". For this case, we
@@ -387,15 +387,15 @@ static bool GetValidScheme(const string &text,
return true;
}
-string URLFixerUpper::SegmentURL(const string& text,
- url_parse::Parsed* parts) {
+std::string URLFixerUpper::SegmentURL(const std::string& text,
+ url_parse::Parsed* parts) {
// Initialize the result.
*parts = url_parse::Parsed();
- string trimmed;
+ std::string trimmed;
TrimWhitespaceUTF8(text, TRIM_ALL, &trimmed);
if (trimmed.empty())
- return string(); // Nothing to segment.
+ return std::string(); // Nothing to segment.
#if defined(OS_WIN)
int trimmed_length = static_cast<int>(trimmed.length());
@@ -408,7 +408,7 @@ string URLFixerUpper::SegmentURL(const string& text,
#endif
// Otherwise, we need to look at things carefully.
- string scheme;
+ std::string scheme;
if (!GetValidScheme(text, &parts->scheme, &scheme)) {
// Couldn't determine the scheme, so just pick one.
parts->scheme.reset();
@@ -431,14 +431,14 @@ string URLFixerUpper::SegmentURL(const string& text,
// We need to add a scheme in order for ParseStandardURL to be happy.
// Find the first non-whitespace character.
- string::const_iterator first_nonwhite = text.begin();
+ std::string::const_iterator first_nonwhite = text.begin();
while ((first_nonwhite != text.end()) && IsWhitespace(*first_nonwhite))
++first_nonwhite;
// Construct the text to parse by inserting the scheme.
- string inserted_text(scheme);
+ std::string inserted_text(scheme);
inserted_text.append("://");
- string text_to_parse(text.begin(), first_nonwhite);
+ std::string text_to_parse(text.begin(), first_nonwhite);
text_to_parse.append(inserted_text);
text_to_parse.append(first_nonwhite, text.end());
@@ -461,16 +461,16 @@ string URLFixerUpper::SegmentURL(const string& text,
return scheme;
}
-string URLFixerUpper::FixupURL(const string& text,
- const string& desired_tld) {
- string trimmed;
+std::string URLFixerUpper::FixupURL(const std::string& text,
+ const std::string& desired_tld) {
+ std::string trimmed;
TrimWhitespaceUTF8(text, TRIM_ALL, &trimmed);
if (trimmed.empty())
- return string(); // Nothing here.
+ return std::string(); // Nothing here.
// Segment the URL.
url_parse::Parsed parts;
- string scheme(SegmentURL(trimmed, &parts));
+ std::string scheme(SegmentURL(trimmed, &parts));
// We handle the file scheme separately.
if (scheme == "file")
@@ -479,7 +479,7 @@ string URLFixerUpper::FixupURL(const string& text,
// For some schemes whose layouts we understand, we rebuild it.
if (url_util::IsStandard(scheme.c_str(),
url_parse::Component(0, static_cast<int>(scheme.length())))) {
- string url(scheme);
+ std::string url(scheme);
url.append("://");
// We need to check whether the |username| is valid because it is our
@@ -502,7 +502,7 @@ string URLFixerUpper::FixupURL(const string& text,
// In the worst-case, we insert a scheme if the URL lacks one.
if (!parts.scheme.is_valid()) {
- string fixed_scheme(scheme);
+ std::string fixed_scheme(scheme);
fixed_scheme.append("://");
trimmed.insert(0, fixed_scheme);
}
@@ -515,8 +515,8 @@ string URLFixerUpper::FixupURL(const string& text,
// fixup will look for cues that it is actually a file path before trying to
// figure out what file it is. If our logic doesn't work, we will fall back on
// regular fixup.
-string URLFixerUpper::FixupRelativeFile(const FilePath& base_dir,
- const FilePath& text) {
+std::string URLFixerUpper::FixupRelativeFile(const FilePath& base_dir,
+ const FilePath& text) {
FilePath old_cur_directory;
if (!base_dir.empty()) {
// Save the old current directory before we move to the new one.
@@ -563,24 +563,24 @@ string URLFixerUpper::FixupRelativeFile(const FilePath& base_dir,
// Fall back on regular fixup for this input.
#if defined(OS_WIN)
- string text_utf8 = WideToUTF8(text.value());
+ std::string text_utf8 = WideToUTF8(text.value());
#elif defined(OS_POSIX)
- string text_utf8 = text.value();
+ std::string text_utf8 = text.value();
#endif
return FixupURL(text_utf8, "");
}
// Deprecated functions. To be removed when all callers are updated.
-wstring URLFixerUpper::SegmentURL(const wstring& text,
- url_parse::Parsed* parts) {
- string text_utf8 = WideToUTF8(text);
+std::wstring URLFixerUpper::SegmentURL(const std::wstring& text,
+ url_parse::Parsed* parts) {
+ std::string text_utf8 = WideToUTF8(text);
url_parse::Parsed parts_utf8;
- string scheme_utf8 = SegmentURL(text_utf8, &parts_utf8);
+ std::string scheme_utf8 = SegmentURL(text_utf8, &parts_utf8);
UTF8PartsToWideParts(text_utf8, parts_utf8, parts);
return UTF8ToWide(scheme_utf8);
}
-wstring URLFixerUpper::FixupRelativeFile(const wstring& base_dir,
- const wstring& text) {
+std::wstring URLFixerUpper::FixupRelativeFile(const std::wstring& base_dir,
+ const std::wstring& text) {
return UTF8ToWide(FixupRelativeFile(FilePath::FromWStringHack(base_dir),
FilePath::FromWStringHack(text)));
}
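
The behavioral change in AddDesiredTLD above is small but easy to miss: previously only registry_length == 0 allowed the suffix to be attached, now std::string::npos does too. A compact standalone sketch of the resulting fixup, reusing the same hypothetical GetRegistryLengthStub as before (a simplified illustration, not the real URLFixerUpper):

#include <cstddef>
#include <iostream>
#include <set>
#include <string>

namespace {

// Hypothetical stand-in for RegistryControlledDomainService::GetRegistryLength.
size_t GetRegistryLengthStub(const std::string& host) {
  static const std::set<std::string> kKnownTlds = {"com", "net", "org"};
  const size_t last_dot = host.rfind('.');
  if (last_dot == std::string::npos)
    return (host.find_first_not_of("0123456789") == std::string::npos)
               ? std::string::npos : 0;
  const std::string tld = host.substr(last_dot + 1);
  return kKnownTlds.count(tld) ? tld.length() : 0;
}

// Simplified AddDesiredTLD: append |desired_tld| when the domain has no known
// TLD (registry length 0) or is invalid on its own (npos), then ensure a
// leading "www.".
void AddDesiredTLDSketch(const std::string& desired_tld, std::string* domain) {
  if (desired_tld.empty() || domain->empty())
    return;
  const size_t registry_length = GetRegistryLengthStub(*domain);
  if ((registry_length != 0) && (registry_length != std::string::npos))
    return;  // Already has a known TLD; leave it alone.
  if ((*domain)[domain->length() - 1] != '.')
    domain->append(".");
  domain->append(desired_tld);
  const std::string prefix("www.");
  if (domain->compare(0, prefix.length(), prefix) != 0)
    domain->insert(0, prefix);
}

}  // namespace

int main() {
  std::string a("google");            // no known TLD -> registry length 0
  std::string b("9999999999999999");  // broken-IP lookalike -> npos
  std::string c("google.com");        // already has a TLD -> untouched
  AddDesiredTLDSketch("com", &a);
  AddDesiredTLDSketch("com", &b);
  AddDesiredTLDSketch("com", &c);
  std::cout << a << "\n"   // www.google.com
            << b << "\n"   // www.9999999999999999.com
            << c << "\n";  // google.com
  return 0;
}
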
diff --git a/chrome/browser/net/url_fixer_upper_unittest.cc b/chrome/browser/net/url_fixer_upper_unittest.cc
index 8ea1131..be647ad 100644
--- a/chrome/browser/net/url_fixer_upper_unittest.cc
+++ b/chrome/browser/net/url_fixer_upper_unittest.cc
@@ -283,6 +283,7 @@ TEST(URLFixerUpperTest, FixupURL) {
{"http://google", "com", "http://www.google.com/"},
{"..google..", "com", "http://www.google.com/"},
{"http://www.google", "com", "http://www.google.com/"},
+ {"9999999999999999", "com", "http://www.9999999999999999.com/"},
{"google/foo", "com", "http://www.google.com/foo"},
{"google.com/foo", "com", "http://google.com/foo"},
{"google/?foo=.com", "com", "http://www.google.com/?foo=.com"},
diff --git a/net/base/net_util.cc b/net/base/net_util.cc
index e226659..67726f4e 100644
--- a/net/base/net_util.cc
+++ b/net/base/net_util.cc
@@ -959,7 +959,8 @@ inline bool IsHostCharDigit(char c) {
return (c >= '0') && (c <= '9');
}
-bool IsCanonicalizedHostCompliant(const std::string& host) {
+bool IsCanonicalizedHostCompliant(const std::string& host,
+ const std::string& desired_tld) {
if (host.empty())
return false;
@@ -989,7 +990,8 @@ bool IsCanonicalizedHostCompliant(const std::string& host) {
}
}
- return most_recent_component_started_alpha;
+ return most_recent_component_started_alpha ||
+ (!desired_tld.empty() && IsHostCharAlpha(desired_tld[0]));
}
std::string GetDirectoryListingEntry(const string16& name,
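
The relaxation above is confined to the final return statement: a host whose last component starts with a digit is still accepted when the desired TLD starts with a letter, since that TLD will become the last component once it is appended. A standalone sketch of the check, with hypothetical IsHostCharAlphaSketch/IsHostCharDigitSketch helpers standing in for the file-local ones (a simplified reimplementation for illustration, not the net_util.cc code):

#include <iostream>
#include <string>

namespace {

bool IsHostCharAlphaSketch(char c) {
  // Lowercase only, on the assumption that the host is already canonicalized.
  return (c >= 'a') && (c <= 'z');
}

bool IsHostCharDigitSketch(char c) {
  return (c >= '0') && (c <= '9');
}

// Simplified reimplementation of the compliance rules described in net_util.h:
// alphanumeric/'-'/'_' components, components must not end with '-' or '_',
// and the last component must not begin with a digit -- unless |desired_tld|
// would supply an alphabetic last component once appended.
bool IsCompliantSketch(const std::string& host, const std::string& desired_tld) {
  if (host.empty())
    return false;
  bool in_component = false;
  bool last_component_started_alpha = false;
  bool last_char_was_hyphen_or_underscore = false;
  for (size_t i = 0; i < host.length(); ++i) {
    const char c = host[i];
    if (!in_component) {
      last_component_started_alpha = IsHostCharAlphaSketch(c);
      if (!last_component_started_alpha && !IsHostCharDigitSketch(c))
        return false;  // Components must start with an alphanumeric.
      in_component = true;
    } else if (c == '.') {
      if (last_char_was_hyphen_or_underscore)
        return false;  // Components must not end with '-' or '_'.
      in_component = false;
    } else if (IsHostCharAlphaSketch(c) || IsHostCharDigitSketch(c)) {
      last_char_was_hyphen_or_underscore = false;
    } else if ((c == '-') || (c == '_')) {
      last_char_was_hyphen_or_underscore = true;
    } else {
      return false;  // Disallowed character.
    }
  }
  // The line this patch changes: a digit-leading last component is acceptable
  // when the desired TLD would begin the eventual last component with a letter.
  return last_component_started_alpha ||
         (!desired_tld.empty() && IsHostCharAlphaSketch(desired_tld[0]));
}

}  // namespace

int main() {
  std::cout << IsCompliantSketch("401k", "")    << "\n"   // 0: last component starts with a digit
            << IsCompliantSketch("401k", "com") << "\n"   // 1: "com" will start with a letter
            << IsCompliantSketch("a.9", "")     << "\n"   // 0
            << IsCompliantSketch("9.a", "")     << "\n";  // 1
  return 0;
}
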
diff --git a/net/base/net_util.h b/net/base/net_util.h
index 28edad9..f614eb6 100644
--- a/net/base/net_util.h
+++ b/net/base/net_util.h
@@ -171,10 +171,15 @@ std::string CanonicalizeHost(const std::wstring& host,
// * Each component contains only alphanumeric characters and '-' or '_'
// * The last component does not begin with a digit
// * Optional trailing dot after last component (means "treat as FQDN")
+// If |desired_tld| is non-empty, the host will only be considered invalid if
+// appending it as a trailing component still results in an invalid host. This
+// helps us avoid marking as "invalid" user attempts to open "www.401k.com" by
+// typing 4-0-1-k-<ctrl>+<enter>.
//
// NOTE: You should only pass in hosts that have been returned from
// CanonicalizeHost(), or you may not get accurate results.
-bool IsCanonicalizedHostCompliant(const std::string& host);
+bool IsCanonicalizedHostCompliant(const std::string& host,
+ const std::string& desired_tld);
// Call these functions to get the html snippet for a directory listing.
// The return values of both functions are in UTF-8.
diff --git a/net/base/net_util_unittest.cc b/net/base/net_util_unittest.cc
index e9fda58..09573b1 100644
--- a/net/base/net_util_unittest.cc
+++ b/net/base/net_util_unittest.cc
@@ -348,6 +348,7 @@ struct AdjustOffsetCase {
struct CompliantHostCase {
const char* host;
+ const char* desired_tld;
bool expected_output;
};
@@ -838,30 +839,35 @@ TEST(NetUtilTest, IDNToUnicodeAdjustOffset) {
TEST(NetUtilTest, CompliantHost) {
const CompliantHostCase compliant_host_cases[] = {
- {"", false},
- {"a", true},
- {"-", false},
- {".", false},
- {"a.", true},
- {"a.a", true},
- {"9.a", true},
- {"a.9", false},
- {"_9a", false},
- {"a.a9", true},
- {"a.9a", false},
- {"a+9a", false},
- {"1-.a-b", false},
- {"1-2.a_b", true},
- {"a.b.c.d.e", true},
- {"1.2.3.4.e", true},
- {"a.b.c.d.5", false},
- {"1.2.3.4.e.", true},
- {"a.b.c.d.5.", false},
+ {"", "", false},
+ {"a", "", true},
+ {"-", "", false},
+ {".", "", false},
+ {"9", "", false},
+ {"9", "a", true},
+ {"9a", "", false},
+ {"9a", "a", true},
+ {"a.", "", true},
+ {"a.a", "", true},
+ {"9.a", "", true},
+ {"a.9", "", false},
+ {"_9a", "", false},
+ {"a.a9", "", true},
+ {"a.9a", "", false},
+ {"a+9a", "", false},
+ {"1-.a-b", "", false},
+ {"1-2.a_b", "", true},
+ {"a.b.c.d.e", "", true},
+ {"1.2.3.4.e", "", true},
+ {"a.b.c.d.5", "", false},
+ {"1.2.3.4.e.", "", true},
+ {"a.b.c.d.5.", "", false},
};
for (size_t i = 0; i < ARRAYSIZE_UNSAFE(compliant_host_cases); ++i) {
EXPECT_EQ(compliant_host_cases[i].expected_output,
- net::IsCanonicalizedHostCompliant(compliant_host_cases[i].host));
+ net::IsCanonicalizedHostCompliant(compliant_host_cases[i].host,
+ compliant_host_cases[i].desired_tld));
}
}