author    nsylvain@google.com <nsylvain@google.com@0039d316-1c4b-4281-b951-d872f2087c98>    2008-07-29 20:57:35 +0000
committer nsylvain@google.com <nsylvain@google.com@0039d316-1c4b-4281-b951-d872f2087c98>    2008-07-29 20:57:35 +0000
commit    6be5dd40860ccdb891d18b701716e756ba56e917 (patch)
tree      a190daf03246d0014fb8d8843bcbefb3b3215c27 /net
parent    27aca5ae73f4d85c84fa8fdbc54e9e4e0d6c82a0 (diff)
Reverting revisions 70, 66 and 65 to fix crashes.
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@71 0039d316-1c4b-4281-b951-d872f2087c98
Diffstat (limited to 'net')
-rw-r--r--  net/base/data_url.cc                     6
-rw-r--r--  net/base/escape.cc                      44
-rw-r--r--  net/base/escape.h                       21
-rw-r--r--  net/base/escape_unittest.cc             31
-rw-r--r--  net/base/net_util.cc                     8
-rw-r--r--  net/http/http_proxy_service.cc          15
-rw-r--r--  net/url_request/url_request_ftp_job.cc   4
7 files changed, 39 insertions(+), 90 deletions(-)
diff --git a/net/base/data_url.cc b/net/base/data_url.cc
index 95d31e7..cf8e239 100644
--- a/net/base/data_url.cc
+++ b/net/base/data_url.cc
@@ -97,8 +97,7 @@ bool DataURL::Parse(const GURL& url, std::string* mime_type,
// could be part of the payload, so don't strip it.
if (base64_encoded) {
temp_data = UnescapeURLComponent(temp_data,
- UnescapeRule::SPACES | UnescapeRule::URL_SPECIAL_CHARS |
- UnescapeRule::CONTROL_CHARS);
+ UnescapeRule::SPACES | UnescapeRule::PERCENTS);
}
// Strip whitespace.
@@ -111,8 +110,7 @@ bool DataURL::Parse(const GURL& url, std::string* mime_type,
if (!base64_encoded) {
temp_data = UnescapeURLComponent(temp_data,
- UnescapeRule::SPACES | UnescapeRule::URL_SPECIAL_CHARS |
- UnescapeRule::CONTROL_CHARS);
+ UnescapeRule::SPACES | UnescapeRule::PERCENTS);
}
if (base64_encoded)
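
For orientation, here is a hedged usage sketch of the DataURL::Parse() call site affected above. The charset and data out-parameters and the header include paths are assumptions, since the hunk header only shows the first two parameters:

#include <string>

#include "googleurl/src/gurl.h"  // GURL; include path assumed for this era of the tree.
#include "net/base/data_url.h"   // DataURL::Parse (assumed header).

int main() {
  GURL url("data:text/plain,Hello%20World");
  std::string mime_type, charset, data;
  // With the reverted rules (SPACES | PERCENTS), "%20" in a non-base64
  // payload is unescaped, so data should come back as "Hello World".
  bool ok = DataURL::Parse(url, &mime_type, &charset, &data);
  return ok ? 0 : 1;
}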
diff --git a/net/base/escape.cc b/net/base/escape.cc
index 330a3ed..bd4aa95 100644
--- a/net/base/escape.cc
+++ b/net/base/escape.cc
@@ -81,6 +81,7 @@ class Charmap {
uint32 map_[8];
};
+
// Given text to escape and a Charmap defining which values to escape,
// return an escaped string. If use_plus is true, spaces are converted
// to +, otherwise, if spaces are in the charmap, they are converted to
@@ -104,32 +105,6 @@ const std::string Escape(const std::string& text, const Charmap& charmap,
return escaped;
}
-// Contains nonzero when the corresponding character is unescapable for normal
-// URLs. These characters are the ones that may change the parsing of a URL, so
-// we don't want to unescape them sometimes. In many case we won't want to
-// unescape spaces, but that is controlled by parameters to Unescape*.
-//
-// The basic rule is that we can't unescape anything that would changing parsing
-// like # or ?. We also can't unescape &, =, or + since that could be part of a
-// query and that could change the server's parsing of the query.
-const char kUrlUnescape[128] = {
-// NULL, control chars...
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-// ' ' ! " # $ % & ' ( ) * + , - . /
- 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1,
-// 0 1 2 3 4 5 6 7 8 9 : ; < = > ?
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0,
-// @ A B C D E F G H I J K L M N O
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
-// P Q R S T U V W X Y Z [ \ ] ^ _
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
-// ` a b c d e f g h i j k l m n o
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
-// p q r s t u v w x y z { | } ~ <NBSP>
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0
-};
-
std::string UnescapeURLImpl(const std::string& escaped_text,
UnescapeRule::Type rules) {
// The output of the unescaping is always smaller than the input, so we can
@@ -146,34 +121,21 @@ std::string UnescapeURLImpl(const std::string& escaped_text,
if (IsHex(most_sig_digit) && IsHex(least_sig_digit)) {
unsigned char value = HexToInt(most_sig_digit) * 16 +
HexToInt(least_sig_digit);
- if (value >= 0x80 || // Unescape all high-bit characters.
- // For 7-bit characters, the lookup table tells us all valid chars.
- (kUrlUnescape[value] ||
- // ...and we allow some additional unescaping when flags are set.
- (value == ' ' && (rules & UnescapeRule::SPACES)) ||
- // Allow any of the prohibited but non-control characters when
- // we're doing "special" chars.
- (value > ' ' && (rules & UnescapeRule::URL_SPECIAL_CHARS)) ||
- // Additionally allow control characters if requested.
- (value < ' ' && (rules & UnescapeRule::CONTROL_CHARS)))) {
+ if (((rules & UnescapeRule::PERCENTS) || value != '%') &&
+ ((rules & UnescapeRule::SPACES) || value != ' ')) {
// Use the unescaped version of the character.
result.push_back(value);
i += 2;
} else {
- // Keep escaped. Append a percent and we'll get the following two
- // digits on the next loops through.
result.push_back('%');
}
} else {
- // Invalid escape sequence, just pass the percent through and continue
- // right after it.
result.push_back('%');
}
} else if ((rules & UnescapeRule::REPLACE_PLUS_WITH_SPACE) &&
escaped_text[i] == '+') {
result.push_back(' ');
} else {
- // Normal case for unescaped characters.
result.push_back(escaped_text[i]);
}
}
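
The reverted logic above boils down to: decode any valid %XX sequence except the ones yielding '%' or ' ', which stay escaped unless the PERCENTS or SPACES flags are passed. A self-contained sketch of that rule, with the hex helpers reimplemented here for illustration (this is not the actual net/base code):

#include <string>

// Flag values mirroring the reverted UnescapeRule constants.
namespace UnescapeRule {
enum { NORMAL = 0, SPACES = 1, PERCENTS = 2, REPLACE_PLUS_WITH_SPACE = 4 };
}

static bool IsHexDigit(char c) {
  return (c >= '0' && c <= '9') || (c >= 'A' && c <= 'F') || (c >= 'a' && c <= 'f');
}

static int HexDigitValue(char c) {
  if (c >= '0' && c <= '9') return c - '0';
  return (c >= 'A' && c <= 'F') ? c - 'A' + 10 : c - 'a' + 10;
}

// Sketch of UnescapeURLImpl() after this revert: a %XX escape is decoded
// unless the decoded byte is '%' or ' ' and the matching flag is not set.
std::string UnescapeSketch(const std::string& escaped_text, int rules) {
  std::string result;
  result.reserve(escaped_text.length());
  for (size_t i = 0; i < escaped_text.length(); ++i) {
    if (escaped_text[i] == '%' && i + 2 < escaped_text.length() &&
        IsHexDigit(escaped_text[i + 1]) && IsHexDigit(escaped_text[i + 2])) {
      unsigned char value = HexDigitValue(escaped_text[i + 1]) * 16 +
                            HexDigitValue(escaped_text[i + 2]);
      if (((rules & UnescapeRule::PERCENTS) || value != '%') &&
          ((rules & UnescapeRule::SPACES) || value != ' ')) {
        result.push_back(value);  // Use the unescaped character.
        i += 2;
      } else {
        result.push_back('%');  // Keep escaped; the hex digits follow as-is.
      }
    } else if ((rules & UnescapeRule::REPLACE_PLUS_WITH_SPACE) &&
               escaped_text[i] == '+') {
      result.push_back(' ');
    } else {
      result.push_back(escaped_text[i]);
    }
  }
  return result;
}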
diff --git a/net/base/escape.h b/net/base/escape.h
index 4b86a64..220eebc 100644
--- a/net/base/escape.h
+++ b/net/base/escape.h
@@ -77,20 +77,17 @@ class UnescapeRule {
// by other applications.
SPACES = 1,
- // Unescapes various characters that will change the meaning of URLs,
- // including '%', '+', '&', '/', '#'. If we unescaped these charaters, the
- // resulting URL won't be the same as the source one. This flag is used when
- // generating final output like filenames for URLs where we won't be
- // interpreting as a URL and want to do as much unescaping as possible.
- URL_SPECIAL_CHARS = 2,
-
- // Unescapes control characters such as %01. This INCLUDES NULLs!. This is
- // used for rare cases such as data: URL decoding where the result is binary
- // data. You should not use this for normal URLs!
- CONTROL_CHARS = 4,
+ // Unescapes "%25" to "%". This must not be used when the resulting string
+ // will need to be interpreted as a URL again, since we won't know what
+ // should be escaped and what shouldn't. For example, "%2520" would be
+ // converted to "%20" which would have a different meaning than the original.
+ // This flag is used when generating final output like filenames for URLs
+ // where we won't be interpreting as a URL and want to do as much unescaping
+ // as possible.
+ PERCENTS = 2,
// URL queries use "+" for space. This flag controls that replacement.
- REPLACE_PLUS_WITH_SPACE = 8,
+ REPLACE_PLUS_WITH_SPACE = 4,
};
};
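
As a usage note for the flags above, the call sites changed elsewhere in this patch combine them with bitwise OR. A hedged, minimal example; the header path is an assumption, and the expected output follows from the flag semantics described in the comments above:

#include <iostream>
#include <string>

#include "net/base/escape.h"  // UnescapeURLComponent and UnescapeRule.

int main() {
  // SPACES decodes "%20" to ' '; PERCENTS decodes "%25" to '%', which is why
  // the result must not be re-interpreted as a URL afterwards.
  std::string display = UnescapeURLComponent(
      "some%20file%2520name", UnescapeRule::SPACES | UnescapeRule::PERCENTS);
  std::cout << display << std::endl;  // Expected: "some file%20name"
  return 0;
}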
diff --git a/net/base/escape_unittest.cc b/net/base/escape_unittest.cc
index 53100a91..d2d0288 100644
--- a/net/base/escape_unittest.cc
+++ b/net/base/escape_unittest.cc
@@ -121,18 +121,11 @@ TEST(Escape, UnescapeURLComponent) {
{"Invalid %escape %2", UnescapeRule::NORMAL, "Invalid %escape %2"},
{"Some%20random text %25%3bOK", UnescapeRule::NORMAL, "Some%20random text %25;OK"},
{"Some%20random text %25%3bOK", UnescapeRule::SPACES, "Some random text %25;OK"},
- {"Some%20random text %25%3bOK", UnescapeRule::URL_SPECIAL_CHARS, "Some%20random text %;OK"},
- {"Some%20random text %25%3bOK", UnescapeRule::SPACES | UnescapeRule::URL_SPECIAL_CHARS, "Some random text %;OK"},
+ {"Some%20random text %25%3bOK", UnescapeRule::PERCENTS, "Some%20random text %;OK"},
+ {"Some%20random text %25%3bOK", UnescapeRule::SPACES | UnescapeRule::PERCENTS, "Some random text %;OK"},
+ {"%01%02%03%04%05%06%07%08%09", UnescapeRule::NORMAL, "\x01\x02\x03\x04\x05\x06\x07\x08\x09"},
{"%A0%B1%C2%D3%E4%F5", UnescapeRule::NORMAL, "\xA0\xB1\xC2\xD3\xE4\xF5"},
- {"%Aa%Bb%Cc%Dd%Ee%Ff", UnescapeRule::NORMAL, "\xAa\xBb\xCc\xDd\xEe\xFf"},
- // Certain URL-sensitive characters should not be unescaped unless asked.
- {"Hello%20%13%10world %23# %3F? %3D= %26& %25% %2B+", UnescapeRule::SPACES, "Hello %13%10world %23# %3F? %3D= %26& %25% %2B+"},
- {"Hello%20%13%10world %23# %3F? %3D= %26& %25% %2B+", UnescapeRule::URL_SPECIAL_CHARS, "Hello%20%13%10world ## ?? == && %% ++"},
- // Control characters.
- {"%01%02%03%04%05%06%07%08%09 %25", UnescapeRule::URL_SPECIAL_CHARS, "%01%02%03%04%05%06%07%08%09 %"},
- {"%01%02%03%04%05%06%07%08%09 %25", UnescapeRule::CONTROL_CHARS, "\x01\x02\x03\x04\x05\x06\x07\x08\x09 %25"},
- {"Hello%20%13%10%02", UnescapeRule::SPACES, "Hello %13%10%02"},
- {"Hello%20%13%10%02", UnescapeRule::CONTROL_CHARS, "Hello%20\x13\x10\x02"},
+ {"%Aa%Bb%Cc%Dd%Ee%Ff", UnescapeRule::NORMAL, "\xAa\xBb\xCc\xDd\xEe\xFf"}
};
for (int i = 0; i < arraysize(unescape_cases); i++) {
@@ -141,23 +134,17 @@ TEST(Escape, UnescapeURLComponent) {
UnescapeURLComponent(str, unescape_cases[i].rules));
}
- // Test the NULL character unescaping (which wouldn't work above since those
- // are just char pointers).
+ // test the NULL character escaping (which wouldn't work above since those
+ // are just char pointers)
std::string input("Null");
input.push_back(0); // Also have a NULL in the input.
input.append("%00%39Test");
- // When we're unescaping NULLs
std::string expected("Null");
expected.push_back(0);
expected.push_back(0);
expected.append("9Test");
- EXPECT_EQ(expected, UnescapeURLComponent(input, UnescapeRule::CONTROL_CHARS));
- // When we're not unescaping NULLs.
- expected = "Null";
- expected.push_back(0);
- expected.append("%009Test");
EXPECT_EQ(expected, UnescapeURLComponent(input, UnescapeRule::NORMAL));
}
@@ -191,9 +178,9 @@ TEST(Escape, UnescapeAndDecodeURLComponent) {
"Some random text %25;OK",
L"Some random text %25;OK"},
{"UTF8", "%01%02%03%04%05%06%07%08%09",
- "%01%02%03%04%05%06%07%08%09",
- "%01%02%03%04%05%06%07%08%09",
- L"%01%02%03%04%05%06%07%08%09"},
+ "\x01\x02\x03\x04\x05\x06\x07\x08\x09",
+ "\x01\x02\x03\x04\x05\x06\x07\x08\x09",
+ L"\x01\x02\x03\x04\x05\x06\x07\x08\x09"},
{"UTF8", "%E4%BD%A0+%E5%A5%BD",
"\xE4\xBD\xA0+\xE5\xA5\xBD",
"\xE4\xBD\xA0 \xE5\xA5\xBD",
diff --git a/net/base/net_util.cc b/net/base/net_util.cc
index 68570006..416252c 100644
--- a/net/base/net_util.cc
+++ b/net/base/net_util.cc
@@ -715,7 +715,7 @@ bool FileURLToFilePath(const GURL& url, std::wstring* file_path) {
// GURL stores strings as percent-encoded UTF-8, this will undo if possible.
path = UnescapeURLComponent(path,
- UnescapeRule::SPACES | UnescapeRule::URL_SPECIAL_CHARS);
+ UnescapeRule::SPACES | UnescapeRule::PERCENTS);
if (!IsStringUTF8(path.c_str())) {
// Not UTF-8, assume encoding is native codepage and we're done. We know we
@@ -937,11 +937,9 @@ std::wstring GetSuggestedFilename(const GURL& url,
TrimString(filename, L".", &filename);
}
if (filename.empty()) {
- if (url.is_valid()) {
+ if (url.is_valid())
filename = UnescapeAndDecodeUTF8URLComponent(
- url.ExtractFileName(),
- UnescapeRule::SPACES | UnescapeRule::URL_SPECIAL_CHARS);
- }
+ url.ExtractFileName(), UnescapeRule::SPACES | UnescapeRule::PERCENTS);
}
// Trim '.' once more.
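
A hedged example of the GetSuggestedFilename() path touched above; the UTF-8 bytes match the 你/好 sequences used in the unit test, while the header path and the exact wide-string result are assumptions:

#include <string>

#include "net/base/escape.h"  // UnescapeAndDecodeUTF8URLComponent, UnescapeRule.

int main() {
  // The extracted file name is percent-unescaped and then decoded from UTF-8
  // into a wide string for use as a suggested filename.
  std::wstring filename = UnescapeAndDecodeUTF8URLComponent(
      "%E4%BD%A0%E5%A5%BD.txt", UnescapeRule::SPACES | UnescapeRule::PERCENTS);
  // Expected: L"\x4f60\x597d.txt" ("你好.txt").
  return filename.empty() ? 1 : 0;
}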
diff --git a/net/http/http_proxy_service.cc b/net/http/http_proxy_service.cc
index d851a6d..e29ae33 100644
--- a/net/http/http_proxy_service.cc
+++ b/net/http/http_proxy_service.cc
@@ -385,10 +385,6 @@ int HttpProxyService::ReconsiderProxyAfterError(const GURL& url,
HttpProxyInfo* result,
CompletionCallback* callback,
PacRequest** pac_request) {
- bool was_direct = result->is_direct();
- if (!was_direct && result->Fallback(&http_proxy_retry_info_))
- return OK;
-
// Check to see if we have a new config since ResolveProxy was called. We
// want to re-run ResolveProxy in two cases: 1) we have a new config, or 2) a
// direct connection failed and we never tried the current config.
@@ -398,6 +394,8 @@ int HttpProxyService::ReconsiderProxyAfterError(const GURL& url,
UpdateConfig();
if (result->config_id_ != config_.id()) {
// A new configuration!
+ // We can forget about the bad proxies now.
+ http_proxy_retry_info_.clear();
re_resolve = true;
} else if (!result->config_was_tried_) {
// We never tried the proxy configuration since we thought it was bad,
@@ -405,11 +403,20 @@ int HttpProxyService::ReconsiderProxyAfterError(const GURL& url,
// configuration again to see if it will work now.
config_is_bad_ = false;
re_resolve = true;
+
+ // Clear the map of bad proxies.
+ http_proxy_retry_info_.clear();
}
}
if (re_resolve)
return ResolveProxy(url, result, callback, pac_request);
+ // We don't have new proxy settings to try, fallback to the next proxy
+ // in the list.
+ bool was_direct = result->is_direct();
+ if (!was_direct && result->Fallback(&http_proxy_retry_info_))
+ return OK;
+
if (!config_.auto_detect && !config_.proxy_server.empty()) {
// If auto detect is on, then we should try a DIRECT connection
// as the attempt to reach the proxy failed.
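
The net effect of the hunks above is a reordering: after a failed request the service first looks for new proxy settings (clearing the bad-proxy map and re-resolving if it finds any) and only then falls back to the next proxy in the current list. A standalone sketch of that decision order, using hypothetical stand-in types since the real class state is elided here:

// Hypothetical stand-ins for the state consulted by
// ReconsiderProxyAfterError(); names are illustrative, not from the patch.
struct RetryContext {
  bool new_config_available;    // UpdateConfig() produced a different config id.
  bool config_was_never_tried;  // The config was skipped because it looked bad.
  bool is_direct;               // The failed attempt was a direct connection.
  bool can_fall_back;           // result->Fallback(&retry_info) would succeed.
};

enum RetryDecision { RE_RESOLVE, USE_NEXT_PROXY, TRY_DIRECT_OR_GIVE_UP };

// Sketch of the reordered policy: prefer re-resolving with fresh settings
// (the bad-proxy map is cleared in those branches), and only fall back to the
// next proxy when there is nothing new to try.
RetryDecision ReconsiderAfterError(const RetryContext& ctx) {
  if (ctx.new_config_available || ctx.config_was_never_tried)
    return RE_RESOLVE;
  if (!ctx.is_direct && ctx.can_fall_back)
    return USE_NEXT_PROXY;
  return TRY_DIRECT_OR_GIVE_UP;
}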
diff --git a/net/url_request/url_request_ftp_job.cc b/net/url_request/url_request_ftp_job.cc
index 202d1fb..f619609 100644
--- a/net/url_request/url_request_ftp_job.cc
+++ b/net/url_request/url_request_ftp_job.cc
@@ -61,7 +61,7 @@ static bool UnescapeAndValidatePath(const URLRequest* request,
// we need to identify the encoding and convert to that encoding.
static const std::string kInvalidChars("\x00\x0d\x0a", 3);
*unescaped_path = UnescapeURLComponent(request->url().path(),
- UnescapeRule::SPACES | UnescapeRule::URL_SPECIAL_CHARS);
+ UnescapeRule::SPACES | UnescapeRule::PERCENTS);
if (unescaped_path->find_first_of(kInvalidChars) != std::string::npos) {
SetLastError(ERROR_INTERNET_INVALID_URL);
// GURL path should not contain '%00' which is NULL(0x00) when unescaped.
@@ -416,7 +416,7 @@ void URLRequestFtpJob::OnStartDirectoryTraversal() {
// Unescape the URL path and pass the raw 8bit directly to the browser.
string html = net_util::GetDirectoryListingHeader(
UnescapeURLComponent(request_->url().path(),
- UnescapeRule::SPACES | UnescapeRule::URL_SPECIAL_CHARS));
+ UnescapeRule::SPACES | UnescapeRule::PERCENTS));
// If this isn't top level directory (i.e. the path isn't "/",) add a link to
// the parent directory.
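
Finally, the UnescapeAndValidatePath() change above keeps the same validation pattern with the new flag name. A hedged standalone sketch; the header path is assumed and the Windows-specific SetLastError call is omitted:

#include <string>

#include "net/base/escape.h"  // UnescapeURLComponent and UnescapeRule.

// Sketch: unescape an FTP URL path and reject it if the decoded bytes contain
// NUL, CR, or LF (the kInvalidChars set from the hunk above), since e.g.
// "%00" unescapes to NULL(0x00).
bool UnescapeAndValidatePathSketch(const std::string& url_path,
                                   std::string* unescaped_path) {
  static const std::string kInvalidChars("\x00\x0d\x0a", 3);
  *unescaped_path = UnescapeURLComponent(
      url_path, UnescapeRule::SPACES | UnescapeRule::PERCENTS);
  return unescaped_path->find_first_of(kInvalidChars) == std::string::npos;
}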