summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorcbentzel@chromium.org <cbentzel@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>2013-01-29 21:23:54 +0000
committercbentzel@chromium.org <cbentzel@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>2013-01-29 21:23:54 +0000
commitdb471019d15ed9e8e086bf36f426326e3ebbe86d (patch)
treedd996a5ef0d4307f2289a83c0c23ad1e39306f64
parent2c407f3b1b88d643dfc988ed60715fdc18ddeb65 (diff)
downloadchromium_src-db471019d15ed9e8e086bf36f426326e3ebbe86d.zip
chromium_src-db471019d15ed9e8e086bf36f426326e3ebbe86d.tar.gz
chromium_src-db471019d15ed9e8e086bf36f426326e3ebbe86d.tar.bz2
Add backup URL support for SafeBrowsing data requests.
If a request fails, an alternate URL is tried immediately. This is only done if the configuration provides backup URLs to try. Three cases of request failure are supported: - Issues with DNS resolution, TCP connection, or SSL handshake when the client is believed to be connected to the internet. - Issues with invalid HTTP responses from the server. - Issues with general network connectivity from the client. If the backup request fails as well, then the entire update is treated as a failure and no further backups are attempted. BUG=155753 TEST=unit_tests --gtest_filter=SafeBrowsingProtocolManagerTest.* Review URL: https://chromiumcodereview.appspot.com/11784038 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@179412 0039d316-1c4b-4281-b951-d872f2087c98
-rw-r--r--chrome/browser/safe_browsing/protocol_manager.cc162
-rw-r--r--chrome/browser/safe_browsing/protocol_manager.h31
-rw-r--r--chrome/browser/safe_browsing/protocol_manager_helper.h3
-rw-r--r--chrome/browser/safe_browsing/protocol_manager_unittest.cc325
-rw-r--r--chrome/browser/safe_browsing/safe_browsing_service.cc17
5 files changed, 511 insertions, 27 deletions
diff --git a/chrome/browser/safe_browsing/protocol_manager.cc b/chrome/browser/safe_browsing/protocol_manager.cc
index e825e16..8ba91ca 100644
--- a/chrome/browser/safe_browsing/protocol_manager.cc
+++ b/chrome/browser/safe_browsing/protocol_manager.cc
@@ -21,6 +21,7 @@
#include "google_apis/google_api_keys.h"
#include "net/base/escape.h"
#include "net/base/load_flags.h"
+#include "net/base/net_errors.h"
#include "net/url_request/url_fetcher.h"
#include "net/url_request/url_request_context_getter.h"
#include "net/url_request/url_request_status.h"
@@ -28,8 +29,36 @@
using base::Time;
using base::TimeDelta;
+namespace {
+
+// UpdateResult indicates what happened with the primary and/or backup update
+// requests. The ordering of the values must stay the same for UMA consistency,
+// and is also ordered in this way to match ProtocolManager::BackupUpdateReason.
+enum UpdateResult {
+ UPDATE_RESULT_FAIL,
+ UPDATE_RESULT_SUCCESS,
+ UPDATE_RESULT_BACKUP_CONNECT_FAIL,
+ UPDATE_RESULT_BACKUP_CONNECT_SUCCESS,
+ UPDATE_RESULT_BACKUP_HTTP_FAIL,
+ UPDATE_RESULT_BACKUP_HTTP_SUCCESS,
+ UPDATE_RESULT_BACKUP_NETWORK_FAIL,
+ UPDATE_RESULT_BACKUP_NETWORK_SUCCESS,
+ UPDATE_RESULT_MAX,
+ UPDATE_RESULT_BACKUP_START = UPDATE_RESULT_BACKUP_CONNECT_FAIL,
+};
+
+void RecordUpdateResult(UpdateResult result) {
+ DCHECK(result >= 0 && result < UPDATE_RESULT_MAX);
+ UMA_HISTOGRAM_ENUMERATION("SB2.UpdateResult", result, UPDATE_RESULT_MAX);
+}
+
+} // namespace
+
+// Minimum time, in seconds, from start up before we must issue an update query.
+static const int kSbTimerStartIntervalSecMin = 60;
+
// Maximum time, in seconds, from start up before we must issue an update query.
-static const int kSbTimerStartIntervalSec = 5 * 60;
+static const int kSbTimerStartIntervalSecMax = 300;
// The maximum time, in seconds, to wait for a response to an update request.
static const int kSbMaxUpdateWaitSec = 30;
@@ -80,7 +109,8 @@ SafeBrowsingProtocolManager::SafeBrowsingProtocolManager(
update_back_off_mult_(1),
gethash_back_off_mult_(1),
next_update_interval_(base::TimeDelta::FromSeconds(
- base::RandInt(60, kSbTimerStartIntervalSec))),
+ base::RandInt(kSbTimerStartIntervalSecMin,
+ kSbTimerStartIntervalSecMax))),
update_state_(FIRST_REQUEST),
chunk_pending_to_write_(false),
version_(config.version),
@@ -88,10 +118,18 @@ SafeBrowsingProtocolManager::SafeBrowsingProtocolManager(
client_name_(config.client_name),
request_context_getter_(request_context_getter),
url_prefix_(config.url_prefix),
+ backup_update_reason_(BACKUP_UPDATE_REASON_MAX),
disable_auto_update_(config.disable_auto_update),
url_fetcher_id_(0) {
DCHECK(!url_prefix_.empty());
+ backup_url_prefixes_[BACKUP_UPDATE_REASON_CONNECT] =
+ config.backup_connect_error_url_prefix;
+ backup_url_prefixes_[BACKUP_UPDATE_REASON_HTTP] =
+ config.backup_http_error_url_prefix;
+ backup_url_prefixes_[BACKUP_UPDATE_REASON_NETWORK] =
+ config.backup_network_error_url_prefix;
+
// Set the backoff multiplier fuzz to a random value between 0 and 1.
back_off_fuzz_ = static_cast<float>(base::RandDouble());
if (version_.empty())
@@ -154,7 +192,7 @@ void SafeBrowsingProtocolManager::GetFullHash(
void SafeBrowsingProtocolManager::GetNextUpdate() {
DCHECK(CalledOnValidThread());
- if (!request_.get())
+ if (!request_.get() && request_type_ == NO_REQUEST)
IssueUpdateRequest();
}
@@ -225,7 +263,8 @@ void SafeBrowsingProtocolManager::OnURLFetchComplete(
// Update or chunk response.
fetcher.reset(request_.release());
- if (request_type_ == UPDATE_REQUEST) {
+ if (request_type_ == UPDATE_REQUEST ||
+ request_type_ == BACKUP_UPDATE_REQUEST) {
if (!fetcher.get()) {
// We've timed out waiting for an update response, so we've cancelled
// the update request and scheduled a new one. Ignore this response.
@@ -236,7 +275,8 @@ void SafeBrowsingProtocolManager::OnURLFetchComplete(
timeout_timer_.Stop();
}
- if (source->GetStatus().is_success() && source->GetResponseCode() == 200) {
+ net::URLRequestStatus status = source->GetStatus();
+ if (status.is_success() && source->GetResponseCode() == 200) {
// We have data from the SafeBrowsing service.
std::string data;
source->GetResponseAsString(&data);
@@ -246,6 +286,10 @@ void SafeBrowsingProtocolManager::OnURLFetchComplete(
VLOG(1) << "SafeBrowsing request for: " << source->GetURL()
<< " failed parse.";
chunk_request_urls_.clear();
+ if (request_type_ == UPDATE_REQUEST &&
+ IssueBackupUpdateRequest(BACKUP_UPDATE_REASON_HTTP)) {
+ return;
+ }
UpdateFinished(false);
}
@@ -258,28 +302,52 @@ void SafeBrowsingProtocolManager::OnURLFetchComplete(
}
break;
case UPDATE_REQUEST:
+ case BACKUP_UPDATE_REQUEST:
if (chunk_request_urls_.empty() && parsed_ok) {
// We are up to date since the servers gave us nothing new, so we
// are done with this update cycle.
UpdateFinished(true);
}
break;
+ case NO_REQUEST:
+ // This can happen if HandleServiceResponse fails above.
+ break;
default:
NOTREACHED();
break;
}
} else {
- // The SafeBrowsing service error, or very bad response code: back off.
- if (request_type_ == CHUNK_REQUEST)
- chunk_request_urls_.clear();
- UpdateFinished(false);
- if (source->GetStatus().status() == net::URLRequestStatus::FAILED) {
+ if (status.status() == net::URLRequestStatus::FAILED) {
VLOG(1) << "SafeBrowsing request for: " << source->GetURL()
<< " failed with error: " << source->GetStatus().error();
} else {
VLOG(1) << "SafeBrowsing request for: " << source->GetURL()
<< " failed with error: " << source->GetResponseCode();
}
+ if (request_type_ == CHUNK_REQUEST) {
+ // The SafeBrowsing service error, or very bad response code: back off.
+ chunk_request_urls_.clear();
+ } else if (request_type_ == UPDATE_REQUEST) {
+ BackupUpdateReason backup_update_reason = BACKUP_UPDATE_REASON_MAX;
+ if (status.is_success()) {
+ backup_update_reason = BACKUP_UPDATE_REASON_HTTP;
+ } else {
+ switch (status.error()) {
+ case net::ERR_INTERNET_DISCONNECTED:
+ case net::ERR_NETWORK_CHANGED:
+ backup_update_reason = BACKUP_UPDATE_REASON_NETWORK;
+ break;
+ default:
+ backup_update_reason = BACKUP_UPDATE_REASON_CONNECT;
+ break;
+ }
+ }
+ if (backup_update_reason != BACKUP_UPDATE_REASON_MAX &&
+ IssueBackupUpdateRequest(backup_update_reason)) {
+ return;
+ }
+ }
+ UpdateFinished(false);
}
}
@@ -294,7 +362,8 @@ bool SafeBrowsingProtocolManager::HandleServiceResponse(const GURL& url,
SafeBrowsingProtocolParser parser;
switch (request_type_) {
- case UPDATE_REQUEST: {
+ case UPDATE_REQUEST:
+ case BACKUP_UPDATE_REQUEST: {
int next_update_sec = -1;
bool reset = false;
scoped_ptr<std::vector<SBChunkDelete> > chunk_deletes(
@@ -467,6 +536,35 @@ void SafeBrowsingProtocolManager::IssueUpdateRequest() {
base::Unretained(this)));
}
+// The backup request can run immediately since the chunks have already been
+// retrieved from the DB.
+bool SafeBrowsingProtocolManager::IssueBackupUpdateRequest(
+ BackupUpdateReason backup_update_reason) {
+ DCHECK(CalledOnValidThread());
+ DCHECK_EQ(request_type_, UPDATE_REQUEST);
+ DCHECK(backup_update_reason >= 0 &&
+ backup_update_reason < BACKUP_UPDATE_REASON_MAX);
+ if (backup_url_prefixes_[backup_update_reason].empty())
+ return false;
+ request_type_ = BACKUP_UPDATE_REQUEST;
+ backup_update_reason_ = backup_update_reason;
+
+ GURL backup_update_url = BackupUpdateUrl(backup_update_reason);
+ request_.reset(net::URLFetcher::Create(
+ url_fetcher_id_++, backup_update_url, net::URLFetcher::POST, this));
+ request_->SetLoadFlags(net::LOAD_DISABLE_CACHE);
+ request_->SetRequestContext(request_context_getter_);
+ request_->SetUploadData("text/plain", update_list_data_);
+ request_->Start();
+
+ // Begin the update request timeout.
+ timeout_timer_.Start(FROM_HERE, TimeDelta::FromSeconds(kSbMaxUpdateWaitSec),
+ this,
+ &SafeBrowsingProtocolManager::UpdateResponseTimeout);
+
+ return true;
+}
+
void SafeBrowsingProtocolManager::IssueChunkRequest() {
DCHECK(CalledOnValidThread());
// We are only allowed to have one request outstanding at any time. Also,
@@ -491,6 +589,7 @@ void SafeBrowsingProtocolManager::OnGetChunksComplete(
const std::vector<SBListChunkRanges>& lists, bool database_error) {
DCHECK(CalledOnValidThread());
DCHECK_EQ(request_type_, UPDATE_REQUEST);
+ DCHECK(update_list_data_.empty());
if (database_error) {
// The update was not successful, but don't back off.
UpdateFinished(false, false);
@@ -498,11 +597,10 @@ void SafeBrowsingProtocolManager::OnGetChunksComplete(
}
// Format our stored chunks:
- std::string list_data;
bool found_malware = false;
bool found_phishing = false;
for (size_t i = 0; i < lists.size(); ++i) {
- list_data.append(FormatList(lists[i]));
+ update_list_data_.append(FormatList(lists[i]));
if (lists[i].name == safe_browsing_util::kPhishingList)
found_phishing = true;
@@ -513,11 +611,11 @@ void SafeBrowsingProtocolManager::OnGetChunksComplete(
// If we have an empty database, let the server know we want data for these
// lists.
if (!found_phishing)
- list_data.append(FormatList(
+ update_list_data_.append(FormatList(
SBListChunkRanges(safe_browsing_util::kPhishingList)));
if (!found_malware)
- list_data.append(FormatList(
+ update_list_data_.append(FormatList(
SBListChunkRanges(safe_browsing_util::kMalwareList)));
GURL update_url = UpdateUrl();
@@ -525,7 +623,7 @@ void SafeBrowsingProtocolManager::OnGetChunksComplete(
url_fetcher_id_++, update_url, net::URLFetcher::POST, this));
request_->SetLoadFlags(net::LOAD_DISABLE_CACHE);
request_->SetRequestContext(request_context_getter_);
- request_->SetUploadData("text/plain", list_data);
+ request_->SetUploadData("text/plain", update_list_data_);
request_->Start();
// Begin the update request timeout.
@@ -538,8 +636,13 @@ void SafeBrowsingProtocolManager::OnGetChunksComplete(
// will run. Close the current update session and schedule another update.
void SafeBrowsingProtocolManager::UpdateResponseTimeout() {
DCHECK(CalledOnValidThread());
- DCHECK_EQ(request_type_, UPDATE_REQUEST);
+ DCHECK(request_type_ == UPDATE_REQUEST ||
+ request_type_ == BACKUP_UPDATE_REQUEST);
request_.reset();
+ if (request_type_ == UPDATE_REQUEST &&
+ IssueBackupUpdateRequest(BACKUP_UPDATE_REASON_CONNECT)) {
+ return;
+ }
UpdateFinished(false);
}
@@ -589,6 +692,20 @@ void SafeBrowsingProtocolManager::UpdateFinished(bool success, bool back_off) {
DCHECK(CalledOnValidThread());
UMA_HISTOGRAM_COUNTS("SB2.UpdateSize", update_size_);
update_size_ = 0;
+ bool update_success = success || request_type_ == CHUNK_REQUEST;
+ if (backup_update_reason_ == BACKUP_UPDATE_REASON_MAX) {
+ RecordUpdateResult(
+ update_success ? UPDATE_RESULT_SUCCESS : UPDATE_RESULT_FAIL);
+ } else {
+ UpdateResult update_result = static_cast<UpdateResult>(
+ UPDATE_RESULT_BACKUP_START +
+ (static_cast<int>(backup_update_reason_) * 2) +
+ update_success);
+ RecordUpdateResult(update_result);
+ }
+ backup_update_reason_ = BACKUP_UPDATE_REASON_MAX;
+ request_type_ = NO_REQUEST;
+ update_list_data_.clear();
delegate_->UpdateFinished(success);
ScheduleNextUpdate(back_off);
}
@@ -599,6 +716,17 @@ GURL SafeBrowsingProtocolManager::UpdateUrl() const {
return GURL(url);
}
+GURL SafeBrowsingProtocolManager::BackupUpdateUrl(
+ BackupUpdateReason backup_update_reason) const {
+ DCHECK(backup_update_reason >= 0 &&
+ backup_update_reason < BACKUP_UPDATE_REASON_MAX);
+ DCHECK(!backup_url_prefixes_[backup_update_reason].empty());
+ std::string url = SafeBrowsingProtocolManagerHelper::ComposeUrl(
+ backup_url_prefixes_[backup_update_reason], "downloads", client_name_,
+ version_, additional_query_);
+ return GURL(url);
+}
+
GURL SafeBrowsingProtocolManager::GetHashUrl() const {
std::string url = SafeBrowsingProtocolManagerHelper::ComposeUrl(
url_prefix_, "gethash", client_name_, version_, additional_query_);
diff --git a/chrome/browser/safe_browsing/protocol_manager.h b/chrome/browser/safe_browsing/protocol_manager.h
index 7104be4..53428e1 100644
--- a/chrome/browser/safe_browsing/protocol_manager.h
+++ b/chrome/browser/safe_browsing/protocol_manager.h
@@ -180,11 +180,25 @@ class SafeBrowsingProtocolManager : public net::URLFetcherDelegate,
enum SafeBrowsingRequestType {
NO_REQUEST = 0, // No requests in progress
UPDATE_REQUEST, // Request for redirect URLs
+ BACKUP_UPDATE_REQUEST, // Request for redirect URLs to a backup URL.
CHUNK_REQUEST, // Request for a specific chunk
};
+ // Which type of backup update request is being used.
+ enum BackupUpdateReason {
+ BACKUP_UPDATE_REASON_CONNECT,
+ BACKUP_UPDATE_REASON_HTTP,
+ BACKUP_UPDATE_REASON_NETWORK,
+ BACKUP_UPDATE_REASON_MAX,
+ };
+
// Generates Update URL for querying about the latest set of chunk updates.
GURL UpdateUrl() const;
+
+ // Generates backup Update URL for querying about the latest set of chunk
+ // updates. |url_prefix| is the base prefix to use.
+ GURL BackupUpdateUrl(BackupUpdateReason reason) const;
+
// Generates GetHash request URL for retrieving full hashes.
GURL GetHashUrl() const;
// Generates URL for reporting safe browsing hits for UMA users.
@@ -217,6 +231,14 @@ class SafeBrowsingProtocolManager : public net::URLFetcherDelegate,
// is sent upon completion of that query in OnGetChunksComplete.
void IssueUpdateRequest();
+ // Sends a backup request for a list of chunks to download, when the primary
+ // update request failed. |reason| specifies why the backup is needed. Unlike
+ // the primary IssueUpdateRequest, this does not need to hit the local
+ // SafeBrowsing database since the existing chunk numbers are remembered from
+ // the primary update request. Returns whether the backup request was issued -
+ // this may be false in cases where there is not a prefix specified.
+ bool IssueBackupUpdateRequest(BackupUpdateReason reason);
+
// Sends a request for a chunk to the SafeBrowsing servers.
void IssueChunkRequest();
@@ -339,6 +361,15 @@ class SafeBrowsingProtocolManager : public net::URLFetcherDelegate,
// URL prefix where browser fetches safebrowsing chunk updates, and hashes.
std::string url_prefix_;
+ // Backup URL prefixes for updates.
+ std::string backup_url_prefixes_[BACKUP_UPDATE_REASON_MAX];
+
+ // The current reason why the backup update request is happening.
+ BackupUpdateReason backup_update_reason_;
+
+ // Data to POST when doing an update.
+ std::string update_list_data_;
+
// When true, protocol manager will not start an update unless
// ForceScheduleNextUpdate() is called. This is set for testing purpose.
bool disable_auto_update_;
diff --git a/chrome/browser/safe_browsing/protocol_manager_helper.h b/chrome/browser/safe_browsing/protocol_manager_helper.h
index 0072dd7..77ba873 100644
--- a/chrome/browser/safe_browsing/protocol_manager_helper.h
+++ b/chrome/browser/safe_browsing/protocol_manager_helper.h
@@ -20,6 +20,9 @@ struct SafeBrowsingProtocolConfig {
~SafeBrowsingProtocolConfig();
std::string client_name;
std::string url_prefix;
+ std::string backup_connect_error_url_prefix;
+ std::string backup_http_error_url_prefix;
+ std::string backup_network_error_url_prefix;
std::string version;
bool disable_auto_update;
};
diff --git a/chrome/browser/safe_browsing/protocol_manager_unittest.cc b/chrome/browser/safe_browsing/protocol_manager_unittest.cc
index 3f56aac..0bd9c3a 100644
--- a/chrome/browser/safe_browsing/protocol_manager_unittest.cc
+++ b/chrome/browser/safe_browsing/protocol_manager_unittest.cc
@@ -23,6 +23,9 @@ using testing::_;
using testing::Invoke;
static const char kUrlPrefix[] = "https://prefix.com/foo";
+static const char kBackupConnectUrlPrefix[] = "https://alt1-prefix.com/foo";
+static const char kBackupHttpUrlPrefix[] = "https://alt2-prefix.com/foo";
+static const char kBackupNetworkUrlPrefix[] = "https://alt3-prefix.com/foo";
static const char kClient[] = "unittest";
static const char kAppVer[] = "1.0";
static const char kAdditionalQuery[] = "additional_query";
@@ -45,22 +48,31 @@ class SafeBrowsingProtocolManagerTest : public testing::Test {
SafeBrowsingProtocolConfig config;
config.client_name = kClient;
config.url_prefix = kUrlPrefix;
+ config.backup_connect_error_url_prefix = kBackupConnectUrlPrefix;
+ config.backup_http_error_url_prefix = kBackupHttpUrlPrefix;
+ config.backup_network_error_url_prefix = kBackupNetworkUrlPrefix;
config.version = kAppVer;
return scoped_ptr<SafeBrowsingProtocolManager>(
SafeBrowsingProtocolManager::Create(delegate, NULL, config));
}
- void ValidateUpdateFetcherRequest(const net::TestURLFetcher* url_fetcher) {
+ void ValidateUpdateFetcherRequest(
+ const net::TestURLFetcher* url_fetcher,
+ const std::string& expected_prefix) {
ASSERT_TRUE(url_fetcher);
EXPECT_EQ(net::LOAD_DISABLE_CACHE, url_fetcher->GetLoadFlags());
EXPECT_EQ("goog-phish-shavar;\ngoog-malware-shavar;\n",
url_fetcher->upload_data());
- EXPECT_EQ(GURL("https://prefix.com/foo/downloads?client=unittest&appver=1.0"
+ EXPECT_EQ(GURL(expected_prefix + "/downloads?client=unittest&appver=1.0"
"&pver=2.2" + key_param_),
url_fetcher->GetOriginalURL());
}
+ void ValidateUpdateFetcherRequest(const net::TestURLFetcher* url_fetcher) {
+ ValidateUpdateFetcherRequest(url_fetcher, kUrlPrefix);
+ }
+
void ValidateRedirectFetcherRequest(const net::TestURLFetcher* url_fetcher,
const std::string& expected_url) {
ASSERT_TRUE(url_fetcher);
@@ -372,7 +384,7 @@ TEST_F(SafeBrowsingProtocolManagerTest, ExistingDatabase) {
EXPECT_TRUE(pm->IsUpdateScheduled());
}
-TEST_F(SafeBrowsingProtocolManagerTest, UpdateResponseBadBody) {
+TEST_F(SafeBrowsingProtocolManagerTest, UpdateResponseBadBodyBackupSuccess) {
scoped_refptr<base::TestSimpleTaskRunner> runner(
new base::TestSimpleTaskRunner());
base::ThreadTaskRunnerHandle runner_handler(runner);
@@ -384,7 +396,7 @@ TEST_F(SafeBrowsingProtocolManagerTest, UpdateResponseBadBody) {
Invoke(testing::CreateFunctor(InvokeGetChunksCallback,
std::vector<SBListChunkRanges>(),
false)));
- EXPECT_CALL(test_delegate, UpdateFinished(false)).Times(1);
+ EXPECT_CALL(test_delegate, UpdateFinished(true)).Times(1);
scoped_ptr<SafeBrowsingProtocolManager> pm(
CreateProtocolManager(&test_delegate));
@@ -403,11 +415,24 @@ TEST_F(SafeBrowsingProtocolManagerTest, UpdateResponseBadBody) {
url_fetcher->SetResponseString("THIS_IS_A_BAD_RESPONSE");
url_fetcher->delegate()->OnURLFetchComplete(url_fetcher);
+ // There should now be a backup request.
+ net::TestURLFetcher* backup_url_fetcher =
+ url_fetcher_factory.GetFetcherByID(1);
+ ValidateUpdateFetcherRequest(backup_url_fetcher,
+ kBackupHttpUrlPrefix);
+
+ // Respond to the backup successfully.
+ backup_url_fetcher->set_status(net::URLRequestStatus());
+ backup_url_fetcher->set_response_code(200);
+ backup_url_fetcher->SetResponseString("");
+ backup_url_fetcher->delegate()->OnURLFetchComplete(backup_url_fetcher);
+
EXPECT_TRUE(pm->IsUpdateScheduled());
}
-// Tests what happens when there is an error in the update response.
-TEST_F(SafeBrowsingProtocolManagerTest, UpdateResponseHttpError) {
+// Tests what happens when there is an HTTP error response to the update
+// request, as well as an error response to the backup update request.
+TEST_F(SafeBrowsingProtocolManagerTest, UpdateResponseHttpErrorBackupError) {
scoped_refptr<base::TestSimpleTaskRunner> runner(
new base::TestSimpleTaskRunner());
base::ThreadTaskRunnerHandle runner_handler(runner);
@@ -438,11 +463,122 @@ TEST_F(SafeBrowsingProtocolManagerTest, UpdateResponseHttpError) {
url_fetcher->SetResponseString("");
url_fetcher->delegate()->OnURLFetchComplete(url_fetcher);
+ // There should now be a backup request.
+ net::TestURLFetcher* backup_url_fetcher =
+ url_fetcher_factory.GetFetcherByID(1);
+ ValidateUpdateFetcherRequest(backup_url_fetcher, kBackupHttpUrlPrefix);
+
+ // Respond to the backup unsuccessfully.
+ backup_url_fetcher->set_status(net::URLRequestStatus());
+ backup_url_fetcher->set_response_code(404);
+ backup_url_fetcher->SetResponseString("");
+ backup_url_fetcher->delegate()->OnURLFetchComplete(backup_url_fetcher);
+
+ EXPECT_TRUE(pm->IsUpdateScheduled());
+}
+
+// Tests what happens when there is an HTTP error response to the update
+// request, followed by a successful response to the backup update request.
+TEST_F(SafeBrowsingProtocolManagerTest, UpdateResponseHttpErrorBackupSuccess) {
+ scoped_refptr<base::TestSimpleTaskRunner> runner(
+ new base::TestSimpleTaskRunner());
+ base::ThreadTaskRunnerHandle runner_handler(runner);
+ net::TestURLFetcherFactory url_fetcher_factory;
+
+ testing::StrictMock<MockProtocolDelegate> test_delegate;
+ EXPECT_CALL(test_delegate, UpdateStarted()).Times(1);
+ EXPECT_CALL(test_delegate, GetChunks(_)).WillOnce(
+ Invoke(testing::CreateFunctor(InvokeGetChunksCallback,
+ std::vector<SBListChunkRanges>(),
+ false)));
+ EXPECT_CALL(test_delegate, UpdateFinished(true)).Times(1);
+
+ scoped_ptr<SafeBrowsingProtocolManager> pm(
+ CreateProtocolManager(&test_delegate));
+
+ // Kick off initialization. This returns chunks from the DB synchronously.
+ pm->ForceScheduleNextUpdate(TimeDelta());
+ runner->RunPendingTasks();
+
+ // We should have an URLFetcher at this point in time.
+ net::TestURLFetcher* url_fetcher = url_fetcher_factory.GetFetcherByID(0);
+ ValidateUpdateFetcherRequest(url_fetcher);
+
+ // Go ahead and respond to it.
+ url_fetcher->set_status(net::URLRequestStatus());
+ url_fetcher->set_response_code(404);
+ url_fetcher->SetResponseString("");
+ url_fetcher->delegate()->OnURLFetchComplete(url_fetcher);
+
+ // There should now be a backup request.
+ net::TestURLFetcher* backup_url_fetcher =
+ url_fetcher_factory.GetFetcherByID(1);
+ ValidateUpdateFetcherRequest(backup_url_fetcher,
+ kBackupHttpUrlPrefix);
+
+ // Respond to the backup successfully.
+ backup_url_fetcher->set_status(net::URLRequestStatus());
+ backup_url_fetcher->set_response_code(200);
+ backup_url_fetcher->SetResponseString("");
+ backup_url_fetcher->delegate()->OnURLFetchComplete(backup_url_fetcher);
+
EXPECT_TRUE(pm->IsUpdateScheduled());
}
-// Tests what happens when there is an error with the connection.
-TEST_F(SafeBrowsingProtocolManagerTest, UpdateResponseConnectionError) {
+// Tests what happens when there is an HTTP error response to the update
+// request, and a timeout on the backup update request.
+TEST_F(SafeBrowsingProtocolManagerTest, UpdateResponseHttpErrorBackupTimeout) {
+ scoped_refptr<base::TestSimpleTaskRunner> runner(
+ new base::TestSimpleTaskRunner());
+ base::ThreadTaskRunnerHandle runner_handler(runner);
+ net::TestURLFetcherFactory url_fetcher_factory;
+
+ testing::StrictMock<MockProtocolDelegate> test_delegate;
+ EXPECT_CALL(test_delegate, UpdateStarted()).Times(1);
+ EXPECT_CALL(test_delegate, GetChunks(_)).WillOnce(
+ Invoke(testing::CreateFunctor(InvokeGetChunksCallback,
+ std::vector<SBListChunkRanges>(),
+ false)));
+ EXPECT_CALL(test_delegate, UpdateFinished(false)).Times(1);
+
+ scoped_ptr<SafeBrowsingProtocolManager> pm(
+ CreateProtocolManager(&test_delegate));
+
+ // Kick off initialization. This returns chunks from the DB synchronously.
+ pm->ForceScheduleNextUpdate(TimeDelta());
+ runner->RunPendingTasks();
+
+ // We should have an URLFetcher at this point in time.
+ net::TestURLFetcher* url_fetcher = url_fetcher_factory.GetFetcherByID(0);
+ ValidateUpdateFetcherRequest(url_fetcher);
+
+ // Go ahead and respond to it.
+ url_fetcher->set_status(net::URLRequestStatus());
+ url_fetcher->set_response_code(404);
+ url_fetcher->SetResponseString("");
+ url_fetcher->delegate()->OnURLFetchComplete(url_fetcher);
+
+ // There should now be a backup request.
+ net::TestURLFetcher* backup_url_fetcher =
+ url_fetcher_factory.GetFetcherByID(1);
+ ValidateUpdateFetcherRequest(backup_url_fetcher, kBackupHttpUrlPrefix);
+
+ // Either one or two calls to RunPendingTasks are needed here. The first run
+ // of RunPendingTasks will run the canceled timeout task associated with
+ // the first Update request. Depending on timing, this will either directly
+ // call the timeout task from the backup request, or schedule another task
+ // to run that in the future.
+ // TODO(cbentzel): Less fragile approach.
+ runner->RunPendingTasks();
+ if (!pm->IsUpdateScheduled())
+ runner->RunPendingTasks();
+ EXPECT_TRUE(pm->IsUpdateScheduled());
+}
+
+// Tests what happens when there is a connection error when issuing the update
+// request, and an error with the backup update request.
+TEST_F(SafeBrowsingProtocolManagerTest,
+ UpdateResponseConnectionErrorBackupError) {
scoped_refptr<base::TestSimpleTaskRunner> runner(
new base::TestSimpleTaskRunner());
base::ThreadTaskRunnerHandle runner_handler(runner);
@@ -472,11 +608,72 @@ TEST_F(SafeBrowsingProtocolManagerTest, UpdateResponseConnectionError) {
net::ERR_CONNECTION_RESET));
url_fetcher->delegate()->OnURLFetchComplete(url_fetcher);
+ // There should be a backup URLFetcher now.
+ net::TestURLFetcher* backup_url_fetcher =
+ url_fetcher_factory.GetFetcherByID(1);
+ ValidateUpdateFetcherRequest(backup_url_fetcher,
+ kBackupConnectUrlPrefix);
+
+ // Respond to the backup unsuccessfully.
+ backup_url_fetcher->set_status(net::URLRequestStatus());
+ backup_url_fetcher->set_response_code(404);
+ backup_url_fetcher->SetResponseString("");
+ backup_url_fetcher->delegate()->OnURLFetchComplete(backup_url_fetcher);
+
EXPECT_TRUE(pm->IsUpdateScheduled());
}
-// Tests what happens when there is a timeout before an update response.
-TEST_F(SafeBrowsingProtocolManagerTest, UpdateResponseTimeout) {
+// Tests what happens when there is a connection error when issuing the update
+// request, and a successful response to the backup update request.
+TEST_F(SafeBrowsingProtocolManagerTest,
+ UpdateResponseConnectionErrorBackupSuccess) {
+ scoped_refptr<base::TestSimpleTaskRunner> runner(
+ new base::TestSimpleTaskRunner());
+ base::ThreadTaskRunnerHandle runner_handler(runner);
+ net::TestURLFetcherFactory url_fetcher_factory;
+
+ testing::StrictMock<MockProtocolDelegate> test_delegate;
+ EXPECT_CALL(test_delegate, UpdateStarted()).Times(1);
+ EXPECT_CALL(test_delegate, GetChunks(_)).WillOnce(
+ Invoke(testing::CreateFunctor(InvokeGetChunksCallback,
+ std::vector<SBListChunkRanges>(),
+ false)));
+ EXPECT_CALL(test_delegate, UpdateFinished(true)).Times(1);
+
+ scoped_ptr<SafeBrowsingProtocolManager> pm(
+ CreateProtocolManager(&test_delegate));
+
+ // Kick off initialization. This returns chunks from the DB synchronously.
+ pm->ForceScheduleNextUpdate(TimeDelta());
+ runner->RunPendingTasks();
+
+ // We should have an URLFetcher at this point in time.
+ net::TestURLFetcher* url_fetcher = url_fetcher_factory.GetFetcherByID(0);
+ ValidateUpdateFetcherRequest(url_fetcher);
+
+ // Go ahead and respond to it.
+ url_fetcher->set_status(net::URLRequestStatus(net::URLRequestStatus::FAILED,
+ net::ERR_CONNECTION_RESET));
+ url_fetcher->delegate()->OnURLFetchComplete(url_fetcher);
+
+ // There should be a backup URLFetcher now.
+ net::TestURLFetcher* backup_url_fetcher =
+ url_fetcher_factory.GetFetcherByID(1);
+ ValidateUpdateFetcherRequest(backup_url_fetcher,
+ kBackupConnectUrlPrefix);
+
+ // Respond to the backup successfully.
+ backup_url_fetcher->set_status(net::URLRequestStatus());
+ backup_url_fetcher->set_response_code(200);
+ backup_url_fetcher->SetResponseString("");
+ backup_url_fetcher->delegate()->OnURLFetchComplete(backup_url_fetcher);
+
+ EXPECT_TRUE(pm->IsUpdateScheduled());
+}
+// Tests what happens when there is a network state error when issuing the
+// update request, and an error with the backup update request.
+TEST_F(SafeBrowsingProtocolManagerTest,
+ UpdateResponseNetworkErrorBackupError) {
scoped_refptr<base::TestSimpleTaskRunner> runner(
new base::TestSimpleTaskRunner());
base::ThreadTaskRunnerHandle runner_handler(runner);
@@ -501,10 +698,118 @@ TEST_F(SafeBrowsingProtocolManagerTest, UpdateResponseTimeout) {
net::TestURLFetcher* url_fetcher = url_fetcher_factory.GetFetcherByID(0);
ValidateUpdateFetcherRequest(url_fetcher);
+ // Go ahead and respond to it.
+ url_fetcher->set_status(
+ net::URLRequestStatus(net::URLRequestStatus::FAILED,
+ net::ERR_INTERNET_DISCONNECTED));
+ url_fetcher->delegate()->OnURLFetchComplete(url_fetcher);
+
+ // There should be a backup URLFetcher now.
+ net::TestURLFetcher* backup_url_fetcher =
+ url_fetcher_factory.GetFetcherByID(1);
+ ValidateUpdateFetcherRequest(backup_url_fetcher,
+ kBackupNetworkUrlPrefix);
+
+ // Respond to the backup unsuccessfully.
+ backup_url_fetcher->set_status(net::URLRequestStatus());
+ backup_url_fetcher->set_response_code(404);
+ backup_url_fetcher->SetResponseString("");
+ backup_url_fetcher->delegate()->OnURLFetchComplete(backup_url_fetcher);
+
+ EXPECT_TRUE(pm->IsUpdateScheduled());
+}
+
+// Tests what happens when there is a network state error when issuing the
+// update request, and a successful response to the backup update request.
+TEST_F(SafeBrowsingProtocolManagerTest,
+ UpdateResponseNetworkErrorBackupSuccess) {
+ scoped_refptr<base::TestSimpleTaskRunner> runner(
+ new base::TestSimpleTaskRunner());
+ base::ThreadTaskRunnerHandle runner_handler(runner);
+ net::TestURLFetcherFactory url_fetcher_factory;
+
+ testing::StrictMock<MockProtocolDelegate> test_delegate;
+ EXPECT_CALL(test_delegate, UpdateStarted()).Times(1);
+ EXPECT_CALL(test_delegate, GetChunks(_)).WillOnce(
+ Invoke(testing::CreateFunctor(InvokeGetChunksCallback,
+ std::vector<SBListChunkRanges>(),
+ false)));
+ EXPECT_CALL(test_delegate, UpdateFinished(true)).Times(1);
+
+ scoped_ptr<SafeBrowsingProtocolManager> pm(
+ CreateProtocolManager(&test_delegate));
+
+ // Kick off initialization. This returns chunks from the DB synchronously.
+ pm->ForceScheduleNextUpdate(TimeDelta());
+ runner->RunPendingTasks();
+
+ // We should have an URLFetcher at this point in time.
+ net::TestURLFetcher* url_fetcher = url_fetcher_factory.GetFetcherByID(0);
+ ValidateUpdateFetcherRequest(url_fetcher);
+
+ // Go ahead and respond to it.
+ url_fetcher->set_status(
+ net::URLRequestStatus(net::URLRequestStatus::FAILED,
+ net::ERR_INTERNET_DISCONNECTED));
+ url_fetcher->delegate()->OnURLFetchComplete(url_fetcher);
+
+ // There should be a backup URLFetcher now.
+ net::TestURLFetcher* backup_url_fetcher =
+ url_fetcher_factory.GetFetcherByID(1);
+ ValidateUpdateFetcherRequest(backup_url_fetcher,
+ kBackupNetworkUrlPrefix);
+
+ // Respond to the backup successfully.
+ backup_url_fetcher->set_status(net::URLRequestStatus());
+ backup_url_fetcher->set_response_code(200);
+ backup_url_fetcher->SetResponseString("");
+ backup_url_fetcher->delegate()->OnURLFetchComplete(backup_url_fetcher);
+
+ EXPECT_TRUE(pm->IsUpdateScheduled());
+}
+
+// Tests what happens when there is a timeout before an update response.
+TEST_F(SafeBrowsingProtocolManagerTest, UpdateResponseTimeoutBackupSuccess) {
+ scoped_refptr<base::TestSimpleTaskRunner> runner(
+ new base::TestSimpleTaskRunner());
+ base::ThreadTaskRunnerHandle runner_handler(runner);
+ net::TestURLFetcherFactory url_fetcher_factory;
+
+ testing::StrictMock<MockProtocolDelegate> test_delegate;
+ EXPECT_CALL(test_delegate, UpdateStarted()).Times(1);
+ EXPECT_CALL(test_delegate, GetChunks(_)).WillOnce(
+ Invoke(testing::CreateFunctor(InvokeGetChunksCallback,
+ std::vector<SBListChunkRanges>(),
+ false)));
+ EXPECT_CALL(test_delegate, UpdateFinished(true)).Times(1);
+
+ scoped_ptr<SafeBrowsingProtocolManager> pm(
+ CreateProtocolManager(&test_delegate));
+
+ // Kick off initialization. This returns chunks from the DB synchronously.
+ pm->ForceScheduleNextUpdate(TimeDelta());
+ runner->RunPendingTasks();
+
+ // We should have an URLFetcher at this point in time.
+ net::TestURLFetcher* url_fetcher = url_fetcher_factory.GetFetcherByID(0);
+ ValidateUpdateFetcherRequest(url_fetcher);
+
// The first time RunPendingTasks is called above, the update timeout timer is
// not handled. This call of RunPendingTasks will handle the update.
runner->RunPendingTasks();
+ // There should be a backup URLFetcher now.
+ net::TestURLFetcher* backup_url_fetcher =
+ url_fetcher_factory.GetFetcherByID(1);
+ ValidateUpdateFetcherRequest(backup_url_fetcher,
+ kBackupConnectUrlPrefix);
+
+ // Respond to the backup successfully.
+ backup_url_fetcher->set_status(net::URLRequestStatus());
+ backup_url_fetcher->set_response_code(200);
+ backup_url_fetcher->SetResponseString("");
+ backup_url_fetcher->delegate()->OnURLFetchComplete(backup_url_fetcher);
+
EXPECT_TRUE(pm->IsUpdateScheduled());
}
diff --git a/chrome/browser/safe_browsing/safe_browsing_service.cc b/chrome/browser/safe_browsing/safe_browsing_service.cc
index a2fc083..fdb291c 100644
--- a/chrome/browser/safe_browsing/safe_browsing_service.cc
+++ b/chrome/browser/safe_browsing/safe_browsing_service.cc
@@ -59,6 +59,20 @@ const FilePath::CharType kCookiesFile[] = FILE_PATH_LITERAL(" Cookies");
const char* const kSbDefaultURLPrefix =
"https://safebrowsing.google.com/safebrowsing";
+// The backup URL prefix used when there are issues establishing a connection
+// with the server at the primary URL.
+const char* const kSbBackupConnectErrorURLPrefix =
+ "https://alt1-safebrowsing.google.com/safebrowsing";
+
+// The backup URL prefix used when there are HTTP-specific issues with the
+// server at the primary URL.
+const char* const kSbBackupHttpErrorURLPrefix =
+ "https://alt2-safebrowsing.google.com/safebrowsing";
+
+// The backup URL prefix used when there are local network specific issues.
+const char* const kSbBackupNetworkErrorURLPrefix =
+ "https://alt3-safebrowsing.google.com/safebrowsing";
+
FilePath CookieFilePath() {
return FilePath(
SafeBrowsingService::GetBaseFilename().value() + kCookiesFile);
@@ -345,6 +359,9 @@ void SafeBrowsingService::StartOnIOThread() {
cmdline->HasSwitch(switches::kSbURLPrefix) ?
cmdline->GetSwitchValueASCII(switches::kSbURLPrefix) :
kSbDefaultURLPrefix;
+ config.backup_connect_error_url_prefix = kSbBackupConnectErrorURLPrefix;
+ config.backup_http_error_url_prefix = kSbBackupHttpErrorURLPrefix;
+ config.backup_network_error_url_prefix = kSbBackupNetworkErrorURLPrefix;
#if defined(FULL_SAFE_BROWSING)
DCHECK(database_manager_);