29 files changed, 1358 insertions, 514 deletions
diff --git a/chrome/browser/google/google_url_tracker.cc b/chrome/browser/google/google_url_tracker.cc index 9b06e33..e4f5edb 100644 --- a/chrome/browser/google/google_url_tracker.cc +++ b/chrome/browser/google/google_url_tracker.cc @@ -18,7 +18,6 @@ #include "chrome/browser/tab_contents/navigation_controller.h" #include "chrome/browser/tab_contents/tab_contents.h" #include "chrome/common/chrome_switches.h" -#include "chrome/common/net/url_fetcher_protect.h" #include "chrome/common/notification_service.h" #include "chrome/common/pref_names.h" #include "grit/generated_resources.h" @@ -112,13 +111,6 @@ GoogleURLTracker::GoogleURLTracker() net::NetworkChangeNotifier::AddObserver(this); - // Configure to max_retries at most kMaxRetries times for 5xx errors. - URLFetcherProtectEntry* protect = - URLFetcherProtectManager::GetInstance()->Register( - GURL(kSearchDomainCheckURL).host()); - static const int kMaxRetries = 5; - protect->SetMaxRetries(kMaxRetries); - MessageLoop::current()->PostTask(FROM_HERE, runnable_method_factory_.NewRunnableMethod( &GoogleURLTracker::QueueWakeupTask)); @@ -211,6 +203,11 @@ void GoogleURLTracker::StartFetchIfDesirable() { fetcher_->set_load_flags(net::LOAD_DISABLE_CACHE | net::LOAD_DO_NOT_SAVE_COOKIES); fetcher_->set_request_context(Profile::GetDefaultRequestContext()); + + // Configure to max_retries at most kMaxRetries times for 5xx errors. + static const int kMaxRetries = 5; + fetcher_->set_max_retries(kMaxRetries); + fetcher_->Start(); } diff --git a/chrome/chrome_common.gypi b/chrome/chrome_common.gypi index 9bc47ff..fde3253 100644 --- a/chrome/chrome_common.gypi +++ b/chrome/chrome_common.gypi @@ -458,8 +458,6 @@ 'common/net/socket_stream.h', 'common/net/url_fetcher.cc', 'common/net/url_fetcher.h', - 'common/net/url_fetcher_protect.cc', - 'common/net/url_fetcher_protect.h', 'common/net/url_request_context_getter.cc', 'common/net/url_request_context_getter.h', 'common/net/url_request_intercept_job.cc', diff --git a/chrome/common/net/url_fetcher.cc b/chrome/common/net/url_fetcher.cc index ebc876d..1cf05b1 100644 --- a/chrome/common/net/url_fetcher.cc +++ b/chrome/common/net/url_fetcher.cc @@ -14,15 +14,16 @@ #include "base/stl_util-inl.h" #include "base/string_util.h" #include "base/thread.h" -#include "chrome/common/net/url_fetcher_protect.h" #include "chrome/common/net/url_request_context_getter.h" #include "googleurl/src/gurl.h" #include "net/base/load_flags.h" #include "net/base/io_buffer.h" +#include "net/base/net_errors.h" #include "net/http/http_request_headers.h" #include "net/http/http_response_headers.h" #include "net/url_request/url_request.h" #include "net/url_request/url_request_context.h" +#include "net/url_request/url_request_throttler_manager.h" static const int kBufferSize = 4096; @@ -53,6 +54,9 @@ class URLFetcher::Core // safe to call this multiple times. void Stop(); + // Reports that the received content was malformed. + void ReceivedContentWasMalformed(); + // URLRequest::Delegate implementation. virtual void OnResponseStarted(URLRequest* request); virtual void OnReadCompleted(URLRequest* request, int bytes_read); @@ -85,13 +89,19 @@ class URLFetcher::Core // Wrapper functions that allow us to ensure actions happen on the right // thread. void StartURLRequest(); + void StartURLRequestWhenAppropriate(); void CancelURLRequest(); void OnCompletedURLRequest(const URLRequestStatus& status); + void NotifyMalformedContent(); // Deletes the request, removes it from the registry, and removes the // destruction observer. 
void ReleaseRequest(); + // Returns the max value of exponential back-off release time for + // |original_url_| and |url_|. + base::TimeTicks GetBackoffReleaseTime(); + URLFetcher* fetcher_; // Corresponding fetcher object GURL original_url_; // The URL we were asked to fetch GURL url_; // The URL we eventually wound up at @@ -116,20 +126,31 @@ class URLFetcher::Core std::string upload_content_; // HTTP POST payload std::string upload_content_type_; // MIME type of POST payload - // The overload protection entry for this URL. This is used to - // incrementally back off how rapidly we'll send requests to a particular - // URL, to avoid placing too much demand on the remote resource. We update - // this with the status of all requests as they return, and in turn use it - // to determine how long to wait before making another request. - URLFetcherProtectEntry* protect_entry_; + // Used to determine how long to wait before making a request or doing a + // retry. + // Both of them can only be accessed on the IO thread. + // We need not only the throttler entry for |original_URL|, but also the one + // for |url|. For example, consider the case that URL A redirects to URL B, + // for which the server returns a 500 response. In this case, the exponential + // back-off release time of URL A won't increase. If we retry without + // considering the back-off constraint of URL B, we may send out too many + // requests for URL A in a short period of time. + scoped_refptr<net::URLRequestThrottlerEntryInterface> + original_url_throttler_entry_; + scoped_refptr<net::URLRequestThrottlerEntryInterface> url_throttler_entry_; + // |num_retries_| indicates how many times we've failed to successfully // fetch this URL. Once this value exceeds the maximum number of retries - // specified by the protection manager, we'll give up. + // specified by the owner URLFetcher instance, we'll give up. int num_retries_; // True if the URLFetcher has been cancelled. bool was_cancelled_; + // Since GetBackoffReleaseTime() can only be called on the IO thread, we cache + // its value to be used by OnCompletedURLRequest on the creating thread. 
+ base::TimeTicks backoff_release_time_; + static base::LazyInstance<Registry> g_registry; friend class URLFetcher; @@ -170,7 +191,8 @@ URLFetcher::URLFetcher(const GURL& url, Delegate* d) : ALLOW_THIS_IN_INITIALIZER_LIST( core_(new Core(this, url, request_type, d))), - automatically_retry_on_5xx_(true) { + automatically_retry_on_5xx_(true), + max_retries_(0) { } URLFetcher::~URLFetcher() { @@ -197,8 +219,6 @@ URLFetcher::Core::Core(URLFetcher* fetcher, load_flags_(net::LOAD_NORMAL), response_code_(-1), buffer_(new net::IOBuffer(kBufferSize)), - protect_entry_(URLFetcherProtectManager::GetInstance()->Register( - original_url_.host())), num_retries_(0), was_cancelled_(false) { } @@ -214,10 +234,10 @@ void URLFetcher::Core::Start() { CHECK(request_context_getter_) << "We need an URLRequestContext!"; io_message_loop_proxy_ = request_context_getter_->GetIOMessageLoopProxy(); CHECK(io_message_loop_proxy_.get()) << "We need an IO message loop proxy"; - io_message_loop_proxy_->PostDelayedTask( + + io_message_loop_proxy_->PostTask( FROM_HERE, - NewRunnableMethod(this, &Core::StartURLRequest), - protect_entry_->UpdateBackoff(URLFetcherProtectEntry::SEND)); + NewRunnableMethod(this, &Core::StartURLRequestWhenAppropriate)); } void URLFetcher::Core::Stop() { @@ -230,6 +250,14 @@ void URLFetcher::Core::Stop() { } } +void URLFetcher::Core::ReceivedContentWasMalformed() { + DCHECK_EQ(MessageLoop::current(), delegate_loop_); + if (io_message_loop_proxy_.get()) { + io_message_loop_proxy_->PostTask( + FROM_HERE, NewRunnableMethod(this, &Core::NotifyMalformedContent)); + } +} + void URLFetcher::Core::CancelAll() { g_registry.Get().CancelAll(); } @@ -257,6 +285,8 @@ void URLFetcher::Core::OnReadCompleted(URLRequest* request, int bytes_read) { DCHECK(io_message_loop_proxy_->BelongsToCurrentThread()); url_ = request->url(); + url_throttler_entry_ = + net::URLRequestThrottlerManager::GetInstance()->RegisterRequestUrl(url_); do { if (!request_->status().is_success() || bytes_read <= 0) @@ -269,6 +299,8 @@ void URLFetcher::Core::OnReadCompleted(URLRequest* request, int bytes_read) { // See comments re: HEAD requests in OnResponseStarted(). if (!request_->status().is_io_pending() || (request_type_ == HEAD)) { + backoff_release_time_ = GetBackoffReleaseTime(); + delegate_loop_->PostTask(FROM_HERE, NewRunnableMethod( this, &Core::OnCompletedURLRequest, request_->status())); ReleaseRequest(); @@ -325,6 +357,30 @@ void URLFetcher::Core::StartURLRequest() { request_->Start(); } +void URLFetcher::Core::StartURLRequestWhenAppropriate() { + DCHECK(io_message_loop_proxy_->BelongsToCurrentThread()); + + if (was_cancelled_) + return; + + if (original_url_throttler_entry_ == NULL) { + original_url_throttler_entry_ = + net::URLRequestThrottlerManager::GetInstance()->RegisterRequestUrl( + original_url_); + } + + int64 delay = original_url_throttler_entry_->ReserveSendingTimeForNextRequest( + GetBackoffReleaseTime()); + if (delay == 0) { + StartURLRequest(); + } else { + MessageLoop::current()->PostDelayedTask( + FROM_HERE, + NewRunnableMethod(this, &Core::StartURLRequest), + delay); + } +} + void URLFetcher::Core::CancelURLRequest() { DCHECK(io_message_loop_proxy_->BelongsToCurrentThread()); @@ -344,30 +400,28 @@ void URLFetcher::Core::OnCompletedURLRequest(const URLRequestStatus& status) { DCHECK(MessageLoop::current() == delegate_loop_); // Checks the response from server. 
- if (response_code_ >= 500) { + if (response_code_ >= 500 || + status.os_error() == net::ERR_TEMPORARILY_THROTTLED) { // When encountering a server error, we will send the request again // after backoff time. - int64 back_off_time = - protect_entry_->UpdateBackoff(URLFetcherProtectEntry::FAILURE); - if (delegate_) { - fetcher_->backoff_delay_ = - base::TimeDelta::FromMilliseconds(back_off_time); - } ++num_retries_; // Restarts the request if we still need to notify the delegate. if (delegate_) { + fetcher_->backoff_delay_ = backoff_release_time_ - base::TimeTicks::Now(); + if (fetcher_->backoff_delay_ < base::TimeDelta()) + fetcher_->backoff_delay_ = base::TimeDelta(); + if (fetcher_->automatically_retry_on_5xx_ && - num_retries_ <= protect_entry_->max_retries()) { - io_message_loop_proxy_->PostDelayedTask( + num_retries_ <= fetcher_->max_retries()) { + io_message_loop_proxy_->PostTask( FROM_HERE, - NewRunnableMethod(this, &Core::StartURLRequest), back_off_time); + NewRunnableMethod(this, &Core::StartURLRequestWhenAppropriate)); } else { delegate_->OnURLFetchComplete(fetcher_, url_, status, response_code_, cookies_, data_); } } } else { - protect_entry_->UpdateBackoff(URLFetcherProtectEntry::SUCCESS); if (delegate_) { fetcher_->backoff_delay_ = base::TimeDelta(); delegate_->OnURLFetchComplete(fetcher_, url_, status, response_code_, @@ -376,13 +430,36 @@ void URLFetcher::Core::OnCompletedURLRequest(const URLRequestStatus& status) { } } +void URLFetcher::Core::NotifyMalformedContent() { + DCHECK(io_message_loop_proxy_->BelongsToCurrentThread()); + if (url_throttler_entry_ != NULL) + url_throttler_entry_->ReceivedContentWasMalformed(); +} + void URLFetcher::Core::ReleaseRequest() { request_.reset(); g_registry.Get().RemoveURLFetcherCore(this); } +base::TimeTicks URLFetcher::Core::GetBackoffReleaseTime() { + DCHECK(io_message_loop_proxy_->BelongsToCurrentThread()); + DCHECK(original_url_throttler_entry_ != NULL); + + base::TimeTicks original_url_backoff = + original_url_throttler_entry_->GetExponentialBackoffReleaseTime(); + base::TimeTicks destination_url_backoff; + if (url_throttler_entry_ != NULL && + original_url_throttler_entry_ != url_throttler_entry_) { + destination_url_backoff = + url_throttler_entry_->GetExponentialBackoffReleaseTime(); + } + + return original_url_backoff > destination_url_backoff ? + original_url_backoff : destination_url_backoff; +} + void URLFetcher::set_upload_data(const std::string& upload_content_type, - const std::string& upload_content) { + const std::string& upload_content) { core_->upload_content_type_ = upload_content_type; core_->upload_content_ = upload_content; } @@ -426,6 +503,10 @@ const GURL& URLFetcher::url() const { return core_->url_; } +void URLFetcher::ReceivedContentWasMalformed() { + core_->ReceivedContentWasMalformed(); +} + // static void URLFetcher::CancelAll() { Core::CancelAll(); diff --git a/chrome/common/net/url_fetcher.h b/chrome/common/net/url_fetcher.h index 51e15f1..547e906 100644 --- a/chrome/common/net/url_fetcher.h +++ b/chrome/common/net/url_fetcher.h @@ -16,6 +16,7 @@ #pragma once #include <string> +#include <vector> #include "base/message_loop.h" #include "base/ref_counted.h" @@ -152,6 +153,10 @@ class URLFetcher { // after backoff_delay() elapses. URLFetcher has it set to true by default. 
void set_automatically_retry_on_5xx(bool retry); + int max_retries() const { return max_retries_; } + + void set_max_retries(int max_retries) { max_retries_ = max_retries; } + // Returns the back-off delay before the request will be retried, // when a 5xx response was received. base::TimeDelta backoff_delay() const { return backoff_delay_; } @@ -174,6 +179,9 @@ class URLFetcher { // Return the URL that this fetcher is processing. const GURL& url() const; + // Reports that the received content was malformed. + void ReceivedContentWasMalformed(); + // Cancels all existing URLRequests. Will notify the URLFetcher::Delegates. // Note that any new URLFetchers created while this is running will not be // cancelled. Typically, one would call this in the CleanUp() method of an IO @@ -203,6 +211,8 @@ class URLFetcher { bool automatically_retry_on_5xx_; // Back-off time delay. 0 by default. base::TimeDelta backoff_delay_; + // Maximum retries allowed. + int max_retries_; static bool g_interception_enabled; diff --git a/chrome/common/net/url_fetcher_protect.cc b/chrome/common/net/url_fetcher_protect.cc index f078fd4..e69de29 100644 --- a/chrome/common/net/url_fetcher_protect.cc +++ b/chrome/common/net/url_fetcher_protect.cc @@ -1,180 +0,0 @@ -// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#include "chrome/common/net/url_fetcher_protect.h" - -#include "base/logging.h" - -// URLFetcherProtectEntry ---------------------------------------------------- - -using base::TimeDelta; -using base::TimeTicks; - -// Default parameters. Time is in milliseconds. -// static -const int URLFetcherProtectEntry::kDefaultSlidingWindowPeriod = 2000; - -const int URLFetcherProtectEntry::kDefaultMaxSendThreshold = 20; -const int URLFetcherProtectEntry::kDefaultMaxRetries = 0; - -const int URLFetcherProtectEntry::kDefaultInitialTimeout = 100; -const double URLFetcherProtectEntry::kDefaultMultiplier = 2.0; -const int URLFetcherProtectEntry::kDefaultConstantFactor = 100; -const int URLFetcherProtectEntry::kDefaultMaximumTimeout = 60000; - - -URLFetcherProtectEntry::URLFetcherProtectEntry() - : sliding_window_period_(kDefaultSlidingWindowPeriod), - max_send_threshold_(kDefaultMaxSendThreshold), - max_retries_(kDefaultMaxRetries), - initial_timeout_(kDefaultInitialTimeout), - multiplier_(kDefaultMultiplier), - constant_factor_(kDefaultConstantFactor), - maximum_timeout_(kDefaultMaximumTimeout) { - ResetBackoff(); -} - -URLFetcherProtectEntry::URLFetcherProtectEntry(int sliding_window_period, - int max_send_threshold, - int max_retries, - int initial_timeout, - double multiplier, - int constant_factor, - int maximum_timeout) - : sliding_window_period_(sliding_window_period), - max_send_threshold_(max_send_threshold), - max_retries_(max_retries), - initial_timeout_(initial_timeout), - multiplier_(multiplier), - constant_factor_(constant_factor), - maximum_timeout_(maximum_timeout) { - ResetBackoff(); -} - -URLFetcherProtectEntry::~URLFetcherProtectEntry() {} - -int64 URLFetcherProtectEntry::UpdateBackoff(EventType event_type) { - // request may be sent in different threads - AutoLock lock(lock_); - - TimeDelta t; - switch (event_type) { - case SEND: - t = AntiOverload(); - break; - case SUCCESS: - t = ResetBackoff(); - break; - case FAILURE: - t = IncreaseBackoff(); - break; - default: - NOTREACHED(); - } - - int64 wait = t.InMilliseconds(); - DCHECK(wait >= 0); - return wait; -} - -TimeDelta 
URLFetcherProtectEntry::AntiOverload() { - TimeDelta sw = TimeDelta::FromMilliseconds(sliding_window_period_); - TimeTicks now = TimeTicks::Now(); - // Estimate when the next request will be sent. - release_time_ = now; - if (send_log_.size() > 0) { - release_time_ = std::max(release_time_, send_log_.back()); - } - // Checks if there are too many send events in recent time. - if (send_log_.size() >= static_cast<unsigned>(max_send_threshold_)) { - release_time_ = std::max(release_time_, send_log_.front() + sw); - } - // Logs the new send event. - send_log_.push(release_time_); - // Drops the out-of-date events in the event list. - while (!send_log_.empty() && - (send_log_.front() + sw <= send_log_.back())) { - send_log_.pop(); - } - return release_time_ - now; -} - -TimeDelta URLFetcherProtectEntry::ResetBackoff() { - timeout_period_ = initial_timeout_; - release_time_ = TimeTicks::Now(); - return TimeDelta::FromMilliseconds(0); -} - -TimeDelta URLFetcherProtectEntry::IncreaseBackoff() { - TimeTicks now = TimeTicks::Now(); - - release_time_ = std::max(release_time_, now) + - TimeDelta::FromMilliseconds(timeout_period_); - - // Calculates the new backoff time. - timeout_period_ = static_cast<int> - (multiplier_ * timeout_period_ + constant_factor_); - if (maximum_timeout_ && timeout_period_ > maximum_timeout_) - timeout_period_ = maximum_timeout_; - - return release_time_ - now; -} - -// URLFetcherProtectManager -------------------------------------------------- - -// static -scoped_ptr<URLFetcherProtectManager> URLFetcherProtectManager::protect_manager_; -Lock URLFetcherProtectManager::lock_; - -URLFetcherProtectManager::~URLFetcherProtectManager() { - // Deletes all entries - ProtectService::iterator i; - for (i = services_.begin(); i != services_.end(); ++i) { - if (i->second) - delete i->second; - } -} - -// static -URLFetcherProtectManager* URLFetcherProtectManager::GetInstance() { - AutoLock lock(lock_); - - if (protect_manager_.get() == NULL) { - protect_manager_.reset(new URLFetcherProtectManager()); - } - return protect_manager_.get(); -} - -URLFetcherProtectEntry* URLFetcherProtectManager::Register( - const std::string& id) { - AutoLock lock(lock_); - - ProtectService::iterator i = services_.find(id); - - if (i != services_.end()) { - // The entry exists. - return i->second; - } - - // Creates a new entry. - URLFetcherProtectEntry* entry = new URLFetcherProtectEntry(); - services_[id] = entry; - return entry; -} - -URLFetcherProtectEntry* URLFetcherProtectManager::Register( - const std::string& id, URLFetcherProtectEntry* entry) { - AutoLock lock(lock_); - - ProtectService::iterator i = services_.find(id); - if (i != services_.end()) { - // The entry exists. - delete i->second; - } - - services_[id] = entry; - return entry; -} - -URLFetcherProtectManager::URLFetcherProtectManager() {} diff --git a/chrome/common/net/url_fetcher_protect.h b/chrome/common/net/url_fetcher_protect.h index 6372640..e69de29 100644 --- a/chrome/common/net/url_fetcher_protect.h +++ b/chrome/common/net/url_fetcher_protect.h @@ -1,152 +0,0 @@ -// Copyright (c) 2010 The Chromium Authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. -// -// This file implements backoff in the suggest system so that we don't -// DOS the Suggest servers when using URLFetcher. 
- -#ifndef CHROME_COMMON_NET_URL_FETCHER_PROTECT_H_ -#define CHROME_COMMON_NET_URL_FETCHER_PROTECT_H_ -#pragma once - -#include <map> -#include <queue> -#include <string> - -#include "base/lock.h" -#include "base/scoped_ptr.h" -#include "base/time.h" - - -// This class is used to manage one service's rate protection. It maintains -// a queue of connection successes and failures and analyzes the requests -// over some period of time, in order to deduce the backoff time of every -// request. -// The backoff algorithm consists of two parts. Firstly, avoid too many -// send events in a sliding window. That will prevent traffic overload. -// Secondly, exponential backoff is used when receiving an error message -// from server. Exponential backoff period is calculated using the following -// formula: -// -// initial backoff time (the first time to receive error) -// backoff = k * current_backoff + c (the second, third, ... error) -// maximum backoff time (when backoff > maximum backoff time) -// -// where |k| is the multiplier, and |c| is the constant factor. -class URLFetcherProtectEntry { - public: - enum EventType { - SEND, // request will be sent out - SUCCESS, // successful response - FAILURE // no response or error - }; - - URLFetcherProtectEntry(); - URLFetcherProtectEntry(int sliding_window_period, int max_send_threshold, - int max_retries, int initial_timeout, - double multiplier, int constant_factor, - int maximum_timeout); - - - virtual ~URLFetcherProtectEntry(); - - // When a connection event happens, log it to the queue, and recalculate - // the timeout period. It returns the backoff time, in milliseconds, that - // indicates to the sender how long should it wait before sending the request. - // If the request is allowed to be sent immediately, the backoff time is 0. - int64 UpdateBackoff(EventType event_type); - - // Returns the max retries allowed. - int max_retries() const { - return max_retries_; - } - - // Sets the max retries. - void SetMaxRetries(int max_retries) { - max_retries_ = max_retries; - } - - private: - // When a request comes, calculate the release time for it. - // Returns the backoff time before sending. - base::TimeDelta AntiOverload(); - // Resets backoff when service is ok. - // Returns the backoff time before sending. - base::TimeDelta ResetBackoff(); - // Calculates new backoff when encountering a failure. - // Returns the backoff time before sending. - base::TimeDelta IncreaseBackoff(); - - // Default parameters. Time is in milliseconds. - static const int kDefaultSlidingWindowPeriod; - static const int kDefaultMaxSendThreshold; - static const int kDefaultMaxRetries; - static const int kDefaultInitialTimeout; - static const double kDefaultMultiplier; - static const int kDefaultConstantFactor; - static const int kDefaultMaximumTimeout; - - // time to consider events when checking backoff - int sliding_window_period_; - - // maximum number of requests allowed in sliding window period - int max_send_threshold_; - // maximum retris allowed - int max_retries_; - - // initial timeout on first failure - int initial_timeout_; - // factor by which to multiply on exponential backoff (e.g., 2.0) - double multiplier_; - // constant time term to add to each attempt - int constant_factor_; - // maximum amount of time between requests - int maximum_timeout_; - - // current exponential backoff period - int timeout_period_; - // time that protection is scheduled to end - base::TimeTicks release_time_; - - // Sets up a lock to ensure thread safe. 
- Lock lock_; - - // A list of the recent send events. We ues them to decide whether - // there are too many requests sent in sliding window. - std::queue<base::TimeTicks> send_log_; - - DISALLOW_COPY_AND_ASSIGN(URLFetcherProtectEntry); -}; - - -// This singleton class is used to manage all protect entries. -// Now we use the host name as service id. -class URLFetcherProtectManager { - public: - ~URLFetcherProtectManager(); - - // Returns the global instance of this class. - static URLFetcherProtectManager* GetInstance(); - - // Registers a new entry in this service. If the entry already exists, - // just returns it. Ownership of the return object remains with the manager. - URLFetcherProtectEntry* Register(const std::string& id); - // Always registers the entry even when it exists; any existing entry for this - // id will be deleted and existing references to it will become invalid. - // Ownership of the return object remains with the manager. - URLFetcherProtectEntry* Register(const std::string& id, - URLFetcherProtectEntry* entry); - - private: - URLFetcherProtectManager(); - - typedef std::map<const std::string, URLFetcherProtectEntry*> ProtectService; - - static Lock lock_; - static scoped_ptr<URLFetcherProtectManager> protect_manager_; - ProtectService services_; - - DISALLOW_COPY_AND_ASSIGN(URLFetcherProtectManager); -}; - -#endif // CHROME_COMMON_NET_URL_FETCHER_PROTECT_H_ diff --git a/chrome/common/net/url_fetcher_unittest.cc b/chrome/common/net/url_fetcher_unittest.cc index 65fb3c5..cd213e7 100644 --- a/chrome/common/net/url_fetcher_unittest.cc +++ b/chrome/common/net/url_fetcher_unittest.cc @@ -8,11 +8,11 @@ #include "build/build_config.h" #include "chrome/common/chrome_plugin_lib.h" #include "chrome/common/net/url_fetcher.h" -#include "chrome/common/net/url_fetcher_protect.h" #include "chrome/common/net/url_request_context_getter.h" #include "net/http/http_response_headers.h" -#include "net/url_request/url_request_unittest.h" #include "net/test/test_server.h" +#include "net/url_request/url_request_throttler_manager.h" +#include "net/url_request/url_request_unittest.h" #include "testing/gtest/include/gtest/gtest.h" #if defined(USE_NSS) @@ -312,6 +312,7 @@ void URLFetcherProtectTest::CreateFetcher(const GURL& url) { fetcher_->set_request_context(new TestURLRequestContextGetter( io_message_loop_proxy())); start_time_ = Time::Now(); + fetcher_->set_max_retries(11); fetcher_->Start(); } @@ -338,7 +339,7 @@ void URLFetcherProtectTest::OnURLFetchComplete(const URLFetcher* source, fetcher_->Start(); } else { // We have already sent 20 requests continuously. And we expect that - // it takes more than 1 second due to the overload pretection settings. + // it takes more than 1 second due to the overload protection settings. 
EXPECT_TRUE(Time::Now() - start_time_ >= one_second); URLFetcherTest::OnURLFetchComplete(source, url, status, response_code, cookies, data); @@ -352,6 +353,7 @@ void URLFetcherProtectTestPassedThrough::CreateFetcher(const GURL& url) { io_message_loop_proxy())); fetcher_->set_automatically_retry_on_5xx(false); start_time_ = Time::Now(); + fetcher_->set_max_retries(11); fetcher_->Start(); } @@ -419,6 +421,7 @@ void URLFetcherCancelTest::CreateFetcher(const GURL& url) { CancelTestURLRequestContextGetter* context_getter = new CancelTestURLRequestContextGetter(io_message_loop_proxy()); fetcher_->set_request_context(context_getter); + fetcher_->set_max_retries(2); fetcher_->Start(); // We need to wait for the creation of the URLRequestContext, since we // rely on it being destroyed as a signal to end the test. @@ -500,14 +503,16 @@ TEST_F(URLFetcherProtectTest, Overload) { // Registers an entry for test url. It only allows 3 requests to be sent // in 200 milliseconds. - URLFetcherProtectManager* manager = URLFetcherProtectManager::GetInstance(); - URLFetcherProtectEntry* entry = - new URLFetcherProtectEntry(200, 3, 11, 1, 2.0, 0, 256); - manager->Register(url.host(), entry); + scoped_refptr<net::URLRequestThrottlerEntry> entry( + new net::URLRequestThrottlerEntry(200, 3, 1, 0, 2.0, 0.0, 256)); + net::URLRequestThrottlerManager::GetInstance()->OverrideEntryForTests( + url, entry); CreateFetcher(url); MessageLoop::current()->Run(); + + net::URLRequestThrottlerManager::GetInstance()->EraseEntryForTests(url); } TEST_F(URLFetcherProtectTest, ServerUnavailable) { @@ -520,14 +525,16 @@ TEST_F(URLFetcherProtectTest, ServerUnavailable) { // new_backoff = 2.0 * old_backoff + 0 // and maximum backoff time is 256 milliseconds. // Maximum retries allowed is set to 11. - URLFetcherProtectManager* manager = URLFetcherProtectManager::GetInstance(); - URLFetcherProtectEntry* entry = - new URLFetcherProtectEntry(200, 3, 11, 1, 2.0, 0, 256); - manager->Register(url.host(), entry); + scoped_refptr<net::URLRequestThrottlerEntry> entry( + new net::URLRequestThrottlerEntry(200, 3, 1, 0, 2.0, 0.0, 256)); + net::URLRequestThrottlerManager::GetInstance()->OverrideEntryForTests( + url, entry); CreateFetcher(url); MessageLoop::current()->Run(); + + net::URLRequestThrottlerManager::GetInstance()->EraseEntryForTests(url); } TEST_F(URLFetcherProtectTestPassedThrough, ServerUnavailablePropagateResponse) { @@ -538,20 +545,21 @@ TEST_F(URLFetcherProtectTestPassedThrough, ServerUnavailablePropagateResponse) { // Registers an entry for test url. The backoff time is calculated by: // new_backoff = 2.0 * old_backoff + 0 - // and maximum backoff time is 256 milliseconds. + // and maximum backoff time is 150000 milliseconds. // Maximum retries allowed is set to 11. - URLFetcherProtectManager* manager = URLFetcherProtectManager::GetInstance(); + scoped_refptr<net::URLRequestThrottlerEntry> entry( + new net::URLRequestThrottlerEntry(200, 3, 100, 0, 2.0, 0.0, 150000)); // Total time if *not* for not doing automatic backoff would be 150s. // In reality it should be "as soon as server responds". 
- URLFetcherProtectEntry* entry = - new URLFetcherProtectEntry(200, 3, 11, 100, 2.0, 0, 150000); - manager->Register(url.host(), entry); + net::URLRequestThrottlerManager::GetInstance()->OverrideEntryForTests( + url, entry); CreateFetcher(url); MessageLoop::current()->Run(); -} + net::URLRequestThrottlerManager::GetInstance()->EraseEntryForTests(url); +} TEST_F(URLFetcherBadHTTPSTest, BadHTTPSTest) { net::TestServer::HTTPSOptions https_options( @@ -573,10 +581,10 @@ TEST_F(URLFetcherCancelTest, ReleasesContext) { // new_backoff = 2.0 * old_backoff + 0 // The initial backoff is 2 seconds and maximum backoff is 4 seconds. // Maximum retries allowed is set to 2. - URLFetcherProtectManager* manager = URLFetcherProtectManager::GetInstance(); - URLFetcherProtectEntry* entry = - new URLFetcherProtectEntry(200, 3, 2, 2000, 2.0, 0, 4000); - manager->Register(url.host(), entry); + scoped_refptr<net::URLRequestThrottlerEntry> entry( + new net::URLRequestThrottlerEntry(200, 3, 2000, 0, 2.0, 0.0, 4000)); + net::URLRequestThrottlerManager::GetInstance()->OverrideEntryForTests( + url, entry); // Create a separate thread that will create the URLFetcher. The current // (main) thread will do the IO, and when the fetch is complete it will @@ -588,6 +596,8 @@ TEST_F(URLFetcherCancelTest, ReleasesContext) { t.message_loop()->PostTask(FROM_HERE, new FetcherWrapperTask(this, url)); MessageLoop::current()->Run(); + + net::URLRequestThrottlerManager::GetInstance()->EraseEntryForTests(url); } TEST_F(URLFetcherCancelTest, CancelWhileDelayedStartTaskPending) { @@ -597,22 +607,14 @@ TEST_F(URLFetcherCancelTest, CancelWhileDelayedStartTaskPending) { GURL url(test_server.GetURL("files/server-unavailable.html")); // Register an entry for test url. - // - // Ideally we would mock URLFetcherProtectEntry to return XXX seconds - // in response to entry->UpdateBackoff(SEND). - // - // Unfortunately this function is time sensitive, so we fudge some numbers - // to make it at least somewhat likely to have a non-zero deferred - // delay when running. - // - // Using a sliding window of 2 seconds, and max of 1 request, under a fast + // Using a sliding window of 4 seconds, and max of 1 request, under a fast // run we expect to have a 4 second delay when posting the Start task. - URLFetcherProtectManager* manager = URLFetcherProtectManager::GetInstance(); - URLFetcherProtectEntry* entry = - new URLFetcherProtectEntry(2000, 1, 2, 2000, 2.0, 0, 4000); - EXPECT_EQ(0, entry->UpdateBackoff(URLFetcherProtectEntry::SEND)); - entry->UpdateBackoff(URLFetcherProtectEntry::SEND); // Returns about 2000. - manager->Register(url.host(), entry); + scoped_refptr<net::URLRequestThrottlerEntry> entry( + new net::URLRequestThrottlerEntry(4000, 1, 2000, 0, 2.0, 0.0, 4000)); + net::URLRequestThrottlerManager::GetInstance()->OverrideEntryForTests( + url, entry); + // Fake that a request has just started. + entry->ReserveSendingTimeForNextRequest(base::TimeTicks()); // The next request we try to send will be delayed by ~4 seconds. // The slower the test runs, the less the delay will be (since it takes the @@ -623,6 +625,8 @@ TEST_F(URLFetcherCancelTest, CancelWhileDelayedStartTaskPending) { t.message_loop()->PostTask(FROM_HERE, new FetcherWrapperTask(this, url)); MessageLoop::current()->Run(); + + net::URLRequestThrottlerManager::GetInstance()->EraseEntryForTests(url); } } // namespace. 
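Pulling the url_fetcher.cc hunks above together: a request is now retried only when the response is a 5xx or the new net::ERR_TEMPORARILY_THROTTLED (-139, added to net_error_list.h later in this diff), retries remain gated by the owner's automatically_retry_on_5xx_ flag, and the cap is the URLFetcher's own max_retries() rather than the protect entry's. A minimal standalone model of that decision follows (illustrative names, not the patch's code; num_retries already counts the current failure, since the patch increments it before the check):

#include <cstdio>

const int kErrTemporarilyThrottled = -139;  // net::ERR_TEMPORARILY_THROTTLED

bool ShouldRetry(int response_code, int os_error, int num_retries,
                 int max_retries, bool automatically_retry_on_5xx) {
  if (response_code < 500 && os_error != kErrTemporarilyThrottled)
    return false;  // not a retryable outcome: deliver to the delegate
  return automatically_retry_on_5xx && num_retries <= max_retries;
}

int main() {
  std::printf("%d\n", ShouldRetry(503, 0, 1, 5, true));  // 1: retry
  std::printf("%d\n", ShouldRetry(503, 0, 6, 5, true));  // 0: give up
  std::printf("%d\n", ShouldRetry(200, 0, 0, 5, true));  // 0: done
  return 0;
}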
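The seven arguments the tests above pass to URLRequestThrottlerEntry are, per the constructor added later in this diff: sliding_window_period_ms, max_send_threshold, initial_backoff_ms, additional_constant_ms, multiply_factor, jitter_factor, maximum_backoff_ms. So (200, 3, 1, 0, 2.0, 0.0, 256) means "at most 3 sends per 200 ms window, exponential back-off starting at 1 ms, doubling, no jitter, capped at 256 ms". The following is a minimal standalone sketch of just the sliding-window rule (illustrative names, not Chromium code; it mirrors ReserveSendingTimeForNextRequest() in url_request_throttler_entry.cc further down in this diff):

#include <algorithm>
#include <cstdio>
#include <queue>

class SlidingWindowModel {
 public:
  SlidingWindowModel(long window_ms, size_t max_sends)
      : window_ms_(window_ms), max_sends_(max_sends), release_ms_(0) {}

  // Returns how many milliseconds a request arriving at |now_ms| must wait.
  long ReserveSendTime(long now_ms) {
    long send_ms = std::max(now_ms, release_ms_);
    send_log_.push(send_ms);  // log the (scheduled) send event
    release_ms_ = send_ms;
    // Drop events that have fallen out of the window.
    while (send_log_.front() + window_ms_ <= release_ms_ ||
           send_log_.size() > max_sends_) {
      send_log_.pop();
    }
    // Window full: the next send must wait until the oldest event expires.
    if (send_log_.size() == max_sends_)
      release_ms_ = send_log_.front() + window_ms_;
    return send_ms - now_ms;
  }

 private:
  long window_ms_;
  size_t max_sends_;
  long release_ms_;            // earliest allowed time for the next send
  std::queue<long> send_log_;  // timestamps of recent sends
};

int main() {
  SlidingWindowModel model(200, 3);  // 3 sends per 200 ms, as in the tests
  for (int i = 0; i < 5; ++i)
    std::printf("request %d delayed %ld ms\n", i, model.ReserveSendTime(0));
  // Prints 0, 0, 0, 200, 200: the fourth and fifth requests are pushed
  // past the first window.
  return 0;
}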
diff --git a/chrome/service/cloud_print/cloud_print_consts.cc b/chrome/service/cloud_print/cloud_print_consts.cc index 8227466..cf62f92 100644 --- a/chrome/service/cloud_print/cloud_print_consts.cc +++ b/chrome/service/cloud_print/cloud_print_consts.cc @@ -39,10 +39,6 @@ const char kCloudPrintPushNotificationsSource[] = "cloudprint.google.com"; // certain requests. const char kChromeCloudPrintProxyHeader[] = "X-Google-CloudPrint-Proxy: Chrome"; -// The request retry policy names. These strings are not valid hostnames, they -// are just string keys. -const char kCloudPrintAPIRetryPolicy[] = "cloudprint.google.com/api"; -const char kJobDataRetryPolicy[] = "cloudprint.google.com/jobdata"; // The string to be appended to the user-agent for cloudprint requests. const char kCloudPrintUserAgent[] = "GoogleCloudPrintProxy"; diff --git a/chrome/service/cloud_print/cloud_print_consts.h b/chrome/service/cloud_print/cloud_print_consts.h index 748a300..6c150b7 100644 --- a/chrome/service/cloud_print/cloud_print_consts.h +++ b/chrome/service/cloud_print/cloud_print_consts.h @@ -36,8 +36,6 @@ extern const char kCloudPrintGaiaServiceId[]; extern const char kSyncGaiaServiceId[]; extern const char kCloudPrintPushNotificationsSource[]; extern const char kChromeCloudPrintProxyHeader[]; -extern const char kCloudPrintAPIRetryPolicy[]; -extern const char kJobDataRetryPolicy[]; extern const char kCloudPrintUserAgent[]; extern const char kJobFetchReasonStartup[]; extern const char kJobFetchReasonPoll[]; @@ -46,8 +44,8 @@ extern const char kJobFetchReasonQueryMore[]; // Max retry count for job data fetch requests. const int kJobDataMaxRetryCount = 5; -// Look at CloudPrintProxyBackend::Core::CreateDefaultRetryPolicy for default -// values of the request retry policy. +// Max retry count (infinity) for API fetch requests. +const int kCloudPrintAPIMaxRetryCount = -1; // When we don't have XMPP notifications available, we resort to polling for // print jobs. We choose a random interval in seconds between these 2 values. diff --git a/chrome/service/cloud_print/cloud_print_proxy_backend.cc b/chrome/service/cloud_print/cloud_print_proxy_backend.cc index 7dac83e..dbb374f 100644 --- a/chrome/service/cloud_print/cloud_print_proxy_backend.cc +++ b/chrome/service/cloud_print/cloud_print_proxy_backend.cc @@ -15,7 +15,6 @@ #include "base/string_util.h" #include "base/utf_string_conversions.h" #include "base/values.h" -#include "chrome/common/net/url_fetcher_protect.h" #include "chrome/service/cloud_print/cloud_print_consts.h" #include "chrome/service/cloud_print/cloud_print_helpers.h" #include "chrome/service/cloud_print/cloud_print_url_fetcher.h" @@ -119,8 +118,6 @@ class CloudPrintProxyBackend::Core const std::string& email); void NotifyAuthenticationFailed(); - URLFetcherProtectEntry* CreateDefaultRetryPolicy(); - // Starts a new printer registration process. void StartRegistration(); // Ends the printer registration process. @@ -357,36 +354,9 @@ void CloudPrintProxyBackend::Core::DoInitializeWithToken( proxy_id_ = proxy_id; - // Register the request retry policies for cloud print APIs and job data - // requests. - URLFetcherProtectManager::GetInstance()->Register( - kCloudPrintAPIRetryPolicy, CreateDefaultRetryPolicy()); - URLFetcherProtectManager::GetInstance()->Register( - kJobDataRetryPolicy, CreateDefaultRetryPolicy())->SetMaxRetries( - kJobDataMaxRetryCount); - StartRegistration(); } -URLFetcherProtectEntry* -CloudPrintProxyBackend::Core::CreateDefaultRetryPolicy() { - // Times are in milliseconds. 
- const int kSlidingWindowPeriod = 2000; - const int kMaxSendThreshold = 20; - const int kMaxRetries = -1; - const int kInitialTimeout = 100; - const double kMultiplier = 2.0; - const int kConstantFactor = 100; - const int kMaximumTimeout = 5*60*1000; - return new URLFetcherProtectEntry(kSlidingWindowPeriod, - kMaxSendThreshold, - kMaxRetries, - kInitialTimeout, - kMultiplier, - kConstantFactor, - kMaximumTimeout); -} - void CloudPrintProxyBackend::Core::StartRegistration() { DCHECK(MessageLoop::current() == backend_->core_thread_.message_loop()); printer_list_.clear(); @@ -444,7 +414,7 @@ void CloudPrintProxyBackend::Core::GetRegisteredPrinters() { &CloudPrintProxyBackend::Core::HandlePrinterListResponse; request_ = new CloudPrintURLFetcher; request_->StartGetRequest(printer_list_url, this, auth_token_, - kCloudPrintAPIRetryPolicy); + kCloudPrintAPIMaxRetryCount); } void CloudPrintProxyBackend::Core::RegisterNextPrinter() { @@ -511,7 +481,7 @@ void CloudPrintProxyBackend::Core::RegisterNextPrinter() { &CloudPrintProxyBackend::Core::HandleRegisterPrinterResponse; request_ = new CloudPrintURLFetcher; request_->StartPostRequest(register_url, this, auth_token_, - kCloudPrintAPIRetryPolicy, mime_type, + kCloudPrintAPIMaxRetryCount, mime_type, post_data); } else { diff --git a/chrome/service/cloud_print/cloud_print_url_fetcher.cc b/chrome/service/cloud_print/cloud_print_url_fetcher.cc index 8996a0a..2e4fc55 100644 --- a/chrome/service/cloud_print/cloud_print_url_fetcher.cc +++ b/chrome/service/cloud_print/cloud_print_url_fetcher.cc @@ -7,7 +7,6 @@ #include "base/string_util.h" #include "base/values.h" #include "chrome/common/net/http_return.h" -#include "chrome/common/net/url_fetcher_protect.h" #include "chrome/service/cloud_print/cloud_print_consts.h" #include "chrome/service/cloud_print/cloud_print_helpers.h" #include "chrome/service/net/service_url_request_context.h" @@ -16,15 +15,14 @@ CloudPrintURLFetcher::CloudPrintURLFetcher() : delegate_(NULL), - protect_entry_(NULL), num_retries_(0) { } void CloudPrintURLFetcher::StartGetRequest(const GURL& url, Delegate* delegate, const std::string& auth_token, - const std::string& retry_policy) { - StartRequestHelper(url, URLFetcher::GET, delegate, auth_token, retry_policy, + int max_retries) { + StartRequestHelper(url, URLFetcher::GET, delegate, auth_token, max_retries, std::string(), std::string()); } @@ -32,10 +30,10 @@ void CloudPrintURLFetcher::StartPostRequest( const GURL& url, Delegate* delegate, const std::string& auth_token, - const std::string& retry_policy, + int max_retries, const std::string& post_data_mime_type, const std::string& post_data) { - StartRequestHelper(url, URLFetcher::POST, delegate, auth_token, retry_policy, + StartRequestHelper(url, URLFetcher::POST, delegate, auth_token, max_retries, post_data_mime_type, post_data); } @@ -88,23 +86,23 @@ void CloudPrintURLFetcher::OnURLFetchComplete( } // Retry the request if needed. if (action == RETRY_REQUEST) { - int64 back_off_time = - protect_entry_->UpdateBackoff(URLFetcherProtectEntry::FAILURE); + // If the response code is greater than or equal to 500, then the back-off + // period has been increased at the network level; otherwise, explicitly + // call ReceivedContentWasMalformed() to count the current request as a + // failure and increase the back-off period. 
+ if (response_code < 500) + request_->ReceivedContentWasMalformed(); + ++num_retries_; - int max_retries = protect_entry_->max_retries(); - if ((-1 != max_retries) && (num_retries_ > max_retries)) { + if ((-1 != source->max_retries()) && + (num_retries_ > source->max_retries())) { // Retry limit reached. Give up. delegate_->OnRequestGiveUp(); } else { // Either no retry limit specified or retry limit has not yet been // reached. Try again. - MessageLoop::current()->PostDelayedTask( - FROM_HERE, - NewRunnableMethod(this, &CloudPrintURLFetcher::StartRequestNow), - back_off_time); + request_->Start(); } - } else { - protect_entry_->UpdateBackoff(URLFetcherProtectEntry::SUCCESS); } } @@ -113,7 +111,7 @@ void CloudPrintURLFetcher::StartRequestHelper( URLFetcher::RequestType request_type, Delegate* delegate, const std::string& auth_token, - const std::string& retry_policy, + int max_retries, const std::string& post_data_mime_type, const std::string& post_data) { DCHECK(delegate); @@ -121,6 +119,7 @@ void CloudPrintURLFetcher::StartRequestHelper( request_->set_request_context(GetRequestContextGetter()); // Since we implement our own retry logic, disable the retry in URLFetcher. request_->set_automatically_retry_on_5xx(false); + request_->set_max_retries(max_retries); delegate_ = delegate; std::string headers = "Authorization: GoogleLogin auth="; headers += auth_token; @@ -130,16 +129,7 @@ void CloudPrintURLFetcher::StartRequestHelper( if (request_type == URLFetcher::POST) { request_->set_upload_data(post_data_mime_type, post_data); } - // Initialize the retry policy for this request. - protect_entry_ = - URLFetcherProtectManager::GetInstance()->Register(retry_policy); - MessageLoop::current()->PostDelayedTask( - FROM_HERE, - NewRunnableMethod(this, &CloudPrintURLFetcher::StartRequestNow), - protect_entry_->UpdateBackoff(URLFetcherProtectEntry::SEND)); -} -void CloudPrintURLFetcher::StartRequestNow() { request_->Start(); } diff --git a/chrome/service/cloud_print/cloud_print_url_fetcher.h b/chrome/service/cloud_print/cloud_print_url_fetcher.h index cbf9bf2..68499b4 100644 --- a/chrome/service/cloud_print/cloud_print_url_fetcher.h +++ b/chrome/service/cloud_print/cloud_print_url_fetcher.h @@ -13,16 +13,13 @@ class DictionaryValue; class GURL; -class URLFetcherProtectEntry; class URLRequestStatus; // A wrapper around URLFetcher for CloudPrint. URLFetcher applies retry logic // only on HTTP response codes >= 500. In the cloud print case, we want to // retry on all network errors. In addition, we want to treat non-JSON responses // (for all CloudPrint APIs that expect JSON responses) as errors and they -// must also be retried. Also URLFetcher uses the host name of the URL as the -// key for applying the retry policy. In our case, we want to apply one global -// policy for many requests (not necessarily scoped by hostname). +// must also be retried. 
class CloudPrintURLFetcher : public base::RefCountedThreadSafe<CloudPrintURLFetcher>, public URLFetcher::Delegate { @@ -80,11 +77,11 @@ class CloudPrintURLFetcher void StartGetRequest(const GURL& url, Delegate* delegate, const std::string& auth_token, - const std::string& retry_policy); + int max_retries); void StartPostRequest(const GURL& url, Delegate* delegate, const std::string& auth_token, - const std::string& retry_policy, + int max_retries, const std::string& post_data_mime_type, const std::string& post_data); @@ -103,14 +100,12 @@ class CloudPrintURLFetcher URLFetcher::RequestType request_type, Delegate* delegate, const std::string& auth_token, - const std::string& retry_policy, + int max_retries, const std::string& post_data_mime_type, const std::string& post_data); - void StartRequestNow(); scoped_ptr<URLFetcher> request_; Delegate* delegate_; - URLFetcherProtectEntry* protect_entry_; int num_retries_; }; diff --git a/chrome/service/cloud_print/cloud_print_url_fetcher_unittest.cc b/chrome/service/cloud_print/cloud_print_url_fetcher_unittest.cc index e3ca15b..9380eaa 100644 --- a/chrome/service/cloud_print/cloud_print_url_fetcher_unittest.cc +++ b/chrome/service/cloud_print/cloud_print_url_fetcher_unittest.cc @@ -7,14 +7,14 @@ #include "base/ref_counted.h" #include "base/thread.h" #include "base/waitable_event.h" -#include "chrome/common/net/url_fetcher_protect.h" #include "chrome/common/net/url_request_context_getter.h" #include "chrome/service/service_process.h" #include "chrome/service/cloud_print/cloud_print_url_fetcher.h" #include "googleurl/src/gurl.h" #include "net/test/test_server.h" -#include "net/url_request/url_request_unittest.h" #include "net/url_request/url_request_status.h" +#include "net/url_request/url_request_throttler_manager.h" +#include "net/url_request/url_request_unittest.h" #include "testing/gtest/include/gtest/gtest.h" using base::Time; @@ -69,10 +69,10 @@ class TestCloudPrintURLFetcher : public CloudPrintURLFetcher { class CloudPrintURLFetcherTest : public testing::Test, public CloudPrintURLFetcher::Delegate { public: - CloudPrintURLFetcherTest() : fetcher_(NULL) { } + CloudPrintURLFetcherTest() : max_retries_(0), fetcher_(NULL) { } // Creates a URLFetcher, using the program's main thread to do IO. - virtual void CreateFetcher(const GURL& url, const std::string& retry_policy); + virtual void CreateFetcher(const GURL& url, int max_retries); // CloudPrintURLFetcher::Delegate virtual CloudPrintURLFetcher::ResponseAction HandleRawResponse( @@ -113,7 +113,7 @@ class CloudPrintURLFetcherTest : public testing::Test, // a UI thread, we dispatch a worker thread to do so. 
MessageLoopForIO io_loop_; scoped_refptr<base::MessageLoopProxy> io_message_loop_proxy_; - std::string retry_policy_; + int max_retries_; Time start_time_; scoped_refptr<CloudPrintURLFetcher> fetcher_; }; @@ -188,12 +188,11 @@ class CloudPrintURLFetcherRetryBackoffTest : public CloudPrintURLFetcherTest { }; -void CloudPrintURLFetcherTest::CreateFetcher(const GURL& url, - const std::string& retry_policy) { +void CloudPrintURLFetcherTest::CreateFetcher(const GURL& url, int max_retries) { fetcher_ = new TestCloudPrintURLFetcher(io_message_loop_proxy()); - retry_policy_ = retry_policy; + max_retries_ = max_retries; start_time_ = Time::Now(); - fetcher_->StartGetRequest(url, this, "", retry_policy_); + fetcher_->StartGetRequest(url, this, "", max_retries_); } CloudPrintURLFetcher::ResponseAction @@ -265,10 +264,10 @@ CloudPrintURLFetcherOverloadTest::HandleRawData(const URLFetcher* source, const TimeDelta one_second = TimeDelta::FromMilliseconds(1000); response_count_++; if (response_count_ < 20) { - fetcher_->StartGetRequest(url, this, "", retry_policy_); + fetcher_->StartGetRequest(url, this, "", max_retries_); } else { // We have already sent 20 requests continuously. And we expect that - // it takes more than 1 second due to the overload pretection settings. + // it takes more than 1 second due to the overload protection settings. EXPECT_TRUE(Time::Now() - start_time_ >= one_second); io_message_loop_proxy()->PostTask(FROM_HERE, new MessageLoop::QuitTask()); } @@ -297,7 +296,7 @@ TEST_F(CloudPrintURLFetcherBasicTest, HandleRawResponse) { ASSERT_TRUE(test_server.Start()); SetHandleRawResponse(true); - CreateFetcher(test_server.GetURL("echo"), "DummyRetryPolicy"); + CreateFetcher(test_server.GetURL("echo"), 0); MessageLoop::current()->Run(); } @@ -307,7 +306,7 @@ TEST_F(CloudPrintURLFetcherBasicTest, FLAKY_HandleRawData) { ASSERT_TRUE(test_server.Start()); SetHandleRawData(true); - CreateFetcher(test_server.GetURL("echo"), "DummyRetryPolicy"); + CreateFetcher(test_server.GetURL("echo"), 0); MessageLoop::current()->Run(); } @@ -319,15 +318,16 @@ TEST_F(CloudPrintURLFetcherOverloadTest, Protect) { // Registers an entry for test url. It only allows 3 requests to be sent // in 200 milliseconds. - std::string retry_policy = "OverloadTestPolicy"; - URLFetcherProtectManager* manager = URLFetcherProtectManager::GetInstance(); - URLFetcherProtectEntry* entry = - new URLFetcherProtectEntry(200, 3, 11, 1, 2.0, 0, 256); - manager->Register(retry_policy, entry); + scoped_refptr<net::URLRequestThrottlerEntry> entry( + new net::URLRequestThrottlerEntry(200, 3, 1, 0, 2.0, 0.0, 256)); + net::URLRequestThrottlerManager::GetInstance()->OverrideEntryForTests( + url, entry); - CreateFetcher(url, retry_policy); + CreateFetcher(url, 11); MessageLoop::current()->Run(); + + net::URLRequestThrottlerManager::GetInstance()->EraseEntryForTests(url); } // http://code.google.com/p/chromium/issues/detail?id=62758 @@ -341,15 +341,16 @@ TEST_F(CloudPrintURLFetcherRetryBackoffTest, FLAKY_GiveUp) { // new_backoff = 2.0 * old_backoff + 0 // and maximum backoff time is 256 milliseconds. // Maximum retries allowed is set to 11. 
- std::string retry_policy = "BackoffTestPolicy"; - URLFetcherProtectManager* manager = URLFetcherProtectManager::GetInstance(); - URLFetcherProtectEntry* entry = - new URLFetcherProtectEntry(200, 3, 11, 1, 2.0, 0, 256); - manager->Register(retry_policy, entry); + scoped_refptr<net::URLRequestThrottlerEntry> entry( + new net::URLRequestThrottlerEntry(200, 3, 1, 0, 2.0, 0.0, 256)); + net::URLRequestThrottlerManager::GetInstance()->OverrideEntryForTests( + url, entry); - CreateFetcher(url, retry_policy); + CreateFetcher(url, 11); MessageLoop::current()->Run(); + + net::URLRequestThrottlerManager::GetInstance()->EraseEntryForTests(url); } } // namespace. diff --git a/chrome/service/cloud_print/job_status_updater.cc b/chrome/service/cloud_print/job_status_updater.cc index 4719a6d..f717f29 100644 --- a/chrome/service/cloud_print/job_status_updater.cc +++ b/chrome/service/cloud_print/job_status_updater.cc @@ -62,7 +62,7 @@ void JobStatusUpdater::UpdateStatus() { request_->StartGetRequest( CloudPrintHelpers::GetUrlForJobStatusUpdate( cloud_print_server_url_, job_id_, last_job_details_), - this, auth_token_, kCloudPrintAPIRetryPolicy); + this, auth_token_, kCloudPrintAPIMaxRetryCount); } } } diff --git a/chrome/service/cloud_print/printer_job_handler.cc b/chrome/service/cloud_print/printer_job_handler.cc index e705762..d285594 100644 --- a/chrome/service/cloud_print/printer_job_handler.cc +++ b/chrome/service/cloud_print/printer_job_handler.cc @@ -91,7 +91,7 @@ void PrinterJobHandler::Start() { request_->StartGetRequest( CloudPrintHelpers::GetUrlForPrinterDelete( cloud_print_server_url_, printer_info_cloud_.printer_id), - this, auth_token_, kCloudPrintAPIRetryPolicy); + this, auth_token_, kCloudPrintAPIMaxRetryCount); } if (!task_in_progress_ && printer_update_pending_) { printer_update_pending_ = false; @@ -107,7 +107,7 @@ void PrinterJobHandler::Start() { CloudPrintHelpers::GetUrlForJobFetch( cloud_print_server_url_, printer_info_cloud_.printer_id, job_fetch_reason_), - this, auth_token_, kCloudPrintAPIRetryPolicy); + this, auth_token_, kCloudPrintAPIMaxRetryCount); last_job_fetch_time_ = base::TimeTicks::Now(); VLOG(1) << "Last job fetch time for printer " << printer_info_.printer_name.c_str() << " is " @@ -214,7 +214,7 @@ bool PrinterJobHandler::UpdatePrinterInfo() { request_->StartPostRequest( CloudPrintHelpers::GetUrlForPrinterUpdate( cloud_print_server_url_, printer_info_cloud_.printer_id), - this, auth_token_, kCloudPrintAPIRetryPolicy, mime_type, post_data); + this, auth_token_, kCloudPrintAPIMaxRetryCount, mime_type, post_data); ret = true; } return ret; @@ -355,7 +355,7 @@ PrinterJobHandler::HandleJobMetadataResponse( request_->StartGetRequest(GURL(print_ticket_url.c_str()), this, auth_token_, - kCloudPrintAPIRetryPolicy); + kCloudPrintAPIMaxRetryCount); } } } @@ -379,7 +379,7 @@ PrinterJobHandler::HandlePrintTicketResponse(const URLFetcher* source, request_->StartGetRequest(GURL(print_data_url_.c_str()), this, auth_token_, - kJobDataRetryPolicy); + kJobDataMaxRetryCount); } else { // The print ticket was not valid. We are done here. 
FailedFetchingJobData(); @@ -527,7 +527,7 @@ void PrinterJobHandler::UpdateJobStatus(cloud_print::PrintJobStatus status, status), this, auth_token_, - kCloudPrintAPIRetryPolicy); + kCloudPrintAPIMaxRetryCount); } } } diff --git a/chrome_frame/metrics_service.cc b/chrome_frame/metrics_service.cc index 24d8f3f..dd7b5f2 100644 --- a/chrome_frame/metrics_service.cc +++ b/chrome_frame/metrics_service.cc @@ -63,7 +63,6 @@ #include "base/utf_string_conversions.h" #include "chrome/common/chrome_version_info.h" #include "chrome/common/net/url_fetcher.h" -#include "chrome/common/net/url_fetcher_protect.h" #include "chrome/common/net/url_request_context_getter.h" #include "chrome/installer/util/browser_distribution.h" #include "chrome/installer/util/chrome_frame_distribution.h" diff --git a/net/base/net_error_list.h b/net/base/net_error_list.h index 96b19ad..2998a46 100644 --- a/net/base/net_error_list.h +++ b/net/base/net_error_list.h @@ -210,6 +210,9 @@ NET_ERROR(NAME_RESOLUTION_FAILED, -137) // errors. See also ERR_ACCESS_DENIED. NET_ERROR(NETWORK_ACCESS_DENIED, -138) +// The request throttler module cancelled this request to avoid DDOS. +NET_ERROR(TEMPORARILY_THROTTLED, -139) + // Certificate error codes // // The values of certificate error codes must be consecutive. diff --git a/net/net.gyp b/net/net.gyp index a5b1948..a5134da 100644 --- a/net/net.gyp +++ b/net/net.gyp @@ -685,6 +685,14 @@ 'url_request/url_request_status.h', 'url_request/url_request_test_job.cc', 'url_request/url_request_test_job.h', + 'url_request/url_request_throttler_entry.cc', + 'url_request/url_request_throttler_entry.h', + 'url_request/url_request_throttler_entry_interface.h', + 'url_request/url_request_throttler_header_adapter.h', + 'url_request/url_request_throttler_header_adapter.cc', + 'url_request/url_request_throttler_header_interface.h', + 'url_request/url_request_throttler_manager.cc', + 'url_request/url_request_throttler_manager.h', 'url_request/view_cache_helper.cc', 'url_request/view_cache_helper.h', 'websockets/websocket.cc', @@ -960,6 +968,7 @@ 'tools/dump_cache/url_utilities.cc', 'tools/dump_cache/url_utilities_unittest.cc', 'url_request/url_request_job_tracker_unittest.cc', + 'url_request/url_request_throttler_unittest.cc', 'url_request/url_request_unittest.cc', 'url_request/url_request_unittest.h', 'url_request/view_cache_helper_unittest.cc', diff --git a/net/url_request/url_request_http_job.cc b/net/url_request/url_request_http_job.cc index f00f490..826f635 100644 --- a/net/url_request/url_request_http_job.cc +++ b/net/url_request/url_request_http_job.cc @@ -33,6 +33,8 @@ #include "net/url_request/url_request_context.h" #include "net/url_request/url_request_error_job.h" #include "net/url_request/url_request_redirect_job.h" +#include "net/url_request/url_request_throttler_header_adapter.h" +#include "net/url_request/url_request_throttler_manager.h" static const char kAvailDictionaryHeader[] = "Avail-Dictionary"; @@ -91,6 +93,8 @@ URLRequestHttpJob::URLRequestHttpJob(URLRequest* request) this, &URLRequestHttpJob::OnReadCompleted)), read_in_progress_(false), transaction_(NULL), + throttling_entry_(net::URLRequestThrottlerManager::GetInstance()-> + RegisterRequestUrl(request->url())), sdch_dictionary_advertised_(false), sdch_test_activated_(false), sdch_test_control_(false), @@ -569,6 +573,12 @@ void URLRequestHttpJob::NotifyHeadersComplete() { // also need this info. 
is_cached_content_ = response_info_->was_cached; + if (!is_cached_content_) { + net::URLRequestThrottlerHeaderAdapter response_adapter( + response_info_->headers); + throttling_entry_->UpdateWithResponse(&response_adapter); + } + ProcessStrictTransportSecurityHeader(); if (SdchManager::Global() && @@ -618,6 +628,7 @@ void URLRequestHttpJob::StartTransaction() { // with auth provided by username_ and password_. int rv; + if (transaction_.get()) { rv = transaction_->RestartWithAuth(username_, password_, &start_callback_); username_.clear(); @@ -629,8 +640,13 @@ void URLRequestHttpJob::StartTransaction() { rv = request_->context()->http_transaction_factory()->CreateTransaction( &transaction_); if (rv == net::OK) { - rv = transaction_->Start( - &request_info_, &start_callback_, request_->net_log()); + if (!throttling_entry_->IsDuringExponentialBackoff()) { + rv = transaction_->Start( + &request_info_, &start_callback_, request_->net_log()); + } else { + // Special error code for the exponential back-off module. + rv = net::ERR_TEMPORARILY_THROTTLED; + } // Make sure the context is alive for the duration of the // transaction. context_ = request_->context(); diff --git a/net/url_request/url_request_http_job.h b/net/url_request/url_request_http_job.h index 431756a..c981047 100644 --- a/net/url_request/url_request_http_job.h +++ b/net/url_request/url_request_http_job.h @@ -15,6 +15,7 @@ #include "net/base/completion_callback.h" #include "net/http/http_request_info.h" #include "net/url_request/url_request_job.h" +#include "net/url_request/url_request_throttler_entry_interface.h" namespace net { class HttpResponseInfo; @@ -112,6 +113,9 @@ class URLRequestHttpJob : public URLRequestJob { scoped_ptr<net::HttpTransaction> transaction_; + // This is used to supervise traffic and enforce exponential back-off. + scoped_refptr<net::URLRequestThrottlerEntryInterface> throttling_entry_; + // Indicated if an SDCH dictionary was advertised, and hence an SDCH // compressed response is expected. We use this to help detect (accidental?) // proxy corruption of a response, which sometimes marks SDCH content as diff --git a/net/url_request/url_request_throttler_entry.cc b/net/url_request/url_request_throttler_entry.cc new file mode 100644 index 0000000..4abb438 --- /dev/null +++ b/net/url_request/url_request_throttler_entry.cc @@ -0,0 +1,242 @@ +// Copyright (c) 2010 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "net/url_request/url_request_throttler_entry.h" + +#include <cmath> + +#include "base/logging.h" +#include "base/rand_util.h" +#include "base/string_number_conversions.h" +#include "net/url_request/url_request_throttler_header_interface.h" + +namespace net { + +const int URLRequestThrottlerEntry::kDefaultSlidingWindowPeriodMs = 2000; +const int URLRequestThrottlerEntry::kDefaultMaxSendThreshold = 20; +const int URLRequestThrottlerEntry::kDefaultInitialBackoffMs = 700; +const int URLRequestThrottlerEntry::kDefaultAdditionalConstantMs = 100; +const double URLRequestThrottlerEntry::kDefaultMultiplyFactor = 1.4; +const double URLRequestThrottlerEntry::kDefaultJitterFactor = 0.4; +const int URLRequestThrottlerEntry::kDefaultMaximumBackoffMs = 60 * 60 * 1000; +const int URLRequestThrottlerEntry::kDefaultEntryLifetimeMs = 120000; +const char URLRequestThrottlerEntry::kRetryHeaderName[] = "X-Retry-After"; + +URLRequestThrottlerEntry::URLRequestThrottlerEntry() + : sliding_window_period_( + base::TimeDelta::FromMilliseconds(kDefaultSlidingWindowPeriodMs)), + max_send_threshold_(kDefaultMaxSendThreshold), + initial_backoff_ms_(kDefaultInitialBackoffMs), + additional_constant_ms_(kDefaultAdditionalConstantMs), + multiply_factor_(kDefaultMultiplyFactor), + jitter_factor_(kDefaultJitterFactor), + maximum_backoff_ms_(kDefaultMaximumBackoffMs), + entry_lifetime_ms_(kDefaultEntryLifetimeMs) { + Initialize(); +} + +URLRequestThrottlerEntry::URLRequestThrottlerEntry( + int sliding_window_period_ms, + int max_send_threshold, + int initial_backoff_ms, + int additional_constant_ms, + double multiply_factor, + double jitter_factor, + int maximum_backoff_ms) + : sliding_window_period_( + base::TimeDelta::FromMilliseconds(sliding_window_period_ms)), + max_send_threshold_(max_send_threshold), + initial_backoff_ms_(initial_backoff_ms), + additional_constant_ms_(additional_constant_ms), + multiply_factor_(multiply_factor), + jitter_factor_(jitter_factor), + maximum_backoff_ms_(maximum_backoff_ms), + entry_lifetime_ms_(-1) { + DCHECK_GT(sliding_window_period_ms, 0); + DCHECK_GT(max_send_threshold_, 0); + DCHECK_GE(initial_backoff_ms_, 0); + DCHECK_GE(additional_constant_ms_, 0); + DCHECK_GT(multiply_factor_, 0); + DCHECK_GE(jitter_factor_, 0); + DCHECK_LT(jitter_factor_, 1); + DCHECK_GE(maximum_backoff_ms_, 0); + + Initialize(); +} + +URLRequestThrottlerEntry::~URLRequestThrottlerEntry() { +} + +void URLRequestThrottlerEntry::Initialize() { + // Since this method is called by the constructors, GetTimeNow() (a virtual + // method) is not used. + exponential_backoff_release_time_ = base::TimeTicks::Now(); + failure_count_ = 0; + latest_response_was_failure_ = false; + + sliding_window_release_time_ = base::TimeTicks::Now(); +} + +bool URLRequestThrottlerEntry::IsDuringExponentialBackoff() const { + return exponential_backoff_release_time_ > GetTimeNow(); +} + +int64 URLRequestThrottlerEntry::ReserveSendingTimeForNextRequest( + const base::TimeTicks& earliest_time) { + base::TimeTicks now = GetTimeNow(); + // If a lot of requests were successfully made recently, + // sliding_window_release_time_ may be greater than + // exponential_backoff_release_time_. + base::TimeTicks recommended_sending_time = + std::max(std::max(now, earliest_time), + std::max(exponential_backoff_release_time_, + sliding_window_release_time_)); + + DCHECK(send_log_.empty() || + recommended_sending_time >= send_log_.back()); + // Log the new send event. 
+  send_log_.push(recommended_sending_time);
+
+  sliding_window_release_time_ = recommended_sending_time;
+
+  // Drop the out-of-date events in the event list.
+  // We don't need to worry that the queue may become empty during this
+  // operation, since the last element is sliding_window_release_time_.
+  while ((send_log_.front() + sliding_window_period_ <=
+          sliding_window_release_time_) ||
+         send_log_.size() > static_cast<unsigned>(max_send_threshold_)) {
+    send_log_.pop();
+  }
+
+  // Check if there are too many send events in recent time.
+  if (send_log_.size() == static_cast<unsigned>(max_send_threshold_))
+    sliding_window_release_time_ = send_log_.front() + sliding_window_period_;
+
+  return (recommended_sending_time - now).InMillisecondsRoundedUp();
+}
+
+base::TimeTicks
+    URLRequestThrottlerEntry::GetExponentialBackoffReleaseTime() const {
+  return exponential_backoff_release_time_;
+}
+
+void URLRequestThrottlerEntry::UpdateWithResponse(
+    const URLRequestThrottlerHeaderInterface* response) {
+  if (response->GetResponseCode() >= 500) {
+    failure_count_++;
+    latest_response_was_failure_ = true;
+    exponential_backoff_release_time_ =
+        CalculateExponentialBackoffReleaseTime();
+  } else {
+    // We slowly decay the failure count, instead of resetting it to 0, so
+    // that we stay stable if we receive lots of requests with malformed
+    // bodies at the same time.
+    if (failure_count_ > 0)
+      failure_count_--;
+
+    latest_response_was_failure_ = false;
+
+    // We do not simply cut the release time back to GetTimeNow(), for two
+    // reasons: it would discard any delay requested through our custom
+    // retry-after header, and with multiple requests in flight we want to
+    // push every request up to our "horizon". For example, if we send three
+    // requests and receive two failures and one success, the success must not
+    // reset the release time; further requests still have to wait out the
+    // delay caused by the two failures.
+    exponential_backoff_release_time_ = std::max(
+        GetTimeNow(), exponential_backoff_release_time_);
+
+    std::string retry_header = response->GetNormalizedValue(kRetryHeaderName);
+    if (!retry_header.empty())
+      HandleCustomRetryAfter(retry_header);
+  }
+}
+
+bool URLRequestThrottlerEntry::IsEntryOutdated() const {
+  if (entry_lifetime_ms_ == -1)
+    return false;
+
+  base::TimeTicks now = GetTimeNow();
+
+  // If there are send events in the sliding window period, we still need this
+  // entry.
+  if (send_log_.size() > 0 &&
+      send_log_.back() + sliding_window_period_ > now) {
+    return false;
+  }
+
+  int64 unused_since_ms =
+      (now - exponential_backoff_release_time_).InMilliseconds();
+
+  // The release time is still in the future; we are managing this entry.
+  if (unused_since_ms < 0)
+    return false;
+
+  // If latest_response_was_failure_ is true, the latest request (or requests)
+  // encountered a server error or a malformed response body. In that case we
+  // don't want to collect the entry unless it has been unused for longer than
+  // the maximum allowed back-off.
+  if (latest_response_was_failure_)
+    return unused_since_ms > std::max(maximum_backoff_ms_, entry_lifetime_ms_);
+
+  // Otherwise, consider the entry outdated if it hasn't been used for the
+  // specified lifetime period.
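+  // (With the default constants, that is two minutes without any send events
+  // or release-time activity: kDefaultEntryLifetimeMs is 120000.)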
+  return unused_since_ms > entry_lifetime_ms_;
+}
+
+void URLRequestThrottlerEntry::ReceivedContentWasMalformed() {
+  // A response that is reported as malformed now was probably counted as a
+  // success when it was received, which decreased the failure count by 1. We
+  // therefore increase the failure count by 2 here: 1 to undo that decrement
+  // and 1 to record the failure.
+  //
+  // Note that this may lead to a larger failure count than expected, because
+  // we don't decrease the failure count for successful responses once it has
+  // already reached 0.
+  failure_count_ += 2;
+  latest_response_was_failure_ = true;
+  exponential_backoff_release_time_ = CalculateExponentialBackoffReleaseTime();
+}
+
+base::TimeTicks
+    URLRequestThrottlerEntry::CalculateExponentialBackoffReleaseTime() {
+  double delay = initial_backoff_ms_;
+  delay *= pow(multiply_factor_, failure_count_);
+  delay += additional_constant_ms_;
+  delay -= base::RandDouble() * jitter_factor_ * delay;
+
+  // Ensure that we do not exceed the maximum delay.
+  int64 delay_int = static_cast<int64>(delay + 0.5);
+  delay_int = std::min(delay_int, static_cast<int64>(maximum_backoff_ms_));
+
+  return std::max(GetTimeNow() + base::TimeDelta::FromMilliseconds(delay_int),
+                  exponential_backoff_release_time_);
+}
+
+base::TimeTicks URLRequestThrottlerEntry::GetTimeNow() const {
+  return base::TimeTicks::Now();
+}
+
+void URLRequestThrottlerEntry::HandleCustomRetryAfter(
+    const std::string& header_value) {
+  // The header value is the number of seconds to wait, as a floating-point
+  // value.
+  double time_in_sec = 0;
+  bool conversion_is_ok = base::StringToDouble(header_value, &time_in_sec);
+
+  // Conversion of the custom retry-after header value failed.
+  if (!conversion_is_ok)
+    return;
+
+  // Convert to milliseconds, since we need an integer value below.
+  int64 value_ms = static_cast<int64>(0.5 + time_in_sec * 1000);
+
+  if (maximum_backoff_ms_ < value_ms || value_ms < 0)
+    return;
+
+  exponential_backoff_release_time_ = std::max(
+      (GetTimeNow() + base::TimeDelta::FromMilliseconds(value_ms)),
+      exponential_backoff_release_time_);
+}
+
+}  // namespace net
diff --git a/net/url_request/url_request_throttler_entry.h b/net/url_request/url_request_throttler_entry.h
new file mode 100644
index 0000000..9b8955d
--- /dev/null
+++ b/net/url_request/url_request_throttler_entry.h
@@ -0,0 +1,157 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef NET_URL_REQUEST_URL_REQUEST_THROTTLER_ENTRY_H_
+#define NET_URL_REQUEST_URL_REQUEST_THROTTLER_ENTRY_H_
+
+#include <queue>
+#include <string>
+
+#include "base/basictypes.h"
+#include "net/url_request/url_request_throttler_entry_interface.h"
+
+namespace net {
+
+// URLRequestThrottlerEntry represents an entry of URLRequestThrottlerManager.
+// It analyzes requests to a specific URL over some period of time, in order
+// to deduce the back-off time for every request.
+// The back-off algorithm consists of two parts. Firstly, exponential back-off
+// is used when receiving 5XX server errors or malformed response bodies.
+// The exponential back-off rule is enforced by URLRequestHttpJob. Any request
+// sent during the back-off period will be cancelled.
+// Secondly, a sliding window is used to count recent requests to a given
+// destination and provide guidance (to the application level only) on whether
+// too many requests have been sent and when a good time to send the next one
+// would be. This is never used to deny requests at the network level.
+class URLRequestThrottlerEntry : public URLRequestThrottlerEntryInterface {
+ public:
+  // Sliding window period.
+  static const int kDefaultSlidingWindowPeriodMs;
+
+  // Maximum number of requests allowed in the sliding window period.
+  static const int kDefaultMaxSendThreshold;
+
+  // Initial delay for the exponential back-off.
+  static const int kDefaultInitialBackoffMs;
+
+  // Additional constant to adjust the back-off.
+  static const int kDefaultAdditionalConstantMs;
+
+  // Factor by which the waiting time is multiplied.
+  static const double kDefaultMultiplyFactor;
+
+  // Fuzzing factor; e.g. a value of 0.1 spreads requests randomly between
+  // 90% and 100% of the calculated time.
+  static const double kDefaultJitterFactor;
+
+  // Maximum amount of time we are willing to delay our request.
+  static const int kDefaultMaximumBackoffMs;
+
+  // Time after which the entry is considered outdated.
+  static const int kDefaultEntryLifetimeMs;
+
+  // Name of the header that servers can use to ask clients to delay their
+  // next request.
+  static const char kRetryHeaderName[];
+
+  URLRequestThrottlerEntry();
+
+  // Instances created with this constructor have an infinite life span.
+  // This constructor is only used by unit tests.
+  URLRequestThrottlerEntry(int sliding_window_period_ms,
+                           int max_send_threshold,
+                           int initial_backoff_ms,
+                           int additional_constant_ms,
+                           double multiply_factor,
+                           double jitter_factor,
+                           int maximum_backoff_ms);
+
+  // Implementation of URLRequestThrottlerEntryInterface.
+  virtual bool IsDuringExponentialBackoff() const;
+  virtual int64 ReserveSendingTimeForNextRequest(
+      const base::TimeTicks& earliest_time);
+  virtual base::TimeTicks GetExponentialBackoffReleaseTime() const;
+  virtual void UpdateWithResponse(
+      const URLRequestThrottlerHeaderInterface* response);
+  virtual void ReceivedContentWasMalformed();
+
+  // Used by the manager; returns true if the entry needs to be garbage
+  // collected.
+  bool IsEntryOutdated() const;
+
+ protected:
+  virtual ~URLRequestThrottlerEntry();
+
+  void Initialize();
+
+  // Calculates the release time for the exponential back-off.
+  base::TimeTicks CalculateExponentialBackoffReleaseTime();
+
+  // Equivalent to TimeTicks::Now(); virtual so it can be mocked for testing
+  // purposes.
+  virtual base::TimeTicks GetTimeNow() const;
+
+  // Used internally to increase the release time following a retry-after
+  // header.
+  void HandleCustomRetryAfter(const std::string& header_value);
+
+  // Used by tests.
+  void set_exponential_backoff_release_time(
+      const base::TimeTicks& release_time) {
+    exponential_backoff_release_time_ = release_time;
+  }
+
+  // Used by tests.
+  base::TimeTicks sliding_window_release_time() const {
+    return sliding_window_release_time_;
+  }
+
+  // Used by tests.
+  void set_sliding_window_release_time(const base::TimeTicks& release_time) {
+    sliding_window_release_time_ = release_time;
+  }
+
+  // Used by tests.
+  void set_failure_count(int failure_count) {
+    failure_count_ = failure_count;
+  }
+
+ private:
+  // Timestamp calculated by the exponential back-off algorithm at which we
+  // are allowed to start sending requests again.
+  base::TimeTicks exponential_backoff_release_time_;
+
+  // Number of times we have encountered server errors or malformed response
+  // bodies.
+  int failure_count_;
+
+  // If true, the response to the last request was a failure.
+  // Note that this member can be false while failure_count_ is still greater
+  // than 0, since we gradually decrease failure_count_, instead of resetting
+  // it to 0 directly, when we receive successful responses.
+  bool latest_response_was_failure_;
+
+  // Timestamp calculated by the sliding window algorithm for when we advise
+  // clients the next request should be made, at the earliest. Advisory only,
+  // not used to deny requests.
+  base::TimeTicks sliding_window_release_time_;
+
+  // A list of the recent send events. We use it to decide whether too many
+  // requests were sent in the sliding window.
+  std::queue<base::TimeTicks> send_log_;
+
+  const base::TimeDelta sliding_window_period_;
+  const int max_send_threshold_;
+  const int initial_backoff_ms_;
+  const int additional_constant_ms_;
+  const double multiply_factor_;
+  const double jitter_factor_;
+  const int maximum_backoff_ms_;
+  // Set to -1 if the entry never expires.
+  const int entry_lifetime_ms_;
+
+  DISALLOW_COPY_AND_ASSIGN(URLRequestThrottlerEntry);
+};
+
+}  // namespace net
+
+#endif  // NET_URL_REQUEST_URL_REQUEST_THROTTLER_ENTRY_H_
diff --git a/net/url_request/url_request_throttler_entry_interface.h b/net/url_request/url_request_throttler_entry_interface.h
new file mode 100644
index 0000000..616e1d0
--- /dev/null
+++ b/net/url_request/url_request_throttler_entry_interface.h
@@ -0,0 +1,63 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef NET_URL_REQUEST_URL_REQUEST_THROTTLER_ENTRY_INTERFACE_H_
+#define NET_URL_REQUEST_URL_REQUEST_THROTTLER_ENTRY_INTERFACE_H_
+
+#include "base/basictypes.h"
+#include "base/ref_counted.h"
+#include "base/time.h"
+
+namespace net {
+
+class URLRequestThrottlerHeaderInterface;
+
+// Interface provided on entries of the URL request throttler manager.
+class URLRequestThrottlerEntryInterface
+    : public base::RefCounted<URLRequestThrottlerEntryInterface> {
+ public:
+  URLRequestThrottlerEntryInterface() {}
+
+  // Returns true when we have encountered server errors and are doing
+  // exponential back-off.
+  // URLRequestHttpJob checks this method prior to every request; it cancels
+  // requests if this method returns true.
+  virtual bool IsDuringExponentialBackoff() const = 0;
+
+  // Calculates a recommended sending time for the next request and reserves
+  // it. The sending time is not earlier than the current exponential back-off
+  // release time or |earliest_time|. Moreover, the previous results of the
+  // method are taken into account, in order to make sure they are spread
+  // properly over time.
+  // Returns the recommended delay before sending the next request, in
+  // milliseconds. The return value is always non-negative.
+  // Although it is not mandatory, respecting the value returned by this
+  // method is helpful to avoid traffic overload.
+  virtual int64 ReserveSendingTimeForNextRequest(
+      const base::TimeTicks& earliest_time) = 0;
+
+  // Returns the time after which requests are allowed.
+  virtual base::TimeTicks GetExponentialBackoffReleaseTime() const = 0;
+
+  // This method needs to be called each time a response is received.
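+  // Responses with a response code of 500 or above count as failures and
+  // lengthen the back-off period; any other response slowly decays the
+  // failure count instead.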
+  virtual void UpdateWithResponse(
+      const URLRequestThrottlerHeaderInterface* response) = 0;
+
+  // Lets higher-level modules, which know how to parse particular response
+  // bodies, notify the throttler of a malformed response for the given URL.
+  // The throttler handles this as if an HTTP 5xx response had been received
+  // for the request, i.e. it counts as a failure.
+  virtual void ReceivedContentWasMalformed() = 0;
+
+ protected:
+  virtual ~URLRequestThrottlerEntryInterface() {}
+
+ private:
+  friend class base::RefCounted<URLRequestThrottlerEntryInterface>;
+  DISALLOW_COPY_AND_ASSIGN(URLRequestThrottlerEntryInterface);
+};
+
+}  // namespace net
+
+#endif  // NET_URL_REQUEST_URL_REQUEST_THROTTLER_ENTRY_INTERFACE_H_
diff --git a/net/url_request/url_request_throttler_header_adapter.cc b/net/url_request/url_request_throttler_header_adapter.cc
new file mode 100644
index 0000000..e453071c
--- /dev/null
+++ b/net/url_request/url_request_throttler_header_adapter.cc
@@ -0,0 +1,27 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/url_request/url_request_throttler_header_adapter.h"
+
+#include "net/http/http_response_headers.h"
+
+namespace net {
+
+URLRequestThrottlerHeaderAdapter::URLRequestThrottlerHeaderAdapter(
+    net::HttpResponseHeaders* headers)
+    : response_header_(headers) {
+}
+
+std::string URLRequestThrottlerHeaderAdapter::GetNormalizedValue(
+    const std::string& key) const {
+  std::string return_value;
+  response_header_->GetNormalizedHeader(key, &return_value);
+  return return_value;
+}
+
+int URLRequestThrottlerHeaderAdapter::GetResponseCode() const {
+  return response_header_->response_code();
+}
+
+}  // namespace net
diff --git a/net/url_request/url_request_throttler_header_adapter.h b/net/url_request/url_request_throttler_header_adapter.h
new file mode 100644
index 0000000..599a9f6
--- /dev/null
+++ b/net/url_request/url_request_throttler_header_adapter.h
@@ -0,0 +1,34 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef NET_URL_REQUEST_URL_REQUEST_THROTTLER_HEADER_ADAPTER_H_
+#define NET_URL_REQUEST_URL_REQUEST_THROTTLER_HEADER_ADAPTER_H_
+
+#include <string>
+
+#include "base/ref_counted.h"
+#include "net/url_request/url_request_throttler_header_interface.h"
+
+namespace net {
+
+class HttpResponseHeaders;
+
+// Adapter that exposes HttpResponseHeaders through the HTTP header interface
+// of the URL request throttler component.
+class URLRequestThrottlerHeaderAdapter
+    : public URLRequestThrottlerHeaderInterface {
+ public:
+  explicit URLRequestThrottlerHeaderAdapter(net::HttpResponseHeaders* headers);
+  virtual ~URLRequestThrottlerHeaderAdapter() {}
+
+  // Implementation of URLRequestThrottlerHeaderInterface.
+  virtual std::string GetNormalizedValue(const std::string& key) const;
+  virtual int GetResponseCode() const;
+
+ private:
+  const scoped_refptr<net::HttpResponseHeaders> response_header_;
+};
+
+}  // namespace net
+
+#endif  // NET_URL_REQUEST_URL_REQUEST_THROTTLER_HEADER_ADAPTER_H_
diff --git a/net/url_request/url_request_throttler_header_interface.h b/net/url_request/url_request_throttler_header_interface.h
new file mode 100644
index 0000000..c69d185
--- /dev/null
+++ b/net/url_request/url_request_throttler_header_interface.h
@@ -0,0 +1,28 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef NET_URL_REQUEST_URL_REQUEST_THROTTLER_HEADER_INTERFACE_H_
+#define NET_URL_REQUEST_URL_REQUEST_THROTTLER_HEADER_INTERFACE_H_
+
+#include <string>
+
+namespace net {
+
+// Interface to the HTTP headers of a response, restricted to exactly the
+// methods the throttler needs.
+class URLRequestThrottlerHeaderInterface {
+ public:
+  virtual ~URLRequestThrottlerHeaderInterface() {}
+
+  // Fetches a header value by its key.
+  // Ex: for "Location: www.example.com", key = "location" and
+  // value = "www.example.com".
+  // If the key does not exist, it returns an empty string.
+  virtual std::string GetNormalizedValue(const std::string& key) const = 0;
+
+  // Returns the HTTP response code associated with the request.
+  virtual int GetResponseCode() const = 0;
+};
+
+}  // namespace net
+
+#endif  // NET_URL_REQUEST_URL_REQUEST_THROTTLER_HEADER_INTERFACE_H_
diff --git a/net/url_request/url_request_throttler_manager.cc b/net/url_request/url_request_throttler_manager.cc
new file mode 100644
index 0000000..5428d9a
--- /dev/null
+++ b/net/url_request/url_request_throttler_manager.cc
@@ -0,0 +1,107 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/url_request/url_request_throttler_manager.h"
+
+#include "base/string_util.h"
+
+namespace net {
+
+const unsigned int URLRequestThrottlerManager::kMaximumNumberOfEntries = 1500;
+const unsigned int URLRequestThrottlerManager::kRequestsBetweenCollecting = 200;
+
+URLRequestThrottlerManager* URLRequestThrottlerManager::GetInstance() {
+  return Singleton<URLRequestThrottlerManager>::get();
+}
+
+scoped_refptr<URLRequestThrottlerEntryInterface>
+    URLRequestThrottlerManager::RegisterRequestUrl(const GURL& url) {
+  // Normalize the url.
+  std::string url_id = GetIdFromUrl(url);
+
+  // Periodically garbage collect old entries.
+  GarbageCollectEntriesIfNecessary();
+
+  // Find the entry in the map or create it.
+  scoped_refptr<URLRequestThrottlerEntry>& entry = url_entries_[url_id];
+  if (entry == NULL)
+    entry = new URLRequestThrottlerEntry();
+
+  return entry;
+}
+
+URLRequestThrottlerManager::URLRequestThrottlerManager()
+    : requests_since_last_gc_(0) {
+}
+
+URLRequestThrottlerManager::~URLRequestThrottlerManager() {
+  // Delete all entries.
+  url_entries_.clear();
+}
+
+std::string URLRequestThrottlerManager::GetIdFromUrl(const GURL& url) const {
+  if (!url.is_valid())
+    return url.possibly_invalid_spec();
+
+  if (url_id_replacements_ == NULL) {
+    url_id_replacements_.reset(new GURL::Replacements());
+
+    url_id_replacements_->ClearPassword();
+    url_id_replacements_->ClearUsername();
+    url_id_replacements_->ClearQuery();
+    url_id_replacements_->ClearRef();
+  }
+
+  GURL id = url.ReplaceComponents(*url_id_replacements_);
+  return StringToLowerASCII(id.spec());
+}
+
+void URLRequestThrottlerManager::GarbageCollectEntries() {
+  UrlEntryMap::iterator i = url_entries_.begin();
+
+  while (i != url_entries_.end()) {
+    if ((i->second)->IsEntryOutdated()) {
+      url_entries_.erase(i++);
+    } else {
+      ++i;
+    }
+  }
+
+  // In case something breaks, make sure the map cannot grow indefinitely.
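+  // (url_entries_ is a std::map keyed by URL ID, so this evicts the entries
+  // with the lexicographically smallest IDs first.)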
+  while (url_entries_.size() > kMaximumNumberOfEntries) {
+    url_entries_.erase(url_entries_.begin());
+  }
+}
+
+void URLRequestThrottlerManager::GarbageCollectEntriesIfNecessary() {
+  requests_since_last_gc_++;
+  if (requests_since_last_gc_ < kRequestsBetweenCollecting)
+    return;
+
+  requests_since_last_gc_ = 0;
+  GarbageCollectEntries();
+}
+
+void URLRequestThrottlerManager::OverrideEntryForTests(
+    const GURL& url,
+    URLRequestThrottlerEntry* entry) {
+  if (entry == NULL)
+    return;
+
+  // Normalize the url.
+  std::string url_id = GetIdFromUrl(url);
+
+  // Periodically garbage collect old entries.
+  GarbageCollectEntriesIfNecessary();
+
+  url_entries_[url_id] = entry;
+}
+
+void URLRequestThrottlerManager::EraseEntryForTests(const GURL& url) {
+  // Normalize the url.
+  std::string url_id = GetIdFromUrl(url);
+  url_entries_.erase(url_id);
+}
+
+}  // namespace net
diff --git a/net/url_request/url_request_throttler_manager.h b/net/url_request/url_request_throttler_manager.h
new file mode 100644
index 0000000..6c8cd2f
--- /dev/null
+++ b/net/url_request/url_request_throttler_manager.h
@@ -0,0 +1,101 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef NET_URL_REQUEST_URL_REQUEST_THROTTLER_MANAGER_H_
+#define NET_URL_REQUEST_URL_REQUEST_THROTTLER_MANAGER_H_
+
+#include <map>
+#include <string>
+
+#include "base/basictypes.h"
+#include "base/scoped_ptr.h"
+#include "base/singleton.h"
+#include "googleurl/src/gurl.h"
+#include "net/url_request/url_request_throttler_entry.h"
+
+namespace net {
+
+// Class that registers URL request throttler entries for URLs being accessed
+// in order to supervise traffic. URL requests for HTTP content should
+// register their URLs in this manager on each request.
+// URLRequestThrottlerManager maintains a map of URL IDs to URL request
+// throttler entries. It creates URL request throttler entries when new URLs
+// are registered, and does garbage collection from time to time in order to
+// clean out outdated entries. A URL ID consists of a lowercased scheme, host,
+// port and path. All URLs converted to the same ID will share the same entry.
+//
+// NOTE: All usage of the singleton object of this class should be on the same
+// thread.
+class URLRequestThrottlerManager {
+ public:
+  static URLRequestThrottlerManager* GetInstance();
+
+  // Must be called for every request; returns the URL request throttler entry
+  // associated with the URL. The caller must inform this entry of certain
+  // events; please refer to url_request_throttler_entry_interface.h for
+  // further information.
+  scoped_refptr<URLRequestThrottlerEntryInterface> RegisterRequestUrl(
+      const GURL& url);
+
+  // Registers a new entry in this service and overrides the existing entry
+  // (if any) for the URL. The service will hold a reference to the entry.
+  // It is only used by unit tests.
+  void OverrideEntryForTests(const GURL& url, URLRequestThrottlerEntry* entry);
+
+  // Explicitly erases an entry.
+  // This is useful to remove entries which have an infinite lifetime and
+  // thus won't be garbage collected.
+  // It is only used by unit tests.
+  void EraseEntryForTests(const GURL& url);
+
+ protected:
+  URLRequestThrottlerManager();
+  ~URLRequestThrottlerManager();
+
+  // Method that allows us to transform a URL into an ID that can be used in
+  // our map.
+  // Resulting IDs are lowercase and consist of the scheme, host, port and
+  // path (without query string, fragment, etc.).
+  // If the URL is invalid, the invalid spec is returned without any
+  // transformation.
+  std::string GetIdFromUrl(const GURL& url) const;
+
+  // Method that ensures the map gets cleaned from time to time. The period at
+  // which garbage collecting happens is adjustable with the
+  // kRequestsBetweenCollecting constant.
+  void GarbageCollectEntriesIfNecessary();
+  // Method that does the actual work of garbage collecting.
+  void GarbageCollectEntries();
+
+  // Used by tests.
+  int GetNumberOfEntriesForTests() const { return url_entries_.size(); }
+
+ private:
+  friend struct DefaultSingletonTraits<URLRequestThrottlerManager>;
+
+  // From each URL we generate an ID composed of the scheme, host, port and
+  // path that allows us to uniquely map an entry to it.
+  typedef std::map<std::string, scoped_refptr<URLRequestThrottlerEntry> >
+      UrlEntryMap;
+
+  // Maximum number of entries that we are willing to collect in our map.
+  static const unsigned int kMaximumNumberOfEntries;
+  // Number of requests that will be made between garbage collections.
+  static const unsigned int kRequestsBetweenCollecting;
+
+  // Map that associates each URL ID with its matching
+  // URLRequestThrottlerEntry.
+  UrlEntryMap url_entries_;
+
+  // This keeps track of how many requests have been made. Used with
+  // GarbageCollectEntries.
+  unsigned int requests_since_last_gc_;
+
+  mutable scoped_ptr<GURL::Replacements> url_id_replacements_;
+
+  DISALLOW_COPY_AND_ASSIGN(URLRequestThrottlerManager);
+};
+
+}  // namespace net
+
+#endif  // NET_URL_REQUEST_URL_REQUEST_THROTTLER_MANAGER_H_
diff --git a/net/url_request/url_request_throttler_unittest.cc b/net/url_request/url_request_throttler_unittest.cc
new file mode 100644
index 0000000..0683f91
--- /dev/null
+++ b/net/url_request/url_request_throttler_unittest.cc
@@ -0,0 +1,346 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/pickle.h"
+#include "base/scoped_ptr.h"
+#include "base/string_number_conversions.h"
+#include "base/time.h"
+#include "net/base/test_completion_callback.h"
+#include "net/url_request/url_request_context.h"
+#include "net/url_request/url_request_throttler_header_interface.h"
+#include "net/url_request/url_request_throttler_manager.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using base::TimeDelta;
+using base::TimeTicks;
+
+namespace {
+class MockURLRequestThrottlerManager;
+
+class MockURLRequestThrottlerEntry : public net::URLRequestThrottlerEntry {
+ public:
+  MockURLRequestThrottlerEntry() {}
+  MockURLRequestThrottlerEntry(
+      const TimeTicks& exponential_backoff_release_time,
+      const TimeTicks& sliding_window_release_time,
+      const TimeTicks& fake_now)
+      : fake_time_now_(fake_now) {
+    set_exponential_backoff_release_time(exponential_backoff_release_time);
+    set_sliding_window_release_time(sliding_window_release_time);
+  }
+  virtual ~MockURLRequestThrottlerEntry() {}
+
+  void ResetToBlank(const TimeTicks& time_now) {
+    fake_time_now_ = time_now;
+    set_exponential_backoff_release_time(time_now);
+    set_failure_count(0);
+    set_sliding_window_release_time(time_now);
+  }
+
+  // Overridden for tests.
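+  // Returns fake_time_now_ instead of the wall clock, so tests fully control
+  // the passage of time.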
+  virtual TimeTicks GetTimeNow() const { return fake_time_now_; }
+
+  void set_exponential_backoff_release_time(
+      const base::TimeTicks& release_time) {
+    net::URLRequestThrottlerEntry::set_exponential_backoff_release_time(
+        release_time);
+  }
+
+  base::TimeTicks sliding_window_release_time() const {
+    return net::URLRequestThrottlerEntry::sliding_window_release_time();
+  }
+
+  void set_sliding_window_release_time(
+      const base::TimeTicks& release_time) {
+    net::URLRequestThrottlerEntry::set_sliding_window_release_time(
+        release_time);
+  }
+
+  TimeTicks fake_time_now_;
+};
+
+class MockURLRequestThrottlerHeaderAdapter
+    : public net::URLRequestThrottlerHeaderInterface {
+ public:
+  MockURLRequestThrottlerHeaderAdapter()
+      : fake_retry_value_("0.0"),
+        fake_response_code_(0) {
+  }
+
+  MockURLRequestThrottlerHeaderAdapter(const std::string& retry_value,
+                                       int response_code)
+      : fake_retry_value_(retry_value),
+        fake_response_code_(response_code) {
+  }
+
+  virtual ~MockURLRequestThrottlerHeaderAdapter() {}
+
+  virtual std::string GetNormalizedValue(const std::string& key) const {
+    if (key == MockURLRequestThrottlerEntry::kRetryHeaderName)
+      return fake_retry_value_;
+    return "";
+  }
+
+  virtual int GetResponseCode() const { return fake_response_code_; }
+
+  std::string fake_retry_value_;
+  int fake_response_code_;
+};
+
+class MockURLRequestThrottlerManager : public net::URLRequestThrottlerManager {
+ public:
+  MockURLRequestThrottlerManager() : create_entry_index_(0) {}
+
+  // Exposes the protected GetIdFromUrl() method of
+  // URLRequestThrottlerManager.
+  std::string DoGetUrlIdFromUrl(const GURL& url) { return GetIdFromUrl(url); }
+
+  // Exposes the protected GarbageCollectEntries() method of
+  // URLRequestThrottlerManager.
+  void DoGarbageCollectEntries() { GarbageCollectEntries(); }
+
+  // Returns the number of entries in the map.
+ int GetNumberOfEntries() const { return GetNumberOfEntriesForTests(); } + + void CreateEntry(bool is_outdated) { + TimeTicks time = TimeTicks::Now(); + if (is_outdated) { + time -= TimeDelta::FromMilliseconds( + MockURLRequestThrottlerEntry::kDefaultEntryLifetimeMs + 1000); + } + std::string fake_url_string("http://www.fakeurl.com/"); + fake_url_string.append(base::IntToString(create_entry_index_++)); + GURL fake_url(fake_url_string); + OverrideEntryForTests( + fake_url, + new MockURLRequestThrottlerEntry(time, TimeTicks::Now(), + TimeTicks::Now())); + } + + private: + int create_entry_index_; +}; + +struct TimeAndBool { + TimeAndBool(const TimeTicks& time_value, bool expected, int line_num) { + time = time_value; + result = expected; + line = line_num; + } + TimeTicks time; + bool result; + int line; +}; + +struct GurlAndString { + GurlAndString(const GURL& url_value, + const std::string& expected, + int line_num) { + url = url_value; + result = expected; + line = line_num; + } + GURL url; + std::string result; + int line; +}; + +} // namespace + +class URLRequestThrottlerEntryTest : public testing::Test { + protected: + virtual void SetUp(); + TimeTicks now_; + scoped_refptr<MockURLRequestThrottlerEntry> entry_; +}; + +void URLRequestThrottlerEntryTest::SetUp() { + now_ = TimeTicks::Now(); + entry_ = new MockURLRequestThrottlerEntry(); + entry_->ResetToBlank(now_); +} + +std::ostream& operator<<(std::ostream& out, const base::TimeTicks& time) { + return out << time.ToInternalValue(); +} + +TEST_F(URLRequestThrottlerEntryTest, InterfaceDuringExponentialBackoff) { + entry_->set_exponential_backoff_release_time( + entry_->fake_time_now_ + TimeDelta::FromMilliseconds(1)); + EXPECT_TRUE(entry_->IsDuringExponentialBackoff()); +} + +TEST_F(URLRequestThrottlerEntryTest, InterfaceNotDuringExponentialBackoff) { + entry_->set_exponential_backoff_release_time(entry_->fake_time_now_); + EXPECT_FALSE(entry_->IsDuringExponentialBackoff()); + entry_->set_exponential_backoff_release_time( + entry_->fake_time_now_ - TimeDelta::FromMilliseconds(1)); + EXPECT_FALSE(entry_->IsDuringExponentialBackoff()); +} + +TEST_F(URLRequestThrottlerEntryTest, InterfaceUpdateRetryAfter) { + // If the response we received has a retry-after field, + // the request should be delayed. 
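+  // HandleCustomRetryAfter() interprets "5.5" as seconds, i.e. a 5500 ms
+  // delay.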
+ MockURLRequestThrottlerHeaderAdapter header_w_delay_header("5.5", 200); + entry_->UpdateWithResponse(&header_w_delay_header); + EXPECT_GT(entry_->GetExponentialBackoffReleaseTime(), entry_->fake_time_now_) + << "When the server put a positive value in retry-after we should " + "increase release_time"; + + entry_->ResetToBlank(now_); + header_w_delay_header.fake_retry_value_ = "-5.5"; + EXPECT_EQ(entry_->GetExponentialBackoffReleaseTime(), entry_->fake_time_now_) + << "When given a negative value, it should not change the release_time"; +} + +TEST_F(URLRequestThrottlerEntryTest, InterfaceUpdateFailure) { + MockURLRequestThrottlerHeaderAdapter failure_response("0", 505); + entry_->UpdateWithResponse(&failure_response); + EXPECT_GT(entry_->GetExponentialBackoffReleaseTime(), entry_->fake_time_now_) + << "A failure should increase the release_time"; +} + +TEST_F(URLRequestThrottlerEntryTest, InterfaceUpdateSuccess) { + MockURLRequestThrottlerHeaderAdapter success_response("0", 200); + entry_->UpdateWithResponse(&success_response); + EXPECT_EQ(entry_->GetExponentialBackoffReleaseTime(), entry_->fake_time_now_) + << "A success should not add any delay"; +} + +TEST_F(URLRequestThrottlerEntryTest, InterfaceUpdateSuccessThenFailure) { + MockURLRequestThrottlerHeaderAdapter failure_response("0", 500); + MockURLRequestThrottlerHeaderAdapter success_response("0", 200); + entry_->UpdateWithResponse(&success_response); + entry_->UpdateWithResponse(&failure_response); + EXPECT_GT(entry_->GetExponentialBackoffReleaseTime(), entry_->fake_time_now_) + << "This scenario should add delay"; +} + +TEST_F(URLRequestThrottlerEntryTest, IsEntryReallyOutdated) { + TimeDelta lifetime = TimeDelta::FromMilliseconds( + MockURLRequestThrottlerEntry::kDefaultEntryLifetimeMs); + const TimeDelta kFiveMs = TimeDelta::FromMilliseconds(5); + + TimeAndBool test_values[] = { + TimeAndBool(now_, false, __LINE__), + TimeAndBool(now_ - kFiveMs, false, __LINE__), + TimeAndBool(now_ + kFiveMs, false, __LINE__), + TimeAndBool(now_ - lifetime, false, __LINE__), + TimeAndBool(now_ - (lifetime + kFiveMs), true, __LINE__)}; + + for (unsigned int i = 0; i < arraysize(test_values); ++i) { + entry_->set_exponential_backoff_release_time(test_values[i].time); + EXPECT_EQ(entry_->IsEntryOutdated(), test_values[i].result) << + "Test case #" << i << " line " << test_values[i].line << " failed"; + } +} + +TEST_F(URLRequestThrottlerEntryTest, MaxAllowedBackoff) { + for (int i = 0; i < 30; ++i) { + MockURLRequestThrottlerHeaderAdapter response_adapter("0.0", 505); + entry_->UpdateWithResponse(&response_adapter); + } + + TimeDelta delay = entry_->GetExponentialBackoffReleaseTime() - now_; + EXPECT_EQ(delay.InMilliseconds(), + MockURLRequestThrottlerEntry::kDefaultMaximumBackoffMs); +} + +TEST_F(URLRequestThrottlerEntryTest, MalformedContent) { + MockURLRequestThrottlerHeaderAdapter response_adapter("0.0", 505); + for (int i = 0; i < 5; ++i) + entry_->UpdateWithResponse(&response_adapter); + + TimeTicks release_after_failures = entry_->GetExponentialBackoffReleaseTime(); + + // Inform the entry that a response body was malformed, which is supposed to + // increase the back-off time. 
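+  // (ReceivedContentWasMalformed() adds 2 to the failure count, pushing the
+  // release time further out than after the five failures above.)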
+  entry_->ReceivedContentWasMalformed();
+  EXPECT_GT(entry_->GetExponentialBackoffReleaseTime(), release_after_failures);
+}
+
+TEST_F(URLRequestThrottlerEntryTest, SlidingWindow) {
+  int max_send = net::URLRequestThrottlerEntry::kDefaultMaxSendThreshold;
+  int sliding_window =
+      net::URLRequestThrottlerEntry::kDefaultSlidingWindowPeriodMs;
+
+  TimeTicks time_1 = entry_->fake_time_now_ +
+      TimeDelta::FromMilliseconds(sliding_window / 3);
+  TimeTicks time_2 = entry_->fake_time_now_ +
+      TimeDelta::FromMilliseconds(2 * sliding_window / 3);
+  TimeTicks time_3 = entry_->fake_time_now_ +
+      TimeDelta::FromMilliseconds(sliding_window);
+  TimeTicks time_4 = entry_->fake_time_now_ +
+      TimeDelta::FromMilliseconds(sliding_window + 2 * sliding_window / 3);
+
+  entry_->set_exponential_backoff_release_time(time_1);
+
+  for (int i = 0; i < max_send / 2; ++i) {
+    EXPECT_EQ(2 * sliding_window / 3,
+              entry_->ReserveSendingTimeForNextRequest(time_2));
+  }
+  EXPECT_EQ(time_2, entry_->sliding_window_release_time());
+
+  entry_->fake_time_now_ = time_3;
+
+  for (int i = 0; i < (max_send + 1) / 2; ++i)
+    EXPECT_EQ(0, entry_->ReserveSendingTimeForNextRequest(TimeTicks()));
+
+  EXPECT_EQ(time_4, entry_->sliding_window_release_time());
+}
+
+TEST(URLRequestThrottlerManager, IsUrlStandardised) {
+  MockURLRequestThrottlerManager manager;
+  GurlAndString test_values[] = {
+      GurlAndString(GURL("http://www.example.com"),
+                    std::string("http://www.example.com/"), __LINE__),
+      GurlAndString(GURL("http://www.Example.com"),
+                    std::string("http://www.example.com/"), __LINE__),
+      GurlAndString(GURL("http://www.ex4mple.com/Pr4c71c41"),
+                    std::string("http://www.ex4mple.com/pr4c71c41"), __LINE__),
+      GurlAndString(GURL("http://www.example.com/0/token/false"),
+                    std::string("http://www.example.com/0/token/false"),
+                    __LINE__),
+      GurlAndString(GURL("http://www.example.com/index.php?code=javascript"),
+                    std::string("http://www.example.com/index.php"), __LINE__),
+      GurlAndString(GURL("http://www.example.com/index.php?code=1#superEntry"),
+                    std::string("http://www.example.com/index.php"),
+                    __LINE__),
+      GurlAndString(GURL("http://www.example.com:1234/"),
+                    std::string("http://www.example.com:1234/"), __LINE__)};
+
+  for (unsigned int i = 0; i < arraysize(test_values); ++i) {
+    std::string temp = manager.DoGetUrlIdFromUrl(test_values[i].url);
+    EXPECT_EQ(temp, test_values[i].result) <<
+        "Test case #" << i << " line " << test_values[i].line << " failed";
+  }
+}
+
+TEST(URLRequestThrottlerManager, AreEntriesBeingCollected) {
+  MockURLRequestThrottlerManager manager;
+
+  manager.CreateEntry(true);  // true = Entry is outdated.
+  manager.CreateEntry(true);
+  manager.CreateEntry(true);
+  manager.DoGarbageCollectEntries();
+  EXPECT_EQ(0, manager.GetNumberOfEntries());
+
+  manager.CreateEntry(false);
+  manager.CreateEntry(false);
+  manager.CreateEntry(false);
+  manager.CreateEntry(true);
+  manager.DoGarbageCollectEntries();
+  EXPECT_EQ(3, manager.GetNumberOfEntries());
+}
+
+TEST(URLRequestThrottlerManager, IsHostBeingRegistered) {
+  MockURLRequestThrottlerManager manager;
+
+  manager.RegisterRequestUrl(GURL("http://www.example.com/"));
+  manager.RegisterRequestUrl(GURL("http://www.google.com/"));
+  manager.RegisterRequestUrl(GURL("http://www.google.com/index/0"));
+  manager.RegisterRequestUrl(GURL("http://www.google.com/index/0?code=1"));
+  manager.RegisterRequestUrl(GURL("http://www.google.com/index/0#lolsaure"));
+
+  EXPECT_EQ(3, manager.GetNumberOfEntries());
+}
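A minimal sketch of how a client of the new API is expected to drive it, using
only the functions introduced in this change; the |url| and |response_headers|
variables are illustrative placeholders, not part of the patch:

  // Register the URL and honor the back-off state before sending.
  scoped_refptr<net::URLRequestThrottlerEntryInterface> entry =
      net::URLRequestThrottlerManager::GetInstance()->RegisterRequestUrl(url);
  if (entry->IsDuringExponentialBackoff()) {
    // Mirrors URLRequestHttpJob: refuse to send while backing off.
    return net::ERR_TEMPORARILY_THROTTLED;
  }
  // ... issue the request; once the response headers are available:
  net::URLRequestThrottlerHeaderAdapter adapter(response_headers);
  entry->UpdateWithResponse(&adapter);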