author     mirandac@chromium.org    2011-06-28 18:12:42 +0000
committer  mirandac@chromium.org    2011-06-28 18:12:42 +0000
commit  c818d6f97d3470af453587298b138bd4d0040295 (patch)
tree    178564507157213b2efa45eeb508c121588bcd81 /content
parent  a06afcf63937e613cff0eb3b0abd1aa33f02f898 (diff)
Change the WebResourceService to use the SystemURLRequestContext, and fix a leak that this change exposes by ensuring that the SystemURLRequestContextGetter is deleted immediately once it is no longer needed.

BUG=86168
TEST=browser_tests complete on all bots with no leak_tracker firings.
Review URL: http://codereview.chromium.org/7044118

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@90799 0039d316-1c4b-4281-b951-d872f2087c98
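The key ownership change in this CL is that URLFetcher::Core now releases its URLRequestContextGetter reference on the IO thread as soon as the request completes, instead of holding it until the delegate runs. A minimal sketch of that pattern, using a hypothetical Fetch class (not code from this CL) with the ref-counting idioms of 2011-era Chromium:

    #include "base/memory/ref_counted.h"
    #include "net/url_request/url_request_context_getter.h"

    // Sketch only: illustrates the ownership pattern, not actual CL code.
    class Fetch {
     public:
      explicit Fetch(net::URLRequestContextGetter* getter) : getter_(getter) {}

      // Called on the IO thread once the underlying request has finished.
      void OnRequestComplete() {
        // Dropping the reference here lets the getter (and the context it
        // owns) be destroyed as soon as no other fetch needs it, rather
        // than surviving until the delegate is notified on its own thread.
        getter_ = NULL;
      }

     private:
      scoped_refptr<net::URLRequestContextGetter> getter_;
    };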
Diffstat (limited to 'content')
-rw-r--r--  content/common/url_fetcher.cc           126
-rw-r--r--  content/common/url_fetcher.h              15
-rw-r--r--  content/common/url_fetcher_unittest.cc     6
3 files changed, 86 insertions, 61 deletions
diff --git a/content/common/url_fetcher.cc b/content/common/url_fetcher.cc
index 0fb1f0d..d23ef10 100644
--- a/content/common/url_fetcher.cc
+++ b/content/common/url_fetcher.cc
@@ -172,7 +172,8 @@ class URLFetcher::Core
void StartURLRequest();
void StartURLRequestWhenAppropriate();
void CancelURLRequest();
- void OnCompletedURLRequest(const net::URLRequestStatus& status);
+ void OnCompletedURLRequest(const net::URLRequestStatus& status,
+ base::TimeDelta backoff_delay);
void InformDelegateFetchIsComplete();
void NotifyMalformedContent();
@@ -257,10 +258,6 @@ class URLFetcher::Core
// True if the URLFetcher has been cancelled.
bool was_cancelled_;
- // Since GetBackoffReleaseTime() can only be called on the IO thread, we cache
- // its value to be used by OnCompletedURLRequest on the creating thread.
- base::TimeTicks backoff_release_time_;
-
// If writing results to a file, |temp_file_writer_| will manage creation,
// writing, and destruction of that file.
scoped_ptr<TempFileWriter> temp_file_writer_;
@@ -268,6 +265,14 @@ class URLFetcher::Core
// Where should responses be saved?
ResponseDestinationType response_destination_;
+ // If |automatically_retry_on_5xx_| is false, 5xx responses will be
+ // propagated to the observer, if it is true URLFetcher will automatically
+ // re-execute the request, after the back-off delay has expired.
+ // true by default.
+ bool automatically_retry_on_5xx_;
+ // Maximum retries allowed.
+ int max_retries_;
+
static base::LazyInstance<Registry> g_registry;
friend class URLFetcher;
@@ -442,9 +447,7 @@ URLFetcher::URLFetcher(const GURL& url,
RequestType request_type,
Delegate* d)
: ALLOW_THIS_IN_INITIALIZER_LIST(
- core_(new Core(this, url, request_type, d))),
- automatically_retry_on_5xx_(true),
- max_retries_(0) {
+ core_(new Core(this, url, request_type, d))) {
}
URLFetcher::~URLFetcher() {
@@ -476,7 +479,9 @@ URLFetcher::Core::Core(URLFetcher* fetcher,
is_chunked_upload_(false),
num_retries_(0),
was_cancelled_(false),
- response_destination_(STRING) {
+ response_destination_(STRING),
+ automatically_retry_on_5xx_(true),
+ max_retries_(0) {
}
URLFetcher::Core::~Core() {
@@ -488,7 +493,12 @@ URLFetcher::Core::~Core() {
void URLFetcher::Core::Start() {
DCHECK(delegate_loop_proxy_);
CHECK(request_context_getter_) << "We need an URLRequestContext!";
- io_message_loop_proxy_ = request_context_getter_->GetIOMessageLoopProxy();
+ if (io_message_loop_proxy_) {
+ DCHECK_EQ(io_message_loop_proxy_,
+ request_context_getter_->GetIOMessageLoopProxy());
+ } else {
+ io_message_loop_proxy_ = request_context_getter_->GetIOMessageLoopProxy();
+ }
CHECK(io_message_loop_proxy_.get()) << "We need an IO message loop proxy";
switch (response_destination_) {
@@ -567,7 +577,6 @@ void URLFetcher::Core::AppendChunkToUpload(const std::string& content,
NewRunnableMethod(this, &Core::CompleteAddingUploadDataChunk, content,
is_last_chunk));
}
-
// Return true if the write was done and reading may continue.
// Return false if the write is pending, and the next read will
// be done later.
@@ -614,23 +623,51 @@ void URLFetcher::Core::OnReadCompleted(net::URLRequest* request,
}
} while (request_->Read(buffer_, kBufferSize, &bytes_read));
- if (request_->status().is_success())
+ const net::URLRequestStatus status = request_->status();
+
+ if (status.is_success())
request_->GetResponseCookies(&cookies_);
// See comments re: HEAD requests in ReadResponse().
- if ((!request_->status().is_io_pending() && !waiting_on_write) ||
+ if ((!status.is_io_pending() && !waiting_on_write) ||
(request_type_ == HEAD)) {
- backoff_release_time_ = GetBackoffReleaseTime();
+ base::TimeTicks backoff_release_time = GetBackoffReleaseTime();
+ ReleaseRequest();
+
+ base::TimeDelta backoff_delay;
+ // Checks the response from server.
+ if (response_code_ >= 500 ||
+ status.os_error() == net::ERR_TEMPORARILY_THROTTLED) {
+ // When encountering a server error, we will send the request again
+ // after backoff time.
+ ++num_retries_;
+ // Note that backoff_delay_ may be 0 because (a) the URLRequestThrottler
+ // code does not necessarily back off on the first error, and (b) it
+ // only backs off on some of the 5xx status codes.
+ backoff_delay = backoff_release_time - base::TimeTicks::Now();
+ if (backoff_delay < base::TimeDelta())
+ backoff_delay = base::TimeDelta();
+
+ if (automatically_retry_on_5xx_ &&
+ num_retries_ <= max_retries_) {
+ StartURLRequestWhenAppropriate();
+ return;
+ }
+ } else {
+ backoff_delay = base::TimeDelta();
+ }
bool posted = delegate_loop_proxy_->PostTask(
FROM_HERE,
NewRunnableMethod(this,
&Core::OnCompletedURLRequest,
- request_->status()));
+ status,
+ backoff_delay));
+
// If the delegate message loop does not exist any more, then the delegate
// should be gone too.
DCHECK(posted || !delegate_);
- ReleaseRequest();
+ request_context_getter_ = NULL;
}
}
@@ -749,41 +786,15 @@ void URLFetcher::Core::CancelURLRequest() {
}
void URLFetcher::Core::OnCompletedURLRequest(
- const net::URLRequestStatus& status) {
+ const net::URLRequestStatus& status,
+ base::TimeDelta backoff_delay) {
DCHECK(delegate_loop_proxy_->BelongsToCurrentThread());
- // Save the status so that delegates can read it.
- status_ = status;
-
- // Checks the response from server.
- if (response_code_ >= 500 ||
- status.os_error() == net::ERR_TEMPORARILY_THROTTLED) {
- // When encountering a server error, we will send the request again
- // after backoff time.
- ++num_retries_;
- // Restarts the request if we still need to notify the delegate.
- if (delegate_) {
- // Note that backoff_delay_ may be 0 because (a) the URLRequestThrottler
- // code does not necessarily back off on the first error, and (b) it
- // only backs off on some of the 5xx status codes.
- fetcher_->backoff_delay_ = backoff_release_time_ - base::TimeTicks::Now();
- if (fetcher_->backoff_delay_ < base::TimeDelta())
- fetcher_->backoff_delay_ = base::TimeDelta();
-
- if (fetcher_->automatically_retry_on_5xx_ &&
- num_retries_ <= fetcher_->max_retries()) {
- io_message_loop_proxy_->PostTask(
- FROM_HERE,
- NewRunnableMethod(this, &Core::StartURLRequestWhenAppropriate));
- } else {
- InformDelegateFetchIsComplete();
- }
- }
- } else {
- if (delegate_) {
- fetcher_->backoff_delay_ = base::TimeDelta();
- InformDelegateFetchIsComplete();
- }
+ // Save the status and backoff_delay so that delegates can read it.
+ if (delegate_) {
+ status_ = status;
+ fetcher_->backoff_delay_ = backoff_delay;
+ InformDelegateFetchIsComplete();
}
}
@@ -879,11 +890,20 @@ void URLFetcher::GetExtraRequestHeaders(net::HttpRequestHeaders* headers) {
void URLFetcher::set_request_context(
net::URLRequestContextGetter* request_context_getter) {
+ DCHECK(!core_->request_context_getter_);
core_->request_context_getter_ = request_context_getter;
}
void URLFetcher::set_automatically_retry_on_5xx(bool retry) {
- automatically_retry_on_5xx_ = retry;
+ core_->automatically_retry_on_5xx_ = retry;
+}
+
+int URLFetcher::max_retries() const {
+ return core_->max_retries_;
+}
+
+void URLFetcher::set_max_retries(int max_retries) {
+ core_->max_retries_ = max_retries;
}
void URLFetcher::SaveResponseToTemporaryFile(
@@ -911,6 +931,12 @@ void URLFetcher::Start() {
core_->Start();
}
+void URLFetcher::StartWithRequestContextGetter(
+ net::URLRequestContextGetter* request_context_getter) {
+ set_request_context(request_context_getter);
+ core_->Start();
+}
+
const GURL& URLFetcher::url() const {
return core_->url_;
}
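Taken together, the url_fetcher.cc changes move the 5xx/throttle retry decision from the delegate thread into OnReadCompleted on the IO thread, which now computes the backoff delay itself. A minimal sketch of the clamping step, written as a hypothetical helper (ComputeBackoffDelay is not a function in this CL):

    #include "base/time.h"

    // Sketch: clamp the throttler's release time to a non-negative delay.
    base::TimeDelta ComputeBackoffDelay(base::TimeTicks release_time) {
      base::TimeDelta delay = release_time - base::TimeTicks::Now();
      // The throttler does not necessarily back off on the first error and
      // only backs off on some 5xx codes, so a zero delay is legitimate;
      // a negative one is not.
      if (delay < base::TimeDelta())
        delay = base::TimeDelta();
      return delay;
    }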
diff --git a/content/common/url_fetcher.h b/content/common/url_fetcher.h
index 64aa3ea..0eaa7c7 100644
--- a/content/common/url_fetcher.h
+++ b/content/common/url_fetcher.h
@@ -187,9 +187,9 @@ class URLFetcher {
// after backoff_delay() elapses. URLFetcher has it set to true by default.
void set_automatically_retry_on_5xx(bool retry);
- int max_retries() const { return max_retries_; }
+ int max_retries() const;
- void set_max_retries(int max_retries) { max_retries_ = max_retries; }
+ void set_max_retries(int max_retries);
// Returns the back-off delay before the request will be retried,
// when a 5xx response was received.
@@ -226,6 +226,10 @@ class URLFetcher {
// settings.
virtual void Start();
+ // Restarts the URLFetcher with a new URLRequestContextGetter.
+ void StartWithRequestContextGetter(
+ net::URLRequestContextGetter* request_context_getter);
+
// Return the URL that this fetcher is processing.
virtual const GURL& url() const;
@@ -302,15 +306,8 @@ class URLFetcher {
static Factory* factory_;
- // If |automatically_retry_on_5xx_| is false, 5xx responses will be
- // propagated to the observer, if it is true URLFetcher will automatically
- // re-execute the request, after the back-off delay has expired.
- // true by default.
- bool automatically_retry_on_5xx_;
// Back-off time delay. 0 by default.
base::TimeDelta backoff_delay_;
- // Maximum retries allowed.
- int max_retries_;
static bool g_interception_enabled;
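The new StartWithRequestContextGetter exists because a fetcher that has completed a request has already cleared Core::request_context_getter_, so a retry needs a fresh getter installed before Start(). A hedged usage sketch (RetryFetch is illustrative, not from this CL), mirroring the unit-test changes below:

    // Sketch: restarting a finished fetcher with a fresh context getter.
    void RetryFetch(URLFetcher* fetcher,
                    net::URLRequestContextGetter* fresh_getter) {
      // The previous request reset the getter to NULL on completion, so
      // installing a new one and starting in a single call is the
      // supported way to re-run the fetch.
      fetcher->StartWithRequestContextGetter(fresh_getter);
    }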
diff --git a/content/common/url_fetcher_unittest.cc b/content/common/url_fetcher_unittest.cc
index b2c9979..040392b 100644
--- a/content/common/url_fetcher_unittest.cc
+++ b/content/common/url_fetcher_unittest.cc
@@ -488,7 +488,8 @@ void URLFetcherProtectTest::OnURLFetchComplete(
static int count = 0;
count++;
if (count < 20) {
- fetcher_->Start();
+ fetcher_->StartWithRequestContextGetter(new TestURLRequestContextGetter(
+ io_message_loop_proxy()));
} else {
// We have already sent 20 requests continuously. And we expect that
// it takes more than 1 second due to the overload protection settings.
@@ -613,7 +614,8 @@ void URLFetcherMultipleAttemptTest::OnURLFetchComplete(
EXPECT_FALSE(data.empty());
if (!data.empty() && data_.empty()) {
data_ = data;
- fetcher_->Start();
+ fetcher_->StartWithRequestContextGetter(
+ new TestURLRequestContextGetter(io_message_loop_proxy()));
} else {
EXPECT_EQ(data, data_);
delete fetcher_; // Have to delete this here and not in the destructor,