summaryrefslogtreecommitdiffstats
path: root/net/url_request
diff options
context:
space:
mode:
authorbattre@chromium.org <battre@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>2011-10-05 13:12:51 +0000
committerbattre@chromium.org <battre@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>2011-10-05 13:12:51 +0000
commitea8141e730305034523a814c3801852795bf0bad (patch)
treeb2955debe382038efeadab26314fff84c3f0cd93 /net/url_request
parentec1a9465933f39d0e462e252c0981379f494b6fc (diff)
downloadchromium_src-ea8141e730305034523a814c3801852795bf0bad.zip
chromium_src-ea8141e730305034523a814c3801852795bf0bad.tar.gz
chromium_src-ea8141e730305034523a814c3801852795bf0bad.tar.bz2
Blocking onHeadersReceived signal for webRequest API
This is a new signal that allows extensions to - Modify/Delete cookies of HTTP responses before they are seen by the cookie monster. - Cancel requests based on the length or content type of the HTTP response. BUG=89118 TEST=no Review URL: http://codereview.chromium.org/7931026 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@104091 0039d316-1c4b-4281-b951-d872f2087c98
Diffstat (limited to 'net/url_request')
-rw-r--r--net/url_request/url_request.cc13
-rw-r--r--net/url_request/url_request_http_job.cc115
-rw-r--r--net/url_request/url_request_http_job.h25
-rw-r--r--net/url_request/url_request_job.cc3
-rw-r--r--net/url_request/url_request_job.h6
-rw-r--r--net/url_request/url_request_test_util.cc37
-rw-r--r--net/url_request/url_request_test_util.h10
7 files changed, 161 insertions, 48 deletions
diff --git a/net/url_request/url_request.cc b/net/url_request/url_request.cc
index acc085e..f6af23b 100644
--- a/net/url_request/url_request.cc
+++ b/net/url_request/url_request.cc
@@ -160,8 +160,11 @@ URLRequest::URLRequest(const GURL& url, Delegate* delegate)
URLRequest::~URLRequest() {
Cancel();
- if (context_ && context_->network_delegate())
+ if (context_ && context_->network_delegate()) {
context_->network_delegate()->NotifyURLRequestDestroyed(this);
+ if (job_)
+ job_->NotifyURLRequestDestroyed();
+ }
if (job_)
OrphanJob();
@@ -655,6 +658,14 @@ void URLRequest::PrepareToRestart() {
}
void URLRequest::OrphanJob() {
+ // When calling this function, please check that URLRequestHttpJob is
+ // not in between calling NetworkDelegate::NotifyHeadersReceived and
+ // receiving the callback. This is currently guaranteed by the following
+ // strategies:
+ // - OrphanJob is called on JobRestart, in this case the URLRequestJob cannot
+ // be receiving any headers at that time.
+ // - OrphanJob is called in ~URLRequest, in this case
+ // NetworkDelegate::NotifyURLRequestDestroyed notifies the NetworkDelegate
+ // that the callback becomes invalid.
job_->Kill();
job_->DetachRequest(); // ensures that the job will not call us again
job_ = NULL;
diff --git a/net/url_request/url_request_http_job.cc b/net/url_request/url_request_http_job.cc
index 08b7762..36dd61a 100644
--- a/net/url_request/url_request_http_job.cc
+++ b/net/url_request/url_request_http_job.cc
@@ -45,13 +45,6 @@
static const char kAvailDictionaryHeader[] = "Avail-Dictionary";
-// When histogramming results related to SDCH and/or an SDCH latency test, the
-// number of packets for which we need to record arrival times so as to
-// calculate interpacket latencies. We currently are only looking at the
-// first few packets, as we're monitoring the impact of the initial TCP
-// congestion window on stalling of transmissions.
-static const size_t kSdchPacketHistogramCount = 5;
-
namespace net {
namespace {
@@ -234,7 +227,11 @@ URLRequestHttpJob::URLRequestHttpJob(URLRequest* request)
ALLOW_THIS_IN_INITIALIZER_LIST(
filter_context_(new HttpFilterContext(this))),
ALLOW_THIS_IN_INITIALIZER_LIST(method_factory_(this)),
- weak_ptr_factory_(ALLOW_THIS_IN_INITIALIZER_LIST(this)) {
+ weak_ptr_factory_(ALLOW_THIS_IN_INITIALIZER_LIST(this)),
+ ALLOW_THIS_IN_INITIALIZER_LIST(
+ on_headers_received_callback_(
+ this, &URLRequestHttpJob::OnHeadersReceivedCallback)),
+ awaiting_callback_(false) {
ResetTimer();
}
@@ -248,8 +245,7 @@ void URLRequestHttpJob::NotifyHeadersComplete() {
is_cached_content_ = response_info_->was_cached;
if (!is_cached_content_) {
- URLRequestThrottlerHeaderAdapter response_adapter(
- response_info_->headers);
+ URLRequestThrottlerHeaderAdapter response_adapter(GetResponseHeaders());
throttling_entry_->UpdateWithResponse(request_info_.url.host(),
&response_adapter);
}
@@ -267,7 +263,7 @@ void URLRequestHttpJob::NotifyHeadersComplete() {
// require multiple suggestions before we get additional ones for this site.
// Eventually we should wait until a dictionary is requested several times
// before we even download it (so that we don't waste memory or bandwidth).
- if (response_info_->headers->EnumerateHeader(&iter, name, &url_text)) {
+ if (GetResponseHeaders()->EnumerateHeader(&iter, name, &url_text)) {
// request_->url() won't be valid in the destructor, so we use an
// alternate copy.
DCHECK_EQ(request_->url(), request_info_.url);
@@ -526,7 +522,14 @@ void URLRequestHttpJob::DoStartTransaction() {
}
}
-void URLRequestHttpJob::SaveCookiesAndNotifyHeadersComplete() {
+void URLRequestHttpJob::SaveCookiesAndNotifyHeadersComplete(int result) {
+ if (result != net::OK) {
+ request_->net_log().AddEvent(NetLog::TYPE_CANCELLED,
+ make_scoped_refptr(new NetLogStringParameter("source", "delegate")));
+ NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED, result));
+ return;
+ }
+
DCHECK(transaction_.get());
const HttpResponseInfo* response_info = transaction_->GetResponseInfo();
@@ -535,7 +538,7 @@ void URLRequestHttpJob::SaveCookiesAndNotifyHeadersComplete() {
response_cookies_.clear();
response_cookies_save_index_ = 0;
- FetchResponseCookies(response_info, &response_cookies_);
+ FetchResponseCookies(&response_cookies_);
// Now, loop over the response cookies, and attempt to persist each.
SaveNextCookie();
@@ -586,13 +589,13 @@ void URLRequestHttpJob::CookieHandled() {
}
void URLRequestHttpJob::FetchResponseCookies(
- const HttpResponseInfo* response_info,
std::vector<std::string>* cookies) {
std::string name = "Set-Cookie";
std::string value;
void* iter = NULL;
- while (response_info->headers->EnumerateHeader(&iter, name, &value)) {
+ HttpResponseHeaders* headers = GetResponseHeaders();
+ while (headers->EnumerateHeader(&iter, name, &value)) {
if (!value.empty())
cookies->push_back(value);
}
@@ -615,8 +618,10 @@ void URLRequestHttpJob::ProcessStrictTransportSecurityHeader() {
int max_age;
bool include_subdomains;
+ HttpResponseHeaders* headers = GetResponseHeaders();
+
void* iter = NULL;
- while (response_info_->headers->EnumerateHeader(&iter, name, &value)) {
+ while (headers->EnumerateHeader(&iter, name, &value)) {
const bool ok = TransportSecurityState::ParseHeader(
value, &max_age, &include_subdomains);
if (!ok)
@@ -683,9 +688,31 @@ void URLRequestHttpJob::OnStartCompleted(int result) {
}
}
#endif
-
if (result == OK) {
- SaveCookiesAndNotifyHeadersComplete();
+ scoped_refptr<HttpResponseHeaders> headers = GetResponseHeaders();
+ if (request_->context() && request_->context()->network_delegate()) {
+ // Note that |this| may not be deleted until
+ // |on_headers_received_callback_| or
+ // |NetworkDelegate::URLRequestDestroyed()| has been called.
+ int error = request_->context()->network_delegate()->
+ NotifyHeadersReceived(request_, &on_headers_received_callback_,
+ headers, &override_response_headers_);
+ if (error != net::OK) {
+ if (error == net::ERR_IO_PENDING) {
+ awaiting_callback_ = true;
+ request_->net_log().BeginEvent(
+ NetLog::TYPE_URL_REQUEST_BLOCKED_ON_DELEGATE, NULL);
+ } else {
+ request_->net_log().AddEvent(NetLog::TYPE_CANCELLED,
+ make_scoped_refptr(
+ new NetLogStringParameter("source", "delegate")));
+ NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED, error));
+ }
+ return;
+ }
+ }
+
+ SaveCookiesAndNotifyHeadersComplete(net::OK);
} else if (IsCertificateError(result)) {
// We encountered an SSL certificate error. Ask our delegate to decide
// what we should do.
@@ -706,6 +733,13 @@ void URLRequestHttpJob::OnStartCompleted(int result) {
}
}
+void URLRequestHttpJob::OnHeadersReceivedCallback(int result) {
+ request_->net_log().EndEvent(
+ NetLog::TYPE_URL_REQUEST_BLOCKED_ON_DELEGATE, NULL);
+ awaiting_callback_ = false;
+ SaveCookiesAndNotifyHeadersComplete(result);
+}
+
void URLRequestHttpJob::OnReadCompleted(int result) {
read_in_progress_ = false;
@@ -739,8 +773,7 @@ void URLRequestHttpJob::RestartTransactionWithAuth(
// Update the cookies, since the cookie store may have been updated from the
// headers in the 401/407. Since cookies were already appended to
// extra_headers, we need to strip them out before adding them again.
- request_info_.extra_headers.RemoveHeader(
- HttpRequestHeaders::kCookie);
+ request_info_.extra_headers.RemoveHeader(HttpRequestHeaders::kCookie);
AddCookieHeaderAndStart();
}
@@ -814,7 +847,7 @@ bool URLRequestHttpJob::GetMimeType(std::string* mime_type) const {
if (!response_info_)
return false;
- return response_info_->headers->GetMimeType(mime_type);
+ return GetResponseHeaders()->GetMimeType(mime_type);
}
bool URLRequestHttpJob::GetCharset(std::string* charset) {
@@ -823,19 +856,21 @@ bool URLRequestHttpJob::GetCharset(std::string* charset) {
if (!response_info_)
return false;
- return response_info_->headers->GetCharset(charset);
+ return GetResponseHeaders()->GetCharset(charset);
}
void URLRequestHttpJob::GetResponseInfo(HttpResponseInfo* info) {
DCHECK(request_);
DCHECK(transaction_.get());
- if (response_info_)
+ if (response_info_) {
*info = *response_info_;
+ if (override_response_headers_)
+ info->headers = override_response_headers_;
+ }
}
-bool URLRequestHttpJob::GetResponseCookies(
- std::vector<std::string>* cookies) {
+bool URLRequestHttpJob::GetResponseCookies(std::vector<std::string>* cookies) {
DCHECK(transaction_.get());
if (!response_info_)
@@ -845,7 +880,7 @@ bool URLRequestHttpJob::GetResponseCookies(
// should just leverage response_cookies_.
cookies->clear();
- FetchResponseCookies(response_info_, cookies);
+ FetchResponseCookies(cookies);
return true;
}
@@ -855,7 +890,7 @@ int URLRequestHttpJob::GetResponseCode() const {
if (!response_info_)
return -1;
- return response_info_->headers->response_code();
+ return GetResponseHeaders()->response_code();
}
Filter* URLRequestHttpJob::SetupFilter() const {
@@ -865,9 +900,9 @@ Filter* URLRequestHttpJob::SetupFilter() const {
std::vector<Filter::FilterType> encoding_types;
std::string encoding_type;
+ HttpResponseHeaders* headers = GetResponseHeaders();
void* iter = NULL;
- while (response_info_->headers->EnumerateHeader(&iter, "Content-Encoding",
- &encoding_type)) {
+ while (headers->EnumerateHeader(&iter, "Content-Encoding", &encoding_type)) {
encoding_types.push_back(Filter::ConvertEncodingToType(encoding_type));
}
@@ -879,8 +914,8 @@ Filter* URLRequestHttpJob::SetupFilter() const {
// encoded).
std::string sdch_response_status;
iter = NULL;
- while (response_info_->headers->EnumerateHeader(&iter, "X-Sdch-Encode",
- &sdch_response_status)) {
+ while (headers->EnumerateHeader(&iter, "X-Sdch-Encode",
+ &sdch_response_status)) {
if (sdch_response_status == "0") {
filter_context_->ResetSdchResponseToFalse();
break;
@@ -950,8 +985,8 @@ void URLRequestHttpJob::GetAuthChallengeInfo(
// sanity checks:
DCHECK(proxy_auth_state_ == AUTH_STATE_NEED_AUTH ||
server_auth_state_ == AUTH_STATE_NEED_AUTH);
- DCHECK(response_info_->headers->response_code() == 401 ||
- response_info_->headers->response_code() == 407);
+ DCHECK(GetResponseHeaders()->response_code() == 401 ||
+ GetResponseHeaders()->response_code() == 407);
*result = response_info_->auth_challenge;
}
@@ -1115,6 +1150,8 @@ HostPortPair URLRequestHttpJob::GetSocketAddress() const {
}
URLRequestHttpJob::~URLRequestHttpJob() {
+ CHECK(!awaiting_callback_);
+
DCHECK(!sdch_test_control_ || !sdch_test_activated_);
if (!is_cached_content_) {
if (sdch_test_control_)
@@ -1378,4 +1415,16 @@ void URLRequestHttpJob::DoneWithRequest(CompletionCause reason) {
RecordCompressionHistograms();
}
+HttpResponseHeaders* URLRequestHttpJob::GetResponseHeaders() const {
+ DCHECK(transaction_.get());
+ DCHECK(transaction_->GetResponseInfo());
+ return override_response_headers_.get() ?
+ override_response_headers_ :
+ transaction_->GetResponseInfo()->headers;
+}
+
+void URLRequestHttpJob::NotifyURLRequestDestroyed() {
+ awaiting_callback_ = false;
+}
+
} // namespace net
diff --git a/net/url_request/url_request_http_job.h b/net/url_request/url_request_http_job.h
index 7b09dd4..d802186 100644
--- a/net/url_request/url_request_http_job.h
+++ b/net/url_request/url_request_http_job.h
@@ -9,6 +9,7 @@
#include <string>
#include <vector>
+#include "base/compiler_specific.h"
#include "base/memory/scoped_ptr.h"
#include "base/memory/weak_ptr.h"
#include "base/string16.h"
@@ -23,6 +24,7 @@
namespace net {
+class HttpResponseHeaders;
class HttpResponseInfo;
class HttpTransaction;
class URLRequestContext;
@@ -47,14 +49,15 @@ class URLRequestHttpJob : public URLRequestJob {
void AddExtraHeaders();
void AddCookieHeaderAndStart();
- void SaveCookiesAndNotifyHeadersComplete();
+ void SaveCookiesAndNotifyHeadersComplete(int result);
void SaveNextCookie();
- void FetchResponseCookies(const HttpResponseInfo* response_info,
- std::vector<std::string>* cookies);
+ void FetchResponseCookies(std::vector<std::string>* cookies);
// Process the Strict-Transport-Security header, if one exists.
void ProcessStrictTransportSecurityHeader();
+ // |result| should be net::OK, or the request is canceled.
+ void OnHeadersReceivedCallback(int result);
void OnStartCompleted(int result);
void OnReadCompleted(int result);
void NotifyBeforeSendHeadersCallback(int result);
@@ -89,6 +92,7 @@ class URLRequestHttpJob : public URLRequestJob {
virtual void StopCaching() OVERRIDE;
virtual void DoneReading() OVERRIDE;
virtual HostPortPair GetSocketAddress() const OVERRIDE;
+ virtual void NotifyURLRequestDestroyed() OVERRIDE;
// Keep a reference to the url request context to be sure it's not deleted
// before us.
@@ -180,6 +184,10 @@ class URLRequestHttpJob : public URLRequestJob {
// of bytes read or, if negative, an error code.
bool ShouldFixMismatchedContentLength(int rv) const;
+ // Returns the effective response headers, considering that they may be
+ // overridden by |override_response_headers_|.
+ HttpResponseHeaders* GetResponseHeaders() const;
+
base::Time request_creation_time_;
// Data used for statistics gathering. This data is only used for histograms
@@ -214,6 +222,17 @@ class URLRequestHttpJob : public URLRequestJob {
ScopedRunnableMethodFactory<URLRequestHttpJob> method_factory_;
base::WeakPtrFactory<URLRequestHttpJob> weak_ptr_factory_;
+ OldCompletionCallbackImpl<URLRequestHttpJob> on_headers_received_callback_;
+
+ scoped_refptr<HttpResponseHeaders> override_response_headers_;
+
+ // Flag used to verify that |this| is not deleted while we are awaiting
+ // a callback from the NetworkDelegate. Used as a fail-fast mechanism.
+ // True if we are awaiting a callback and
+ // NetworkDelegate::NotifyURLRequestDestroyed has not yet been called
+ // to inform the NetworkDelegate that it may not call back.
+ bool awaiting_callback_;
+
DISALLOW_COPY_AND_ASSIGN(URLRequestHttpJob);
};
diff --git a/net/url_request/url_request_job.cc b/net/url_request/url_request_job.cc
index 2a23d61..8390d8c 100644
--- a/net/url_request/url_request_job.cc
+++ b/net/url_request/url_request_job.cc
@@ -214,6 +214,9 @@ void URLRequestJob::OnSuspend() {
Kill();
}
+void URLRequestJob::NotifyURLRequestDestroyed() {
+}
+
URLRequestJob::~URLRequestJob() {
base::SystemMonitor* system_monitor = base::SystemMonitor::Get();
if (system_monitor)
diff --git a/net/url_request/url_request_job.h b/net/url_request/url_request_job.h
index 4231ddd..c219a3a 100644
--- a/net/url_request/url_request_job.h
+++ b/net/url_request/url_request_job.h
@@ -190,6 +190,12 @@ class NET_EXPORT URLRequestJob : public base::RefCounted<URLRequestJob>,
// We invoke URLRequestJob::Kill on suspend (crbug.com/4606).
virtual void OnSuspend();
+ // Called after a NetworkDelegate has been informed that the URLRequest
+ // will be destroyed. This is used to track that no pending callbacks
+ // exist at destruction time of the URLRequestJob, unless they have been
+ // canceled by an explicit NetworkDelegate::NotifyURLRequestDestroyed() call.
+ virtual void NotifyURLRequestDestroyed();
+
protected:
friend class base::RefCounted<URLRequestJob>;
virtual ~URLRequestJob();
diff --git a/net/url_request/url_request_test_util.cc b/net/url_request/url_request_test_util.cc
index 6d4f167..a0fd90f 100644
--- a/net/url_request/url_request_test_util.cc
+++ b/net/url_request/url_request_test_util.cc
@@ -21,13 +21,14 @@ namespace {
const int kStageBeforeURLRequest = 1 << 0;
const int kStageBeforeSendHeaders = 1 << 1;
const int kStageSendHeaders = 1 << 2;
-const int kStageAuthRequired = 1 << 3;
-const int kStageBeforeRedirect = 1 << 4;
-const int kStageResponseStarted = 1 << 5;
-const int kStageCompletedSuccess = 1 << 6;
-const int kStageCompletedError = 1 << 7;
-const int kStageURLRequestDestroyed = 1 << 8;
-const int kStageDestruction = 1 << 9;
+const int kStageHeadersReceived = 1 << 3;
+const int kStageAuthRequired = 1 << 4;
+const int kStageBeforeRedirect = 1 << 5;
+const int kStageResponseStarted = 1 << 6;
+const int kStageCompletedSuccess = 1 << 7;
+const int kStageCompletedError = 1 << 8;
+const int kStageURLRequestDestroyed = 1 << 9;
+const int kStageDestruction = 1 << 10;
} // namespace
@@ -372,6 +373,21 @@ void TestNetworkDelegate::OnSendHeaders(
EXPECT_TRUE(next_states_[req_id] & kStageSendHeaders) <<
event_order_[req_id];
next_states_[req_id] =
+ kStageHeadersReceived |
+ kStageCompletedError;
+}
+
+int TestNetworkDelegate::OnHeadersReceived(
+ net::URLRequest* request,
+ net::OldCompletionCallback* callback,
+ net::HttpResponseHeaders* original_response_headers,
+ scoped_refptr<net::HttpResponseHeaders>* override_response_headers) {
+ int req_id = request->identifier();
+ event_order_[req_id] += "OnHeadersReceived\n";
+ InitRequestStatesIfNew(req_id);
+ EXPECT_TRUE(next_states_[req_id] & kStageHeadersReceived) <<
+ event_order_[req_id];
+ next_states_[req_id] =
kStageBeforeRedirect |
kStageResponseStarted |
kStageAuthRequired |
@@ -380,6 +396,8 @@ void TestNetworkDelegate::OnSendHeaders(
// Basic authentication sends a second request from the URLRequestHttpJob
// layer before the URLRequest reports that a response has started.
next_states_[req_id] |= kStageBeforeSendHeaders;
+
+ return net::OK;
}
void TestNetworkDelegate::OnBeforeRedirect(net::URLRequest* request,
@@ -464,8 +482,9 @@ net::NetworkDelegate::AuthRequiredResponse TestNetworkDelegate::OnAuthRequired(
EXPECT_TRUE(next_states_[req_id] & kStageAuthRequired) <<
event_order_[req_id];
next_states_[req_id] = kStageBeforeSendHeaders |
+ kStageHeadersReceived | // Request canceled by delegate simulates empty
+ // response.
kStageResponseStarted | // data: URLs do not trigger sending headers
- kStageBeforeRedirect | // a delegate can trigger a redirection
- kStageCompletedError; // request canceled by delegate
+ kStageBeforeRedirect; // a delegate can trigger a redirection
return net::NetworkDelegate::AUTH_REQUIRED_RESPONSE_NO_ACTION;
}
diff --git a/net/url_request/url_request_test_util.h b/net/url_request/url_request_test_util.h
index 3446b59..56a8973 100644
--- a/net/url_request/url_request_test_util.h
+++ b/net/url_request/url_request_test_util.h
@@ -194,12 +194,18 @@ class TestNetworkDelegate : public net::NetworkDelegate {
// net::NetworkDelegate:
virtual int OnBeforeURLRequest(net::URLRequest* request,
net::OldCompletionCallback* callback,
- GURL* new_url);
+ GURL* new_url) OVERRIDE;
virtual int OnBeforeSendHeaders(net::URLRequest* request,
net::OldCompletionCallback* callback,
- net::HttpRequestHeaders* headers);
+ net::HttpRequestHeaders* headers) OVERRIDE;
virtual void OnSendHeaders(net::URLRequest* request,
const net::HttpRequestHeaders& headers) OVERRIDE;
+ virtual int OnHeadersReceived(
+ net::URLRequest* request,
+ net::OldCompletionCallback* callback,
+ net::HttpResponseHeaders* original_response_headers,
+ scoped_refptr<net::HttpResponseHeaders>* override_response_headers)
+ OVERRIDE;
virtual void OnBeforeRedirect(net::URLRequest* request,
const GURL& new_location) OVERRIDE;
virtual void OnResponseStarted(net::URLRequest* request) OVERRIDE;