summaryrefslogtreecommitdiffstats
path: root/net/url_request
diff options
context:
space:
mode:
authorinitial.commit <initial.commit@0039d316-1c4b-4281-b951-d872f2087c98>2008-07-26 22:42:52 +0000
committerinitial.commit <initial.commit@0039d316-1c4b-4281-b951-d872f2087c98>2008-07-26 22:42:52 +0000
commit586acc5fe142f498261f52c66862fa417c3d52d2 (patch)
treec98b3417a883f2477029c8cd5888f4078681e24e /net/url_request
parenta814a8d55429605fe6d7045045cd25b6bf624580 (diff)
downloadchromium_src-586acc5fe142f498261f52c66862fa417c3d52d2.zip
chromium_src-586acc5fe142f498261f52c66862fa417c3d52d2.tar.gz
chromium_src-586acc5fe142f498261f52c66862fa417c3d52d2.tar.bz2
Add net to the repository.
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@14 0039d316-1c4b-4281-b951-d872f2087c98
Diffstat (limited to 'net/url_request')
-rw-r--r--net/url_request/mime_sniffer_proxy.cc94
-rw-r--r--net/url_request/mime_sniffer_proxy.h100
-rw-r--r--net/url_request/url_request.cc360
-rw-r--r--net/url_request/url_request.h542
-rw-r--r--net/url_request/url_request_about_job.cc62
-rw-r--r--net/url_request/url_request_about_job.h49
-rw-r--r--net/url_request/url_request_context.h105
-rw-r--r--net/url_request/url_request_error_job.cc46
-rw-r--r--net/url_request/url_request_error_job.h49
-rw-r--r--net/url_request/url_request_file_dir_job.cc207
-rw-r--r--net/url_request/url_request_file_dir_job.h85
-rw-r--r--net/url_request/url_request_file_job.cc349
-rw-r--r--net/url_request/url_request_file_job.h98
-rw-r--r--net/url_request/url_request_filter.cc156
-rw-r--r--net/url_request/url_request_filter.h105
-rw-r--r--net/url_request/url_request_ftp_job.cc547
-rw-r--r--net/url_request/url_request_ftp_job.h133
-rw-r--r--net/url_request/url_request_http_cache_job.cc539
-rw-r--r--net/url_request/url_request_http_cache_job.h112
-rw-r--r--net/url_request/url_request_inet_job.cc462
-rw-r--r--net/url_request/url_request_inet_job.h184
-rw-r--r--net/url_request/url_request_job.cc497
-rw-r--r--net/url_request/url_request_job.h336
-rw-r--r--net/url_request/url_request_job_manager.cc178
-rw-r--r--net/url_request/url_request_job_manager.h100
-rw-r--r--net/url_request/url_request_job_metrics.cc57
-rw-r--r--net/url_request/url_request_job_metrics.h74
-rw-r--r--net/url_request/url_request_job_tracker.cc82
-rw-r--r--net/url_request/url_request_job_tracker.h117
-rw-r--r--net/url_request/url_request_simple_job.cc81
-rw-r--r--net/url_request/url_request_simple_job.h60
-rw-r--r--net/url_request/url_request_status.h91
-rw-r--r--net/url_request/url_request_test_job.cc204
-rw-r--r--net/url_request/url_request_test_job.h110
-rw-r--r--net/url_request/url_request_unittest.cc792
-rw-r--r--net/url_request/url_request_unittest.h380
-rw-r--r--net/url_request/url_request_view_cache_job.cc194
-rw-r--r--net/url_request/url_request_view_cache_job.h56
38 files changed, 7793 insertions, 0 deletions
diff --git a/net/url_request/mime_sniffer_proxy.cc b/net/url_request/mime_sniffer_proxy.cc
new file mode 100644
index 0000000..2a0dec6
--- /dev/null
+++ b/net/url_request/mime_sniffer_proxy.cc
@@ -0,0 +1,94 @@
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "net/url_request/mime_sniffer_proxy.h"
+
+#include "net/base/mime_sniffer.h"
+
+// Interposes this proxy as the URLRequest's delegate so that all delegate
+// callbacks flow through the proxy first; the real delegate is remembered
+// in |delegate_| and forwarded to after sniffing.  Sniffing state starts
+// off (no content buffered, no error).
+MimeSnifferProxy::MimeSnifferProxy(URLRequest* request,
+ URLRequest::Delegate* delegate)
+ : request_(request), delegate_(delegate),
+ sniff_content_(false), error_(false) {
+ request->set_delegate(this);
+}
+
+// Delegate hook: the response headers have arrived.  If mime_util says the
+// job-reported MIME type for this URL should be sniffed, issue one Read()
+// into |buf_| and defer OnResponseStarted until that read completes
+// (synchronously via OnReadCompleted, or later if IO is pending).  On a
+// synchronous failure that is not IO_PENDING, record the error and report
+// the response started so the delegate's first Read() can surface it.
+void MimeSnifferProxy::OnResponseStarted(URLRequest* request) {
+ if (request->status().is_success()) {
+ request->GetMimeType(&mime_type_);
+ if (mime_util::ShouldSniffMimeType(request->url(), mime_type_)) {
+ // We need to read content before we know the mime type,
+ // so we don't call OnResponseStarted.
+ sniff_content_ = true;
+ if (request_->Read(buf_, sizeof(buf_), &bytes_read_) && bytes_read_) {
+ OnReadCompleted(request, bytes_read_);
+ } else if (!request_->status().is_io_pending()) {
+ error_ = true;
+ delegate_->OnResponseStarted(request);
+ } // Otherwise, IO pending. Wait for OnReadCompleted.
+ return;
+ }
+ }
+ delegate_->OnResponseStarted(request);
+}
+
+// Wrapper around URLRequest::Read().  The first call after sniffing replays
+// the bytes already consumed into |buf_| (or reports the error encountered
+// while sniffing); all subsequent calls pass straight through to the
+// underlying request.  NOTE(review): the replay copies |bytes_read_| bytes
+// without checking |max_bytes| — callers are presumably expected to supply
+// a buffer of at least sizeof(buf_) (1024) bytes; confirm at call sites.
+bool MimeSnifferProxy::Read(char* buf, int max_bytes, int *bytes_read) {
+ if (sniff_content_) {
+ // This is the first call to Read() after we've sniffed content.
+ // Return our local buffer or the error we ran into.
+ sniff_content_ = false; // We're done with sniffing for this request.
+
+ if (error_) {
+ *bytes_read = 0;
+ return false;
+ }
+
+ memcpy(buf, buf_, bytes_read_);
+ *bytes_read = bytes_read_;
+ return true;
+ }
+ return request_->Read(buf, max_bytes, bytes_read);
+}
+
+// Delegate hook: a Read() on the underlying request finished.  While the
+// initial sniffing read is outstanding, a successful non-empty completion
+// runs the sniffer over |buf_| (using the job's reported type as a hint)
+// and only then delivers the deferred OnResponseStarted; a failure or EOF
+// marks |error_| so the delegate's first Read() returns it.  Once sniffing
+// is done, completions are forwarded unchanged.
+void MimeSnifferProxy::OnReadCompleted(URLRequest* request, int bytes_read) {
+ if (sniff_content_) {
+ // Our initial content-sniffing Read() has completed.
+ if (request->status().is_success() && bytes_read) {
+ std::string type_hint;
+ request_->GetMimeType(&type_hint);
+ bytes_read_ = bytes_read;
+ mime_util::SniffMimeType(buf_, bytes_read_,
+ request_->url(), type_hint, &mime_type_);
+ } else {
+ error_ = true;
+ }
+ delegate_->OnResponseStarted(request_);
+ return;
+ }
+ delegate_->OnReadCompleted(request, bytes_read);
+}
diff --git a/net/url_request/mime_sniffer_proxy.h b/net/url_request/mime_sniffer_proxy.h
new file mode 100644
index 0000000..7246e46
--- /dev/null
+++ b/net/url_request/mime_sniffer_proxy.h
@@ -0,0 +1,100 @@
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// MimeSnifferProxy wraps an URLRequest to use mime_util's MIME
+// sniffer to better report the content's MIME type.
+// It only supports a subset of the URLRequest API, and must be used together
+// with an URLRequest. Their lifetimes should be the same.
+//
+// To use it, create a normal URLRequest and initialize it appropriately,
+// then insert a MimeSnifferProxy between your object and the URLRequest:
+// ms_.reset(new MimeSnifferProxy(url_request, this));
+// It then proxies URLRequest delegate callbacks (from URLRequest back into
+// your object) appropriately.
+//
+// For the other direction of calls (from your object to URLRequest), be sure
+// to use two MimeSniffed functions in place of the URLRequest functions:
+// 1) ms_->Read() -- just like URLRequest::Read()
+// 2) ms_->mime_type() -- returns the sniffed mime type of the data;
+// valid after OnResponseStarted() is called.
+
+#include "net/url_request/url_request.h"
+
+// Implements URLRequest::Delegate so it can sit between the request and the
+// real delegate (see file comment above for usage).
+class MimeSnifferProxy : public URLRequest::Delegate {
+ public:
+ // The constructor inserts this MimeSnifferProxy in between the URLRequest
+ // and the URLRequest::Delegate, so that the URLRequest's delegate callbacks
+ // first go through the MimeSnifferProxy.
+ MimeSnifferProxy(URLRequest* request, URLRequest::Delegate* delegate);
+
+ // URLRequest::Delegate implementation.
+ // These first two functions are handled specially:
+ // OnResponseStarted may be deferred until a first chunk of the body has
+ // been read and sniffed; OnReadCompleted finishes that deferred sniff.
+ virtual void OnResponseStarted(URLRequest* request);
+ virtual void OnReadCompleted(URLRequest* request, int bytes_read);
+ // The remaining three just proxy directly to the delegate:
+ virtual void OnReceivedRedirect(URLRequest* request,
+ const GURL& new_url) {
+ delegate_->OnReceivedRedirect(request, new_url);
+ }
+ virtual void OnAuthRequired(URLRequest* request,
+ AuthChallengeInfo* auth_info) {
+ delegate_->OnAuthRequired(request, auth_info);
+ }
+ virtual void OnSSLCertificateError(URLRequest* request,
+ int cert_error,
+ X509Certificate* cert) {
+ delegate_->OnSSLCertificateError(request, cert_error, cert);
+ }
+
+ // Wrapper around URLRequest::Read.
+ // The first call after sniffing returns the buffered bytes; later calls
+ // pass through to the underlying request.
+ bool Read(char* buf, int max_bytes, int *bytes_read);
+
+ // Return the sniffed mime type of the request. Valid after
+ // OnResponseStarted() has been called on the delegate.
+ const std::string& mime_type() const { return mime_type_; }
+
+ private:
+ // The request underneath us.
+ URLRequest* request_;
+ // The delegate above us, that we're proxying the request to.
+ URLRequest::Delegate* delegate_;
+
+ // The (sniffed, if necessary) request mime type.
+ std::string mime_type_;
+
+ // Whether we're sniffing this request.
+ bool sniff_content_;
+ // Whether we've encountered an error on our initial Read().
+ bool error_;
+
+ // A buffer for the first bit of the request.
+ // 1024 bytes is the sniffing window handed to mime_util::SniffMimeType.
+ char buf_[1024];
+ // The number of bytes we've read into the buffer.
+ int bytes_read_;
+};
diff --git a/net/url_request/url_request.cc b/net/url_request/url_request.cc
new file mode 100644
index 0000000..74619eb
--- /dev/null
+++ b/net/url_request/url_request.cc
@@ -0,0 +1,360 @@
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "net/url_request/url_request.h"
+
+#include "base/basictypes.h"
+#include "base/process_util.h"
+#include "base/singleton.h"
+#include "base/stats_counters.h"
+#include "googleurl/src/gurl.h"
+#include "net/base/load_flags.h"
+#include "net/base/net_errors.h"
+#include "net/base/upload_data.h"
+#include "net/url_request/url_request_job.h"
+#include "net/url_request/url_request_job_manager.h"
+
+#ifndef NDEBUG
+URLRequestMetrics url_request_metrics;
+#endif
+
+using net::UploadData;
+using std::string;
+using std::wstring;
+
+// Max number of http redirects to follow. Same number as gecko.
+const static int kMaxRedirects = 20;
+
+// The id of the current process. Lazily initialized.
+static int32 current_proc_id = -1;
+
+// Returns the process-wide job manager (lazily created singleton) that maps
+// URL schemes to URLRequestJob factories and holds registered interceptors.
+static URLRequestJobManager* GetJobManager() {
+ return Singleton<URLRequestJobManager>::get();
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// URLRequest
+
+// Constructs an idle GET request for |url|; Start() must be called to begin
+// the load.  The current process id is cached in a file-level static on
+// first construction and stamped into |origin_pid_|.
+URLRequest::URLRequest(const GURL& url, Delegate* delegate)
+ : url_(url),
+ original_url_(url),
+ method_("GET"),
+ load_flags_(net::LOAD_NORMAL),
+ delegate_(delegate),
+ is_pending_(false),
+ user_data_(NULL),
+ enable_profiling_(false),
+ redirect_limit_(kMaxRedirects),
+ final_upload_progress_(0) {
+ URLREQUEST_COUNT_CTOR();
+ SIMPLE_STATS_COUNTER(L"URLRequestCount");
+ if (current_proc_id == -1)
+ base::AtomicSwap(&current_proc_id, process_util::GetCurrentProcId());
+ origin_pid_ = current_proc_id;
+}
+
+// Cancels any in-flight job, then detaches it (OrphanJob) so the job cannot
+// call back into this destroyed object, and frees owned user data.
+URLRequest::~URLRequest() {
+ URLREQUEST_COUNT_DTOR();
+
+ Cancel();
+
+ if (job_)
+ OrphanJob();
+
+ delete user_data_; // NULL check unnecessary for delete
+}
+
+// static
+// Forwards scheme/factory registration to the singleton job manager and
+// returns the previously registered factory (if any) so callers can restore
+// it later — see the header comment on this method.
+URLRequest::ProtocolFactory* URLRequest::RegisterProtocolFactory(
+ const string& scheme, ProtocolFactory* factory) {
+ return GetJobManager()->RegisterProtocolFactory(scheme, factory);
+}
+
+// static
+// Adds an interceptor that the job manager consults for every request.
+void URLRequest::RegisterRequestInterceptor(Interceptor* interceptor) {
+ GetJobManager()->RegisterRequestInterceptor(interceptor);
+}
+
+// static
+// Removes a previously registered interceptor.
+void URLRequest::UnregisterRequestInterceptor(Interceptor* interceptor) {
+ GetJobManager()->UnregisterRequestInterceptor(interceptor);
+}
+
+// Appends a copy of |bytes| to the request body, lazily creating the
+// UploadData (ref-counted via |upload_|) on first use.  |bytes_len| must be
+// positive.
+void URLRequest::AppendBytesToUpload(const char* bytes, int bytes_len) {
+ DCHECK(bytes_len > 0 && bytes);
+ if (!upload_)
+ upload_ = new UploadData();
+ upload_->AppendBytes(bytes, bytes_len);
+}
+
+// Appends |length| bytes of |file_path| starting at |offset| to the request
+// body.  Per the header comment, a range past end-of-file is clipped.
+void URLRequest::AppendFileRangeToUpload(const wstring& file_path,
+ uint64 offset, uint64 length) {
+ DCHECK(file_path.length() > 0 && length > 0);
+ if (!upload_)
+ upload_ = new UploadData();
+ upload_->AppendFileRange(file_path, offset, length);
+}
+
+// Unimplemented placeholder: setting an individual header by token id is
+// not yet supported; hitting this at runtime is a programming error
+// (NOTREACHED).  May only be called before Start().
+void URLRequest::SetExtraRequestHeaderById(int id, const string& value,
+ bool overwrite) {
+ DCHECK(!is_pending_);
+ NOTREACHED() << "implement me!";
+}
+
+// Unimplemented placeholder: setting an individual header by name is not
+// yet supported.  May only be called before Start().
+void URLRequest::SetExtraRequestHeaderByName(const string& name,
+ const string& value,
+ bool overwrite) {
+ DCHECK(!is_pending_);
+ NOTREACHED() << "implement me!";
+}
+
+// Replaces the full extra-headers string.  An empty input clears the stored
+// headers; otherwise a trailing CRLF is appended (debug builds assert the
+// caller did not already end the string with CRLF).  May only be called
+// before Start().
+void URLRequest::SetExtraRequestHeaders(const string& headers) {
+ DCHECK(!is_pending_);
+ if (headers.empty()) {
+ extra_request_headers_.clear();
+ } else {
+#ifndef NDEBUG
+ size_t crlf = headers.rfind("\r\n", headers.size() - 1);
+ DCHECK(crlf != headers.size() - 2) << "headers must not end with CRLF";
+#endif
+ extra_request_headers_ = headers + "\r\n";
+ }
+
+ // NOTE: This method will likely become non-trivial once the other setters
+ // for request headers are implemented.
+}
+
+// Reports the job's current load state, or IDLE when no job exists (not
+// started, or already orphaned).
+net::LoadState URLRequest::GetLoadState() const {
+ return job_ ? job_->GetLoadState() : net::LOAD_STATE_IDLE;
+}
+
+// Returns upload progress in bytes.  After a redirect, the cached progress
+// of the original (uploading) job is returned, since redirect-follow GETs
+// upload nothing — see Redirect(), which snapshots final_upload_progress_.
+uint64 URLRequest::GetUploadProgress() const {
+ if (!job_) {
+ // We haven't started or the request was cancelled
+ return 0;
+ }
+ if (final_upload_progress_) {
+ // The first job completed and none of the subsequent series of
+ // GETs when following redirects will upload anything, so we return the
+ // cached results from the initial job, the POST.
+ return final_upload_progress_;
+ }
+ return job_->GetUploadProgress();
+}
+
+// Unimplemented placeholder for header lookup by token id (NOTREACHED).
+void URLRequest::GetResponseHeaderById(int id, string* value) {
+ DCHECK(job_);
+ NOTREACHED() << "implement me!";
+}
+
+// Copies the normalized value of response header |name| into |value|, or
+// clears |value| when no response headers are available yet.
+void URLRequest::GetResponseHeaderByName(const string& name, string* value) {
+ DCHECK(value);
+ if (response_info_.headers) {
+ response_info_.headers->GetNormalizedHeader(name, value);
+ } else {
+ value->clear();
+ }
+}
+
+// Copies all normalized response headers into |headers|, or clears it when
+// none are available.
+void URLRequest::GetAllResponseHeaders(string* headers) {
+ DCHECK(headers);
+ if (response_info_.headers) {
+ response_info_.headers->GetNormalizedHeaders(headers);
+ } else {
+ headers->clear();
+ }
+}
+
+// Forwards to the job; requires an active job (i.e. Start() was called).
+bool URLRequest::GetResponseCookies(ResponseCookies* cookies) {
+ DCHECK(job_);
+ return job_->GetResponseCookies(cookies);
+}
+
+// Forwards to the job; note callers may wrap this with MimeSnifferProxy to
+// get a sniffed type instead of the job-reported one.
+void URLRequest::GetMimeType(string* mime_type) {
+ DCHECK(job_);
+ job_->GetMimeType(mime_type);
+}
+
+// Forwards the response charset query to the job.
+void URLRequest::GetCharset(string* charset) {
+ DCHECK(job_);
+ job_->GetCharset(charset);
+}
+
+// Forwards the HTTP response code query to the job.
+int URLRequest::GetResponseCode() {
+ DCHECK(job_);
+ return job_->GetResponseCode();
+}
+
+// static
+// True when some registered factory (or built-in handler) supports |scheme|.
+bool URLRequest::IsHandledProtocol(const std::string& scheme) {
+ return GetJobManager()->SupportsScheme(scheme);
+}
+
+// static
+// True when URLRequest can service |url|.  Invalid URLs report true because
+// URLRequest knows how to produce the corresponding error response.
+bool URLRequest::IsHandledURL(const GURL& url) {
+ if (!url.is_valid()) {
+ // We handle error cases.
+ return true;
+ }
+
+ return IsHandledProtocol(url.scheme());
+}
+
+// Kicks off the load: asks the job manager for a job matching this request,
+// hands it the extra headers and any upload body, stamps the request time,
+// and starts the job.  Must not be called while a request is pending.
+// Delegate callbacks will arrive on this thread's message loop.
+void URLRequest::Start() {
+ DCHECK(!is_pending_);
+ DCHECK(!job_);
+
+ job_ = GetJobManager()->CreateJob(this);
+ job_->SetExtraRequestHeaders(extra_request_headers_);
+
+ if (upload_.get())
+ job_->SetUpload(upload_.get());
+
+ is_pending_ = true;
+ response_info_.request_time = Time::Now();
+
+ // Don't allow errors to be sent from within Start().
+ // TODO(brettw) this may cause NotifyDone to be sent synchronously,
+ // we probably don't want this: they should be sent asynchronously so
+ // the caller does not get reentered.
+ job_->Start();
+}
+
+// Cancels with the generic "aborted" error code.
+void URLRequest::Cancel() {
+ CancelWithError(net::ERR_ABORTED);
+}
+
+// Cancels the in-flight job, recording |os_error| as the request status
+// unless an error status is already set (first error wins).  A no-op when
+// nothing is pending.  Completion is reported asynchronously via the job.
+void URLRequest::CancelWithError(int os_error) {
+ DCHECK(os_error < 0);
+
+ // There's nothing to do if we are not waiting on a Job.
+ if (!is_pending_ || !job_)
+ return;
+
+ // If the URL request already has an error status, then canceling is a no-op.
+ // Plus, we don't want to change the error status once it has been set.
+ if (status_.is_success()) {
+ status_.set_status(URLRequestStatus::CANCELED);
+ status_.set_os_error(os_error);
+ }
+
+ job_->Kill();
+
+ // The Job will call our NotifyDone method asynchronously. This is done so
+ // that the Delegate implementation can call Cancel without having to worry
+ // about being called recursively.
+}
+
+// Reads response body bytes into |dest|.  Returns true on synchronous
+// completion (*bytes_read == 0 signals end of stream), false when IO is
+// pending or an error occurred — callers distinguish via status().  A
+// failed/canceled request reads as an immediate empty success.
+bool URLRequest::Read(char* dest, int dest_size, int *bytes_read) {
+ DCHECK(job_);
+ DCHECK(bytes_read);
+ DCHECK(!job_->is_done());
+ *bytes_read = 0;
+
+ if (dest_size == 0) {
+ // Caller is not too bright. I guess we've done what they asked.
+ return true;
+ }
+
+ // Once the request fails or is cancelled, read will just return 0 bytes
+ // to indicate end of stream.
+ if (!status_.is_success()) {
+ return true;
+ }
+
+ return job_->Read(dest, dest_size, bytes_read);
+}
+
+// Supplies credentials after OnAuthRequired; only valid while the job is
+// actually waiting for auth (enforced by DCHECK).
+void URLRequest::SetAuth(const wstring& username, const wstring& password) {
+ DCHECK(job_);
+ DCHECK(job_->NeedsAuth());
+
+ job_->SetAuth(username, password);
+}
+
+// Declines the pending auth challenge; the job will proceed to deliver the
+// server's error page instead.
+void URLRequest::CancelAuth() {
+ DCHECK(job_);
+ DCHECK(job_->NeedsAuth());
+
+ job_->CancelAuth();
+}
+
+// Resumes a request halted by OnSSLCertificateError, accepting the risk.
+void URLRequest::ContinueDespiteLastError() {
+ DCHECK(job_);
+
+ job_->ContinueDespiteLastError();
+}
+
+// Severs the request<->job link: after detaching, the job can no longer
+// call back into this request.  Used by the destructor and by Redirect().
+void URLRequest::OrphanJob() {
+ job_->DetachRequest(); // ensures that the job will not call us again
+ job_ = NULL;
+}
+
+// Follows a redirect to |location|: enforces the redirect limit and the
+// job's safe-redirect policy, rewrites the request to a body-less GET
+// (matching common browser behavior for 302, per the comment below),
+// snapshots upload progress for GetUploadProgress(), orphans the old job,
+// and restarts.  Returns net::OK or a net error code on refusal.
+int URLRequest::Redirect(const GURL& location, int http_status_code) {
+ // TODO(darin): treat 307 redirects of POST requests very carefully. we
+ // should prompt the user before re-submitting the POST body.
+ DCHECK(!(method_ == "POST" && http_status_code == 307)) << "implement me!";
+
+ if (redirect_limit_ <= 0) {
+ DLOG(INFO) << "disallowing redirect: exceeds limit";
+ return net::ERR_TOO_MANY_REDIRECTS;
+ }
+
+ if (!job_->IsSafeRedirect(location)) {
+ DLOG(INFO) << "disallowing redirect: unsafe protocol";
+ return net::ERR_UNSAFE_REDIRECT;
+ }
+
+ // NOTE: even though RFC 2616 says to preserve the request method when
+ // following a 302 redirect, normal browsers don't do that. instead, they
+ // all convert a POST into a GET in response to a 302, and so shall we.
+ url_ = location;
+ method_ = "GET";
+ upload_ = 0;
+ status_ = URLRequestStatus();
+ --redirect_limit_;
+
+ if (!final_upload_progress_) {
+ final_upload_progress_ = job_->GetUploadProgress();
+ }
+
+ OrphanJob();
+
+ is_pending_ = false;
+ Start();
+ return net::OK;
+}
+
+// Returns the job's expected content size in bytes, or -1 when unknown or
+// when no job exists.
+int64 URLRequest::GetExpectedContentSize() const {
+ int64 expected_content_size = -1;
+ if (job_)
+ expected_content_size = job_->expected_content_size();
+
+ return expected_content_size;
+}
diff --git a/net/url_request/url_request.h b/net/url_request/url_request.h
new file mode 100644
index 0000000..89b36b0
--- /dev/null
+++ b/net/url_request/url_request.h
@@ -0,0 +1,542 @@
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef BASE_URL_REQUEST_URL_REQUEST_H__
+#define BASE_URL_REQUEST_URL_REQUEST_H__
+
+#include <string>
+#include <vector>
+
+#include "base/basictypes.h"
+#include "base/logging.h"
+#include "base/ref_counted.h"
+#include "base/time.h"
+#include "googleurl/src/gurl.h"
+#include "net/base/load_states.h"
+#include "net/base/ssl_info.h"
+#include "net/base/upload_data.h"
+#include "net/base/x509_certificate.h"
+#include "net/http/http_response_info.h"
+#include "net/url_request/url_request_context.h"
+#include "net/url_request/url_request_status.h"
+
+class URLRequestJob;
+
+// This stores the values of the Set-Cookie headers received during the request.
+// Each item in the vector corresponds to a Set-Cookie: line received,
+// excluding the "Set-Cookie:" part.
+typedef std::vector<std::string> ResponseCookies;
+
+//-----------------------------------------------------------------------------
+// A class representing the asynchronous load of a data stream from an URL.
+//
+// The lifetime of an instance of this class is completely controlled by the
+// consumer, and the instance is not required to live on the heap or be
+// allocated in any special way. It is also valid to delete an URLRequest
+// object during the handling of a callback to its delegate. Of course, once
+// the URLRequest is deleted, no further callbacks to its delegate will occur.
+//
+// NOTE: All usage of all instances of this class should be on the same thread.
+//
+class URLRequest {
+ public:
+ // Derive from this class and add your own data members to associate extra
+ // information with a URLRequest. Use user_data() and set_user_data()
+ class UserData {
+ public:
+ UserData() {};
+ virtual ~UserData() {};
+ };
+
+ // Callback function implemented by protocol handlers to create new jobs.
+ // The factory may return NULL to indicate an error, which will cause other
+ // factories to be queried. If no factory handles the request, then the
+ // default job will be used.
+ typedef URLRequestJob* (ProtocolFactory)(URLRequest* request,
+ const std::string& scheme);
+
+ // This class handles network interception. Use with
+ // (Un)RegisterRequestInterceptor.
+ class Interceptor {
+ public:
+ virtual ~Interceptor() {}
+
+ // Called for every request made. Should return a new job to handle the
+ // request if it should be intercepted, or NULL to allow the request to
+ // be handled in the normal manner.
+ virtual URLRequestJob* MaybeIntercept(URLRequest* request) = 0;
+ };
+
+ // The delegate's methods are called from the message loop of the thread
+ // on which the request's Start() method is called. See above for the
+ // ordering of callbacks.
+ //
+ // The callbacks will be called in the following order:
+ // Start()
+ // - OnReceivedRedirect* (zero or more calls, for the number of redirects)
+ // - OnAuthRequired* (zero or more calls, for the number of
+ // authentication failures)
+ // - OnResponseStarted
+ // Read() initiated by delegate
+ // - OnReadCompleted* (zero or more calls until all data is read)
+ //
+ // Read() must be called at least once. Read() returns true when it completed
+ // immediately, and false if an IO is pending or if there is an error. When
+ // Read() returns false, the caller can check the Request's status() to see
+ // if an error occurred, or if the IO is just pending. When Read() returns
+ // true with zero bytes read, it indicates the end of the response.
+ //
+ class Delegate {
+ public:
+ // Called upon a server-initiated redirect. The delegate may call the
+ // request's Cancel method to prevent the redirect from being followed.
+ // Since there may be multiple chained redirects, there may also be more
+ // than one redirect call.
+ //
+ // When this function is called, the request will still contain the
+ // original URL, the destination of the redirect is provided in 'new_url'.
+ // If the request is not canceled the redirect will be followed and the
+ // request's URL will be changed to the new URL.
+ virtual void OnReceivedRedirect(URLRequest* request,
+ const GURL& new_url) = 0;
+
+ // Called when we receive an authentication failure. The delegate should
+ // call request->SetAuth() with the user's credentials once it obtains them,
+ // or request->CancelAuth() to cancel the login and display the error page.
+ // When it does so, the request will be reissued, restarting the sequence
+ // of On* callbacks.
+ virtual void OnAuthRequired(URLRequest* request,
+ AuthChallengeInfo* auth_info) {
+ request->CancelAuth();
+ }
+
+ // Called when using SSL and the server responds with a certificate with
+ // an error, for example, whose common name does not match the common name
+ // we were expecting for that host. The delegate should either do the
+ // safe thing and Cancel() the request or decide to proceed by calling
+ // ContinueDespiteLastError(). cert_error is a net::ERR_* error code
+ // indicating what's wrong with the certificate.
+ virtual void OnSSLCertificateError(URLRequest* request,
+ int cert_error,
+ X509Certificate* cert) {
+ request->Cancel();
+ }
+
+ // After calling Start(), the delegate will receive an OnResponseStarted
+ // callback when the request has completed. If an error occurred, the
+ // request->status() will be set. On success, all redirects have been
+ // followed and the final response is beginning to arrive. At this point,
+ // meta data about the response is available, including for example HTTP
+ // response headers if this is a request for a HTTP resource.
+ virtual void OnResponseStarted(URLRequest* request) = 0;
+
+ // Called when the a Read of the response body is completed after an
+ // IO_PENDING status from a Read() call.
+ // The data read is filled into the buffer which the caller passed
+ // to Read() previously.
+ //
+ // If an error occurred, request->status() will contain the error,
+ // and bytes read will be -1.
+ virtual void OnReadCompleted(URLRequest* request, int bytes_read) = 0;
+ };
+
+ // Initialize an URL request.
+ URLRequest(const GURL& url, Delegate* delegate);
+
+ // If destroyed after Start() has been called but while IO is pending,
+ // then the request will be effectively canceled and the delegate
+ // will not have any more of its methods called.
+ ~URLRequest();
+
+ // The user data allows the owner to associate data with this request.
+ // This request will TAKE OWNERSHIP of the given pointer, and will delete
+ // the object if it is changed or the request is destroyed.
+ UserData* user_data() const {
+ return user_data_;
+ }
+ void set_user_data(UserData* user_data) {
+ if (user_data_)
+ delete user_data_;
+ user_data_ = user_data;
+ }
+
+ // Registers a new protocol handler for the given scheme. If the scheme is
+ // already handled, this will overwrite the given factory. To delete the
+ // protocol factory, use NULL for the factory BUT this WILL NOT put back
+ // any previously registered protocol factory. It will have returned
+ // the previously registered factory (or NULL if none is registered) when
+ // the scheme was first registered so that the caller can manually put it
+ // back if desired.
+ //
+ // The scheme must be all-lowercase ASCII. See the ProtocolFactory
+ // declaration for its requirements.
+ //
+ // The registered protocol factory may return NULL, which will cause the
+ // regular "built-in" protocol factory to be used.
+ //
+ static ProtocolFactory* RegisterProtocolFactory(const std::string& scheme,
+ ProtocolFactory* factory);
+
+ // Registers or unregisters a network interception class.
+ static void RegisterRequestInterceptor(Interceptor* interceptor);
+ static void UnregisterRequestInterceptor(Interceptor* interceptor);
+
+ // Returns true if the scheme can be handled by URLRequest. False otherwise.
+ static bool IsHandledProtocol(const std::string& scheme);
+
+ // Returns true if the url can be handled by URLRequest. False otherwise.
+ // The function returns true for invalid urls because URLRequest knows how
+ // to handle those.
+ static bool IsHandledURL(const GURL& url);
+
+ // The original url is the url used to initialize the request, and it may
+ // differ from the url if the request was redirected.
+ const GURL& original_url() const { return original_url_; }
+ const GURL& url() const { return url_; }
+
+ // The URL that should be consulted for the third-party cookie blocking
+ // policy.
+ const GURL& policy_url() const { return policy_url_; }
+ void set_policy_url(const GURL& policy_url) {
+ DCHECK(!is_pending_);
+ policy_url_ = policy_url;
+ }
+
+ // The request method, as an uppercase string. "GET" is the default value.
+ // The request method may only be changed before Start() is called and
+ // should only be assigned an uppercase value.
+ const std::string& method() const { return method_; }
+ void set_method(const std::string& method) {
+ DCHECK(!is_pending_);
+ method_ = method;
+ }
+
+ // The referrer URL for the request. This header may actually be suppressed
+ // from the underlying network request for security reasons (e.g., a HTTPS
+ // URL will not be sent as the referrer for a HTTP request). The referrer
+ // may only be changed before Start() is called.
+ const std::string& referrer() const { return referrer_; }
+ void set_referrer(const std::string& referrer) {
+ DCHECK(!is_pending_);
+ referrer_ = referrer;
+ }
+
+ // The delegate of the request. This value may be changed at any time,
+ // and it is permissible for it to be null.
+ Delegate* delegate() const { return delegate_; }
+ void set_delegate(Delegate* delegate) { delegate_ = delegate; }
+
+ // The data comprising the request message body is specified as a sequence of
+ // data segments and/or files containing data to upload. These methods may
+ // be called to construct the data sequence to upload, and they may only be
+ // called before Start() is called. For POST requests, the user must call
+ // SetRequestHeaderBy{Id,Name} to set the Content-Type of the request to the
+ // appropriate value before calling Start().
+ //
+ // When uploading data, bytes_len must be non-zero.
+ // When uploading a file range, length must be non-zero. If length
+ // exceeds the end-of-file, the upload is clipped at end-of-file.
+ void AppendBytesToUpload(const char* bytes, int bytes_len);
+ void AppendFileRangeToUpload(const std::wstring& file_path,
+ uint64 offset, uint64 length);
+ void AppendFileToUpload(const std::wstring& file_path) {
+ AppendFileRangeToUpload(file_path, 0, kuint64max);
+ }
+
+ // Set the upload data directly.
+ void set_upload(net::UploadData* upload) { upload_ = upload; }
+
+ // Returns true if the request has a non-empty message body to upload.
+ bool has_upload() const { return upload_ != NULL; }
+
+ // Set an extra request header by ID or name. These methods may only be
+ // called before Start() is called. It is an error to call it later.
+ void SetExtraRequestHeaderById(int header_id, const std::string& value,
+ bool overwrite);
+ void SetExtraRequestHeaderByName(const std::string& name,
+ const std::string& value, bool overwrite);
+
+ // Sets all extra request headers, from a \r\n-delimited string. Any extra
+ // request headers set by other methods are overwritten by this method. This
+ // method may only be called before Start() is called. It is an error to
+ // call it later.
+ void SetExtraRequestHeaders(const std::string& headers);
+
+ // Returns the current load state for the request.
+ net::LoadState GetLoadState() const;
+
+ // Returns the current upload progress in bytes.
+ uint64 GetUploadProgress() const;
+
+ // Get response header(s) by ID or name. These methods may only be called
+ // once the delegate's OnResponseStarted method has been called. Headers
+ // that appear more than once in the response are coalesced, with values
+ // separated by commas (per RFC 2616). This will not work with cookies since
+ // comma can be used in cookie values.
+ // TODO(darin): add API to enumerate response headers.
+ void GetResponseHeaderById(int header_id, std::string* value);
+ void GetResponseHeaderByName(const std::string& name, std::string* value);
+
+ // Get all response headers, \n-delimited and \n\0-terminated. This includes
+ // the response status line. Restrictions on GetResponseHeaders apply.
+ void GetAllResponseHeaders(std::string* headers);
+
+ // The time at which the returned response was requested. For cached
+ // responses, this may be a time well in the past.
+ const Time& request_time() const {
+ return response_info_.request_time;
+ }
+
+ // The time at which the returned response was generated. For cached
+ // responses, this may be a time well in the past.
+ const Time& response_time() const {
+ return response_info_.response_time;
+ }
+
+ // Get all response headers, as a HttpResponseHeaders object. See comments
+ // in HttpResponseHeaders class as to the format of the data.
+ net::HttpResponseHeaders* response_headers() const {
+ return response_info_.headers.get();
+ }
+
+ // Get the SSL connection info.
+ const net::SSLInfo& ssl_info() const {
+ return response_info_.ssl_info;
+ }
+
+ // Returns the cookie values included in the response, if the request is one
+ // that can have cookies. Returns true if the request is a cookie-bearing
+ // type, false otherwise. This method may only be called once the
+ // delegate's OnResponseStarted method has been called.
+ bool GetResponseCookies(ResponseCookies* cookies);
+
+ // Get the mime type. This method may only be called once the delegate's
+ // OnResponseStarted method has been called.
+ void GetMimeType(std::string* mime_type);
+
+ // Get the charset (character encoding). This method may only be called once
+ // the delegate's OnResponseStarted method has been called.
+ void GetCharset(std::string* charset);
+
+ // Returns the HTTP response code (e.g., 200, 404, and so on). This method
+ // may only be called once the delegate's OnResponseStarted method has been
+ // called. For non-HTTP requests, this method returns -1.
+ int GetResponseCode();
+
+ // Access the net::LOAD_* flags modifying this request (see load_flags.h).
+ int load_flags() const { return load_flags_; }
+ void set_load_flags(int flags) { load_flags_ = flags; }
+
+ // Accessors to the pid of the process this request originated from.
+ int origin_pid() const { return origin_pid_; }
+ void set_origin_pid(int proc_id) {
+ origin_pid_ = proc_id;
+ }
+
+  // Returns true if the request is "pending" (i.e., if Start() has been
+  // called, and the response has not yet been completed).
+ bool is_pending() const { return is_pending_; }
+
+ // Returns the error status of the request. This value is 0 if there is no
+ // error. Otherwise, it is a value defined by the operating system (e.g., an
+ // error code returned by GetLastError() on windows).
+ const URLRequestStatus& status() const { return status_; }
+
+ // This method is called to start the request. The delegate will receive
+ // a OnResponseStarted callback when the request is started.
+ void Start();
+
+ // This method may be called at any time after Start() has been called to
+ // cancel the request. This method may be called many times, and it has
+ // no effect once the response has completed.
+ void Cancel();
+
+ // Similar to Cancel but sets the error to |os_error| (see net_error_list.h
+ // for values) instead of net::ERR_ABORTED.
+ // Used to attach a reason for canceling a request.
+ void CancelWithError(int os_error);
+
+ // Read initiates an asynchronous read from the response, and must only
+ // be called after the OnResponseStarted callback is received with a
+ // successful status.
+ // If data is available, Read will return true, and the data and length will
+ // be returned immediately. If data is not available, Read returns false,
+ // and an asynchronous Read is initiated. The caller guarantees the
+ // buffer provided will be available until the Read is finished. The
+ // Read is finished when the caller receives the OnReadComplete
+ // callback. OnReadComplete will be always be called, even if there
+ // was a failure.
+ //
+ // The buf parameter is a buffer to receive the data. Once the read is
+ // initiated, the caller guarantees availability of this buffer until
+ // the OnReadComplete is received. The buffer must be at least
+ // max_bytes in length.
+ //
+ // The max_bytes parameter is the maximum number of bytes to read.
+ //
+  // The bytes_read parameter is an output parameter containing the
+  // number of bytes read. A value of 0 indicates that there is no
+ // more data available to read from the stream.
+ //
+ // If a read error occurs, Read returns false and the request->status
+ // will be set to an error.
+ bool Read(char* buf, int max_bytes, int *bytes_read);
+
+ // One of the following two methods should be called in response to an
+ // OnAuthRequired() callback (and only then).
+ // SetAuth will reissue the request with the given credentials.
+ // CancelAuth will give up and display the error page.
+ void SetAuth(const std::wstring& username, const std::wstring& password);
+ void CancelAuth();
+
+ // This method can be called after some error notifications to instruct this
+ // URLRequest to ignore the current error and continue with the request. To
+ // cancel the request instead, call Cancel().
+ void ContinueDespiteLastError();
+
+ // HTTP request/response header IDs (via some preprocessor fun) for use with
+ // SetRequestHeaderById and GetResponseHeaderById.
+ enum {
+#define HTTP_ATOM(x) HTTP_ ## x,
+#include "net/http/http_atom_list.h"
+#undef HTTP_ATOM
+ };
+
+ // Returns true if performance profiling should be enabled on the
+ // URLRequestJob serving this request.
+ bool enable_profiling() const { return enable_profiling_; }
+
+ void set_enable_profiling(bool profiling) { enable_profiling_ = profiling; }
+
+ // Used to specify the context (cookie store, cache) for this request.
+ URLRequestContext* context() { return context_.get(); }
+ void set_context(URLRequestContext* context) { context_ = context; }
+
+ // Returns the expected content size if available
+ int64 GetExpectedContentSize() const;
+
+ protected:
+ // Allow the URLRequestJob class to control the is_pending() flag.
+ void set_is_pending(bool value) { is_pending_ = value; }
+
+ // Allow the URLRequestJob class to set our status too
+ void set_status(const URLRequestStatus &value) { status_ = value; }
+
+ // Allow the URLRequestJob to redirect this request. Returns net::OK if
+ // successful, otherwise an error code is returned.
+ int Redirect(const GURL& location, int http_status_code);
+
+ private:
+ friend class URLRequestJob;
+
+ // Detaches the job from this request in preparation for this object going
+ // away or the job being replaced. The job will not call us back when it has
+ // been orphaned.
+ void OrphanJob();
+
+ scoped_refptr<URLRequestJob> job_;
+ scoped_refptr<net::UploadData> upload_;
+ GURL url_;
+ GURL original_url_;
+ GURL policy_url_;
+ std::string method_; // "GET", "POST", etc. Should be all uppercase.
+ std::string referrer_;
+ std::string extra_request_headers_;
+ int load_flags_; // Flags indicating the request type for the load;
+ // expected values are LOAD_* enums above.
+
+ // The pid of the process that initiated this request. Initialized to the id
+ // of the current process.
+ int origin_pid_;
+
+ Delegate* delegate_;
+
+ // Current error status of the job. When no error has been encountered, this
+ // will be SUCCESS. If multiple errors have been encountered, this will be
+ // the first non-SUCCESS status seen.
+ URLRequestStatus status_;
+
+ // The HTTP response info, lazily initialized.
+ net::HttpResponseInfo response_info_;
+
+ // Tells us whether the job is outstanding. This is true from the time
+ // Start() is called to the time we dispatch RequestComplete and indicates
+ // whether the job is active.
+ bool is_pending_;
+
+ // Externally-defined data associated with this request
+ UserData* user_data_;
+
+ // Whether to enable performance profiling on the job serving this request.
+ bool enable_profiling_;
+
+ // Number of times we're willing to redirect. Used to guard against
+ // infinite redirects.
+ int redirect_limit_;
+
+ // Contextual information used for this request (can be NULL).
+ scoped_refptr<URLRequestContext> context_;
+
+ // Cached value for use after we've orphaned the job handling the
+ // first transaction in a request involving redirects.
+ uint64 final_upload_progress_;
+
+ DISALLOW_EVIL_CONSTRUCTORS(URLRequest);
+};
+
+//-----------------------------------------------------------------------------
+// To help ensure that all requests are cleaned up properly, we keep static
+// counters of live objects. TODO(darin): Move this leak checking stuff into
+// a common place and generalize it so it can be used everywhere (Bug 566229).
+
+#ifndef NDEBUG
+
+// Debug-only instrumentation: tracks the number of live URLRequest objects
+// and warns at static-destruction time if any were leaked.
+struct URLRequestMetrics {
+  int object_count;
+
+  URLRequestMetrics() : object_count(0) {}
+
+  ~URLRequestMetrics() {
+    // A non-zero count here means some URLRequest was never destroyed.
+    DLOG_IF(WARNING, object_count != 0)
+        << "Leaking " << object_count << " URLRequest object(s)";
+  }
+};
+
+extern URLRequestMetrics url_request_metrics;
+
+#define URLREQUEST_COUNT_CTOR() url_request_metrics.object_count++
+#define URLREQUEST_COUNT_DTOR() url_request_metrics.object_count--
+
+#else // disable leak checking in release builds...
+
+#define URLREQUEST_COUNT_CTOR()
+#define URLREQUEST_COUNT_DTOR()
+
+#endif
+
+
+#endif // BASE_URL_REQUEST_URL_REQUEST_H__
diff --git a/net/url_request/url_request_about_job.cc b/net/url_request/url_request_about_job.cc
new file mode 100644
index 0000000..b102ad5
--- /dev/null
+++ b/net/url_request/url_request_about_job.cc
@@ -0,0 +1,62 @@
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Simple implementation of about: protocol handler that treats everything as
+// about:blank. No other about: features should be available to web content,
+// so they're not implemented here.
+
+#include "net/url_request/url_request_about_job.h"
+
+#include "base/message_loop.h"
+
+// static
+URLRequestJob* URLRequestAboutJob::Factory(URLRequest* request,
+                                           const std::string& scheme) {
+  // All about: URLs are handled identically, so |scheme| is not consulted
+  // beyond satisfying the ProtocolFactory signature.
+  URLRequestAboutJob* job = new URLRequestAboutJob(request);
+  return job;
+}
+
+// The job holds no state of its own; all work happens in Start()/StartAsync().
+URLRequestAboutJob::URLRequestAboutJob(URLRequest* request)
+    : URLRequestJob(request) {
+}
+
+void URLRequestAboutJob::Start() {
+  // Post a task rather than notifying inline so that all error reporting and
+  // data callbacks arrive asynchronously, just as they do for network jobs.
+  MessageLoop::current()->PostTask(
+      FROM_HERE,
+      NewRunnableMethod(this, &URLRequestAboutJob::StartAsync));
+}
+
+bool URLRequestAboutJob::GetMimeType(std::string* mime_type) {
+  // about: content is always served as an (empty) HTML document.
+  mime_type->assign("text/html");
+  return true;
+}
+
+// Runs as a posted task from Start(); there is no response body to produce,
+// so simply report that the headers are complete.
+void URLRequestAboutJob::StartAsync() {
+  NotifyHeadersComplete();
+}
diff --git a/net/url_request/url_request_about_job.h b/net/url_request/url_request_about_job.h
new file mode 100644
index 0000000..f94f9a7
--- /dev/null
+++ b/net/url_request/url_request_about_job.h
@@ -0,0 +1,49 @@
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef BASE_URL_REQUEST_URL_REQUEST_ABOUT_JOB_H__
+#define BASE_URL_REQUEST_URL_REQUEST_ABOUT_JOB_H__
+
+#include "net/url_request/url_request.h"
+#include "net/url_request/url_request_job.h"
+
+// URLRequestJob that serves the about: scheme. Everything is treated as
+// about:blank; no other about: features are exposed to web content.
+class URLRequestAboutJob : public URLRequestJob {
+ public:
+  // Does not take ownership of |request|. Marked explicit to prevent
+  // accidental implicit conversion from URLRequest*.
+  explicit URLRequestAboutJob(URLRequest* request);
+
+  // URLRequestJob overrides.
+  virtual void Start();
+  virtual bool GetMimeType(std::string* mime_type);
+
+  // Factory suitable for URLRequest::RegisterProtocolFactory.
+  static URLRequest::ProtocolFactory Factory;
+
+ private:
+  // Completes Start() asynchronously.
+  void StartAsync();
+};
+
+#endif // BASE_URL_REQUEST_URL_REQUEST_ABOUT_JOB_H__
diff --git a/net/url_request/url_request_context.h b/net/url_request/url_request_context.h
new file mode 100644
index 0000000..059df6c
--- /dev/null
+++ b/net/url_request/url_request_context.h
@@ -0,0 +1,105 @@
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// This class represents contextual information (cookies, cache, etc.)
+// that's useful when processing resource requests.
+// The class is reference-counted so that it can be cleaned up after any
+// requests that are using it have been completed.
+
+#ifndef BASE_URL_REQUEST_URL_REQUEST_CONTEXT_H__
+#define BASE_URL_REQUEST_URL_REQUEST_CONTEXT_H__
+
+#include <string>
+
+#include "base/basictypes.h"
+#include "base/ref_counted.h"
+#include "base/scoped_ptr.h"
+#include "net/base/auth_cache.h"
+#include "net/base/cookie_policy.h"
+#include "net/http/http_transaction_factory.h"
+
+class CookieMonster;
+
+// Subclass to provide application-specific context for URLRequest instances.
+class URLRequestContext :
+    public base::RefCountedThreadSafe<URLRequestContext> {
+ public:
+  URLRequestContext()
+      : http_transaction_factory_(NULL),
+        cookie_store_(NULL),
+        is_off_the_record_(false) {
+  }
+
+  // Gets the http transaction factory for this context.
+  net::HttpTransactionFactory* http_transaction_factory() {
+    return http_transaction_factory_;
+  }
+
+  // Gets the cookie store for this context.
+  CookieMonster* cookie_store() { return cookie_store_; }
+
+  // Gets the cookie policy for this context.
+  CookiePolicy* cookie_policy() { return &cookie_policy_; }
+
+  // Gets the FTP realm authentication cache for this context.
+  AuthCache* ftp_auth_cache() { return &ftp_auth_cache_; }
+
+  // Gets the UA string to use for this context.
+  const std::string& user_agent() const { return user_agent_; }
+
+  // Gets the value of 'Accept-Charset' header field.
+  const std::string& accept_charset() const { return accept_charset_; }
+
+  // Gets the value of 'Accept-Language' header field.
+  const std::string& accept_language() const { return accept_language_; }
+
+  // Returns true if this context is off the record. Const so it can be
+  // queried through a const reference, like the other simple accessors.
+  bool is_off_the_record() const { return is_off_the_record_; }
+
+  // Do not call this directly. TODO(darin): extending from RefCounted* should
+  // not require a public destructor!
+  virtual ~URLRequestContext() {}
+
+ protected:
+  // The following members are expected to be initialized and owned by
+  // subclasses.
+  net::HttpTransactionFactory* http_transaction_factory_;
+  CookieMonster* cookie_store_;
+  CookiePolicy cookie_policy_;
+  AuthCache ftp_auth_cache_;
+  std::string user_agent_;
+  bool is_off_the_record_;
+  std::string accept_language_;
+  std::string accept_charset_;
+
+ private:
+  DISALLOW_EVIL_CONSTRUCTORS(URLRequestContext);
+};
+
+#endif // BASE_URL_REQUEST_URL_REQUEST_CONTEXT_H__
diff --git a/net/url_request/url_request_error_job.cc b/net/url_request/url_request_error_job.cc
new file mode 100644
index 0000000..f9c7d38
--- /dev/null
+++ b/net/url_request/url_request_error_job.cc
@@ -0,0 +1,46 @@
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "net/url_request/url_request_error_job.h"
+
+#include "base/message_loop.h"
+#include "net/base/net_errors.h"
+
+// |error| is a net error code (see net/base/net_errors.h) that will be
+// reported as the failure status when the job is started.
+URLRequestErrorJob::URLRequestErrorJob(URLRequest* request, int error)
+    : URLRequestJob(request), error_(error) {
+}
+
+void URLRequestErrorJob::Start() {
+  // Report the failure from a posted task so the delegate is notified
+  // asynchronously, matching the behavior of real network jobs.
+  MessageLoop::current()->PostTask(
+      FROM_HERE,
+      NewRunnableMethod(this, &URLRequestErrorJob::StartAsync));
+}
+
+// Runs as a posted task from Start(); delivers the stored error code as a
+// failed request status.
+void URLRequestErrorJob::StartAsync() {
+  NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED, error_));
+}
diff --git a/net/url_request/url_request_error_job.h b/net/url_request/url_request_error_job.h
new file mode 100644
index 0000000..d93ca7f
--- /dev/null
+++ b/net/url_request/url_request_error_job.h
@@ -0,0 +1,49 @@
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Invalid URLs go through this URLRequestJob class rather than being passed
+// to the default job handler.
+
+#ifndef BASE_URL_REQUEST_URL_REQUEST_ERROR_JOB_H__
+#define BASE_URL_REQUEST_URL_REQUEST_ERROR_JOB_H__
+
+#include "net/url_request/url_request_job.h"
+
+class URLRequestErrorJob : public URLRequestJob {
+ public:
+  URLRequestErrorJob(URLRequest* request, int error);
+
+  virtual void Start();
+
+ private:
+  // Delivers the failure notification after Start() returns.
+  void StartAsync();
+
+  // The net error code reported for this request.
+  int error_;
+};
+
+#endif // BASE_URL_REQUEST_URL_REQUEST_ERROR_JOB_H__
diff --git a/net/url_request/url_request_file_dir_job.cc b/net/url_request/url_request_file_dir_job.cc
new file mode 100644
index 0000000..5ad44a6
--- /dev/null
+++ b/net/url_request/url_request_file_dir_job.cc
@@ -0,0 +1,207 @@
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "net/url_request/url_request_file_dir_job.h"
+
+#include "base/file_util.h"
+#include "base/message_loop.h"
+#include "base/string_util.h"
+#include "googleurl/src/gurl.h"
+#include "net/base/net_util.h"
+#include "net/base/wininet_util.h"
+#include "net/url_request/url_request.h"
+
+using std::string;
+using std::wstring;
+
+using net::WinInetUtil;
+
+// The directory listing itself does not begin until Start() is called; the
+// constructor only records |dir_path| and resets the streaming state.
+URLRequestFileDirJob::URLRequestFileDirJob(URLRequest* request,
+                                           const wstring& dir_path)
+    : URLRequestJob(request),
+      dir_path_(dir_path),
+      canceled_(false),
+      list_complete_(false),
+      wrote_header_(false),
+      read_pending_(false),
+      read_buffer_(NULL),
+      read_buffer_length_(0) {
+}
+
+URLRequestFileDirJob::~URLRequestFileDirJob() {
+  // The job must not die with a read outstanding, and CloseLister() must
+  // already have detached us from the lister.
+  DCHECK(!read_pending_);
+  DCHECK(lister_ == NULL);
+}
+
+void URLRequestFileDirJob::Start() {
+  // Kick off the listing from a posted task so that all error reporting and
+  // data callbacks happen asynchronously, as they would for network requests.
+  MessageLoop::current()->PostTask(
+      FROM_HERE,
+      NewRunnableMethod(this, &URLRequestFileDirJob::StartAsync));
+}
+
+void URLRequestFileDirJob::StartAsync() {
+  DCHECK(!lister_);
+
+  // AddRef so that *this* cannot be destroyed while the lister_
+  // is trying to feed us data. The matching Release() is in OnListDone().
+
+  AddRef();
+  lister_ = new DirectoryLister(dir_path_, this);
+  lister_->Start();
+
+  NotifyHeadersComplete();
+}
+
+void URLRequestFileDirJob::Kill() {
+  // Idempotent: repeated Kill() calls are ignored.
+  if (canceled_)
+    return;
+
+  canceled_ = true;
+
+  // Don't call CloseLister or dispatch an error to the URLRequest because we
+  // want OnListDone to be called to also write the error to the output stream.
+  // OnListDone will notify the URLRequest at this time.
+  if (lister_)
+    lister_->Cancel();
+}
+
+bool URLRequestFileDirJob::ReadRawData(char* buf, int buf_size,
+                                       int *bytes_read) {
+  DCHECK(bytes_read);
+  *bytes_read = 0;
+
+  // A completed job has nothing further to return; report EOF.
+  if (is_done())
+    return true;
+
+  // Serve whatever listing data has been buffered so far, if any.
+  if (FillReadBuffer(buf, buf_size, bytes_read))
+    return true;
+
+  // We are waiting for more data: remember the caller's buffer so that
+  // CompleteRead() can finish this read when the lister delivers more
+  // entries, and report the request as IO-pending meanwhile.
+  read_pending_ = true;
+  read_buffer_ = buf;
+  read_buffer_length_ = buf_size;
+  SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));
+  return false;
+}
+
+bool URLRequestFileDirJob::GetMimeType(string* mime_type) {
+  // Directory listings are rendered as generated HTML pages.
+  mime_type->assign("text/html");
+  return true;
+}
+
+bool URLRequestFileDirJob::GetCharset(string* charset) {
+  // All the filenames are converted to UTF-8 before being added.
+  charset->assign("utf-8");
+  return true;
+}
+
+// DirectoryLister callback: invoked once per directory entry. Appends the
+// HTML for the entry to data_ and pushes data to any pending read.
+void URLRequestFileDirJob::OnListFile(const WIN32_FIND_DATA& data) {
+  // Convert the last-write time to local time for display.
+  FILETIME local_time;
+  FileTimeToLocalFileTime(&data.ftLastWriteTime, &local_time);
+  // Reassemble the 64-bit file size from its two 32-bit halves.
+  int64 size = (static_cast<unsigned __int64>(data.nFileSizeHigh) << 32) |
+      data.nFileSizeLow;
+
+  // We wait to write out the header until we get the first file, so that we
+  // can catch errors from DirectoryLister and show an error page.
+  if (!wrote_header_) {
+    data_.append(net_util::GetDirectoryListingHeader(WideToUTF8(dir_path_)));
+    wrote_header_ = true;
+  }
+
+  data_.append(net_util::GetDirectoryListingEntry(
+      WideToUTF8(data.cFileName), data.dwFileAttributes, size, &local_time));
+
+  // TODO(darin): coalesce more?
+
+  CompleteRead();
+}
+
+// DirectoryLister callback: invoked once when the listing finishes (with
+// error == 0 on success) or after cancellation.
+void URLRequestFileDirJob::OnListDone(int error) {
+  CloseLister();
+
+  if (error) {
+    read_pending_ = false;
+    // Map the Windows error to a net error code and fail the request.
+    NotifyDone(URLRequestStatus(URLRequestStatus::FAILED,
+        WinInetUtil::OSErrorToNetError(error)));
+  } else if (canceled_) {
+    read_pending_ = false;
+    NotifyCanceled();
+  } else {
+    // Success: mark the data stream complete and flush any pending read.
+    list_complete_ = true;
+    CompleteRead();
+  }
+
+  Release(); // The Lister is finished; may delete *this*
+}
+
+void URLRequestFileDirJob::CloseLister() {
+  // Detach from the lister so it stops calling back into this job, then drop
+  // our reference to it. Safe to call when no lister is attached.
+  if (!lister_)
+    return;
+  lister_->Cancel();
+  lister_->set_delegate(NULL);
+  lister_ = NULL;
+}
+
+// Copies up to |buf_size| bytes of buffered listing data into |buf|.
+// Returns true when the read completed (data copied, or EOF reached);
+// false when the caller must wait for more listing data.
+bool URLRequestFileDirJob::FillReadBuffer(char *buf, int buf_size,
+                                          int *bytes_read) {
+  DCHECK(bytes_read);
+
+  *bytes_read = 0;
+
+  int count = std::min(buf_size, static_cast<int>(data_.size()));
+  if (count > 0) {
+    // Hand out the front of the buffered listing data.
+    memcpy(buf, data_.data(), count);
+    data_.erase(0, count);
+    *bytes_read = count;
+    return true;
+  }
+
+  // Nothing buffered: this is EOF only once the listing has completed;
+  // otherwise the caller must wait for more data.
+  return list_complete_;
+}
+
+// Called whenever more listing data arrives or the listing completes; if a
+// read is pending, finishes it with the newly available data (or EOF).
+void URLRequestFileDirJob::CompleteRead() {
+  if (read_pending_) {
+    int bytes_read;
+    if (FillReadBuffer(read_buffer_, read_buffer_length_, &bytes_read)) {
+      // We completed the read, so reset the read buffer.
+      read_pending_ = false;
+      read_buffer_ = NULL;
+      read_buffer_length_ = 0;
+
+      SetStatus(URLRequestStatus());
+      NotifyReadComplete(bytes_read);
+    } else {
+      // FillReadBuffer() should not fail here: CompleteRead() is only invoked
+      // when data arrived or the listing finished, so a read must complete.
+      NOTREACHED();
+      // TODO: Better error code.
+      NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, 0));
+    }
+  }
+}
diff --git a/net/url_request/url_request_file_dir_job.h b/net/url_request/url_request_file_dir_job.h
new file mode 100644
index 0000000..78d6863
--- /dev/null
+++ b/net/url_request/url_request_file_dir_job.h
@@ -0,0 +1,85 @@
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef NET_URL_REQUEST_URL_REQUEST_FILE_DIR_JOB_H__
+#define NET_URL_REQUEST_URL_REQUEST_FILE_DIR_JOB_H__
+
+#include "net/base/directory_lister.h"
+#include "net/url_request/url_request_job.h"
+
+class URLRequestFileDirJob : public URLRequestJob,
+ public DirectoryLister::Delegate {
+ public:
+ URLRequestFileDirJob(URLRequest* request, const std::wstring& dir_path);
+ virtual ~URLRequestFileDirJob();
+
+ // URLRequestJob methods:
+ virtual void Start();
+ virtual void StartAsync();
+ virtual void Kill();
+ virtual bool ReadRawData(char* buf, int buf_size, int *bytes_read);
+ virtual bool GetMimeType(std::string* mime_type);
+ virtual bool GetCharset(std::string* charset);
+
+ // DirectoryLister::Delegate methods:
+ virtual void OnListFile(const WIN32_FIND_DATA& data);
+ virtual void OnListDone(int error);
+
+ private:
+ void CloseLister();
+ // When we have data and a read has been pending, this function
+ // will fill the response buffer and notify the request
+ // appropriately.
+ void CompleteRead();
+
+ // Fills a buffer with the output.
+ bool FillReadBuffer(char *buf, int buf_size, int *bytes_read);
+
+ scoped_refptr<DirectoryLister> lister_;
+ std::wstring dir_path_;
+ std::string data_;
+ bool canceled_;
+
+ // Indicates whether we have the complete list of the dir
+ bool list_complete_;
+
+ // Indicates whether we have written the HTML header
+ bool wrote_header_;
+
+ // To simulate Async IO, we hold onto the Reader's buffer while
+ // we wait for IO to complete. When done, we fill the buffer
+ // manually.
+ bool read_pending_;
+ char *read_buffer_;
+ int read_buffer_length_;
+
+ DISALLOW_EVIL_CONSTRUCTORS(URLRequestFileDirJob);
+};
+
+#endif // NET_URL_REQUEST_URL_REQUEST_FILE_DIR_JOB_H__
diff --git a/net/url_request/url_request_file_job.cc b/net/url_request/url_request_file_job.cc
new file mode 100644
index 0000000..17e5f5c
--- /dev/null
+++ b/net/url_request/url_request_file_job.cc
@@ -0,0 +1,349 @@
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// For loading files, we make use of overlapped i/o to ensure that reading from
+// the filesystem (e.g., a network filesystem) does not block the calling
+// thread. An alternative approach would be to use a background thread or pool
+// of threads, but it seems better to leverage the operating system's ability
+// to do background file reads for us.
+//
+// Since overlapped reads require a 'static' buffer for the duration of the
+// asynchronous read, the URLRequestFileJob keeps a buffer as a member var. In
+// URLRequestFileJob::Read, data is simply copied from the object's buffer into
+// the given buffer. If there is no data to copy, the URLRequestFileJob
+// attempts to read more from the file to fill its buffer. If reading from the
+// file does not complete synchronously, then the URLRequestFileJob waits for a
+// signal from the OS that the overlapped read has completed. It does so by
+// leveraging the MessageLoop::WatchObject API.
+
+#include <process.h>
+#include <windows.h>
+
+#include "net/url_request/url_request_file_job.h"
+
+#include "base/file_util.h"
+#include "base/string_util.h"
+#include "googleurl/src/gurl.h"
+#include "net/base/mime_util.h"
+#include "net/base/net_errors.h"
+#include "net/base/net_util.h"
+#include "net/base/wininet_util.h"
+#include "net/url_request/url_request.h"
+#include "net/url_request/url_request_file_dir_job.h"
+
+using net::WinInetUtil;
+
+namespace {
+
+// Thread used to run ResolveFile. The parameter is a pointer to the
+// URLRequestFileJob object.
+DWORD WINAPI NetworkFileThread(LPVOID param) {
+ URLRequestFileJob* job = reinterpret_cast<URLRequestFileJob*>(param);
+ job->ResolveFile();
+ return 0;
+}
+
+} // namespace
+
+// static
+URLRequestJob* URLRequestFileJob::Factory(URLRequest* request,
+ const std::string& scheme) {
+ std::wstring file_path;
+ if (net_util::FileURLToFilePath(request->url(), &file_path)) {
+ if (file_path[file_path.size() - 1] == file_util::kPathSeparator) {
+ // Only directories have trailing slashes.
+ return new URLRequestFileDirJob(request, file_path);
+ }
+ }
+
+ // Use a regular file request job for all non-directories (including invalid
+ // file names).
+ URLRequestFileJob* job = new URLRequestFileJob(request);
+ job->file_path_ = file_path;
+ return job;
+}
+
+URLRequestFileJob::URLRequestFileJob(URLRequest* request)
+ : URLRequestJob(request),
+ handle_(INVALID_HANDLE_VALUE),
+ is_waiting_(false),
+ is_directory_(false),
+ is_not_found_(false),
+ network_file_thread_(NULL),
+ loop_(NULL) {
+ memset(&overlapped_, 0, sizeof(overlapped_));
+}
+
+URLRequestFileJob::~URLRequestFileJob() {
+ CloseHandles();
+
+ // The thread might still be running. We need to kill it because it holds
+ // a reference to this object.
+ if (network_file_thread_) {
+ TerminateThread(network_file_thread_, 0);
+ CloseHandle(network_file_thread_);
+ }
+}
+
+// This function can be called on the main thread or on the network file thread.
+// When the request is done, we call StartAsync on the main thread.
+void URLRequestFileJob::ResolveFile() {
+ WIN32_FILE_ATTRIBUTE_DATA file_attributes = {0};
+ if (!GetFileAttributesEx(file_path_.c_str(),
+ GetFileExInfoStandard,
+ &file_attributes)) {
+ is_not_found_ = true;
+ } else {
+ if (file_attributes.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) {
+ is_directory_ = true;
+ } else {
+ // Set the file size as expected size to be read.
+ ULARGE_INTEGER file_size;
+ file_size.HighPart = file_attributes.nFileSizeHigh;
+ file_size.LowPart = file_attributes.nFileSizeLow;
+
+ set_expected_content_size(file_size.QuadPart);
+ }
+ }
+
+
+ // We need to protect the loop_ pointer with a lock because if we are running
+ // on the network file thread, it is possible that the main thread is
+ // executing Kill() at this moment. Kill() sets the loop_ to NULL because it
+ // might be going away.
+ AutoLock lock(loop_lock_);
+ if (loop_) {
+ // Start reading asynchronously so that all error reporting and data
+ // callbacks happen as they would for network requests.
+ loop_->PostTask(FROM_HERE, NewRunnableMethod(
+ this, &URLRequestFileJob::StartAsync));
+ }
+}
+
+void URLRequestFileJob::Start() {
+ // This is the loop on which we should execute StartAsync. This is used in
+ // ResolveFile().
+ loop_ = MessageLoop::current();
+
+ if (!file_path_.compare(0, 2, L"\\\\")) {
+ // This is on a network share. It might be slow to check if it's a directory
+ // or a file. We need to do it in another thread.
+ network_file_thread_ = CreateThread(NULL, 0, &NetworkFileThread, this, 0,
+ NULL);
+ } else {
+ // We can call the function directly because it's going to be fast.
+ ResolveFile();
+ }
+}
+
+void URLRequestFileJob::Kill() {
+ // If we are killed while waiting for an overlapped result...
+ if (is_waiting_) {
+ MessageLoop::current()->WatchObject(overlapped_.hEvent, NULL);
+ is_waiting_ = false;
+ Release();
+ }
+ CloseHandles();
+ URLRequestJob::Kill();
+
+ // It is possible that the network file thread is running and will invoke
+ // StartAsync() on loop_. We set loop_ to NULL here because the message loop
+ // might be going away and we don't want the other thread to call StartAsync()
+  // on this loop anymore. We protect loop_ with a lock in case the other thread
+  // is currently invoking the call.
+ AutoLock lock(loop_lock_);
+ loop_ = NULL;
+}
+
+bool URLRequestFileJob::ReadRawData(char* dest, int dest_size,
+ int *bytes_read) {
+ DCHECK_NE(dest_size, 0);
+ DCHECK(bytes_read);
+ DCHECK(!is_waiting_);
+
+ *bytes_read = 0;
+
+ DWORD bytes;
+ if (ReadFile(handle_, dest, dest_size, &bytes, &overlapped_)) {
+ // data is immediately available
+ overlapped_.Offset += bytes;
+ *bytes_read = static_cast<int>(bytes);
+ DCHECK(!is_waiting_);
+ DCHECK(!is_done());
+ return true;
+ }
+
+  // otherwise, a read error occurred.
+ DWORD err = GetLastError();
+ if (err == ERROR_IO_PENDING) {
+ // OK, wait for the object to become signaled
+ MessageLoop::current()->WatchObject(overlapped_.hEvent, this);
+ is_waiting_ = true;
+ SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));
+ AddRef();
+ return false;
+ }
+ if (err == ERROR_HANDLE_EOF) {
+ // OK, nothing more to read
+ return true;
+ }
+
+ NotifyDone(URLRequestStatus(URLRequestStatus::FAILED,
+ WinInetUtil::OSErrorToNetError(err)));
+ return false;
+}
+
+bool URLRequestFileJob::GetMimeType(std::string* mime_type) {
+ DCHECK(request_);
+ return mime_util::GetMimeTypeFromFile(file_path_, mime_type);
+}
+
+void URLRequestFileJob::CloseHandles() {
+ DCHECK(!is_waiting_);
+ if (handle_ != INVALID_HANDLE_VALUE) {
+ CloseHandle(handle_);
+ handle_ = INVALID_HANDLE_VALUE;
+ }
+ if (overlapped_.hEvent) {
+ CloseHandle(overlapped_.hEvent);
+ overlapped_.hEvent = NULL;
+ }
+}
+
+void URLRequestFileJob::StartAsync() {
+ if (network_file_thread_) {
+ CloseHandle(network_file_thread_);
+ network_file_thread_ = NULL;
+ }
+
+ // The request got killed, we need to stop.
+ if (!loop_)
+ return;
+
+ // We may have been orphaned...
+ if (!request_)
+ return;
+
+ // This is not a file, this is a directory.
+ if (is_directory_) {
+ NotifyHeadersComplete();
+ return;
+ }
+
+ if (is_not_found_) {
+ // some kind of invalid file URI
+ NotifyDone(URLRequestStatus(URLRequestStatus::FAILED,
+ net::ERR_FILE_NOT_FOUND));
+ } else {
+ handle_ = CreateFile(file_path_.c_str(),
+ GENERIC_READ|SYNCHRONIZE,
+ FILE_SHARE_READ | FILE_SHARE_WRITE,
+ NULL,
+ OPEN_EXISTING,
+ FILE_FLAG_OVERLAPPED | FILE_FLAG_SEQUENTIAL_SCAN,
+ NULL);
+ if (handle_ == INVALID_HANDLE_VALUE) {
+ NotifyDone(URLRequestStatus(URLRequestStatus::FAILED,
+ WinInetUtil::OSErrorToNetError(GetLastError())));
+ } else {
+ // Get setup for overlapped reads (w/ a manual reset event)
+ overlapped_.hEvent = CreateEvent(NULL, TRUE, FALSE, NULL);
+ }
+ }
+
+ NotifyHeadersComplete();
+}
+
+void URLRequestFileJob::OnObjectSignaled(HANDLE object) {
+ DCHECK(overlapped_.hEvent == object);
+ DCHECK(is_waiting_);
+
+ // We'll resume watching this handle if need be when we do
+ // another IO.
+ MessageLoop::current()->WatchObject(object, NULL);
+ is_waiting_ = false;
+
+ DWORD bytes_read = 0;
+ if (!GetOverlappedResult(handle_, &overlapped_, &bytes_read, FALSE)) {
+ DWORD err = GetLastError();
+ if (err == ERROR_HANDLE_EOF) {
+ // successfully read all data
+ NotifyDone(URLRequestStatus());
+ } else {
+ NotifyDone(URLRequestStatus(URLRequestStatus::FAILED,
+ WinInetUtil::OSErrorToNetError(err)));
+ }
+ } else if (bytes_read) {
+ overlapped_.Offset += bytes_read;
+ // clear the IO_PENDING status
+ SetStatus(URLRequestStatus());
+ } else {
+ // there was no more data so we're done
+ NotifyDone(URLRequestStatus());
+ }
+ NotifyReadComplete(bytes_read);
+
+ Release(); // Balance with AddRef from FillBuf; may destroy |this|
+}
+
+bool URLRequestFileJob::IsRedirectResponse(GURL* location,
+ int* http_status_code) {
+ if (is_directory_) {
+    // This happens when we discovered that the file is a directory, so the
+    // path needs a trailing slash.
+ std::string new_path = request_->url().path();
+ new_path.push_back('/');
+ GURL::Replacements replacements;
+ replacements.SetPathStr(new_path);
+
+ *location = request_->url().ReplaceComponents(replacements);
+ *http_status_code = 301; // simulate a permanent redirect
+ return true;
+ }
+
+ size_t found;
+ found = file_path_.find_last_of('.');
+
+  // We only resolve .lnk files; ignore others.
+ if (found == std::string::npos ||
+ !LowerCaseEqualsASCII(file_path_.substr(found), ".lnk"))
+ return false;
+
+ std::wstring new_path = file_path_;
+ bool resolved;
+ resolved = file_util::ResolveShortcut(&new_path);
+
+  // If the shortcut is not resolved successfully, do not redirect.
+ if (!resolved)
+ return false;
+
+ *location = net_util::FilePathToFileURL(new_path);
+ *http_status_code = 301;
+ return true;
+}
diff --git a/net/url_request/url_request_file_job.h b/net/url_request/url_request_file_job.h
new file mode 100644
index 0000000..1851444
--- /dev/null
+++ b/net/url_request/url_request_file_job.h
@@ -0,0 +1,98 @@
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef BASE_URL_REQUEST_URL_REQUEST_FILE_JOB_H__
+#define BASE_URL_REQUEST_URL_REQUEST_FILE_JOB_H__
+
+#include "base/lock.h"
+#include "base/message_loop.h"
+#include "base/thread.h"
+#include "net/url_request/url_request.h"
+#include "net/url_request/url_request_job.h"
+#include "testing/gtest/include/gtest/gtest_prod.h"
+
+// A request job that handles reading file URLs
+class URLRequestFileJob : public URLRequestJob,
+ protected MessageLoop::Watcher {
+ public:
+ URLRequestFileJob(URLRequest* request);
+ virtual ~URLRequestFileJob();
+
+ virtual void Start();
+ virtual void Kill();
+ virtual bool ReadRawData(char* buf, int buf_size, int *bytes_read);
+ virtual bool IsRedirectResponse(GURL* location, int* http_status_code);
+
+ virtual bool GetMimeType(std::string* mime_type);
+
+ // Checks the status of the file. Set is_directory_ and is_not_found_
+ // accordingly. Call StartAsync on the main message loop when it's done.
+ void ResolveFile();
+
+ static URLRequest::ProtocolFactory Factory;
+
+ protected:
+ // The OS-specific full path name of the file
+ std::wstring file_path_;
+
+ private:
+ // The net util test wants to run our FileURLToFilePath function.
+ FRIEND_TEST(NetUtilTest, FileURLConversion);
+
+ void CloseHandles();
+ void StartAsync();
+
+ // MessageLoop::Watcher callback
+ virtual void OnObjectSignaled(HANDLE object);
+
+ // We use overlapped reads to ensure that reads from network file systems do
+ // not hang the application thread.
+ HANDLE handle_;
+ OVERLAPPED overlapped_;
+ bool is_waiting_; // true when waiting for overlapped result
+  bool is_directory_; // true when the file request is for a directory.
+ bool is_not_found_; // true when the file requested does not exist.
+
+  // This lock ensures that the network_file_thread is not using the loop_ after
+  // it has been set to NULL in Kill().
+ Lock loop_lock_;
+
+ // Main message loop where StartAsync has to be called.
+ MessageLoop* loop_;
+
+ // Thread used to query the attributes of files on the network.
+ // We need to do it on a separate thread because it can be really
+ // slow.
+ HANDLE network_file_thread_;
+
+ DISALLOW_EVIL_CONSTRUCTORS(URLRequestFileJob);
+};
+
+
+#endif // BASE_URL_REQUEST_URL_REQUEST_FILE_JOB_H__
diff --git a/net/url_request/url_request_filter.cc b/net/url_request/url_request_filter.cc
new file mode 100644
index 0000000..57e76ba
--- /dev/null
+++ b/net/url_request/url_request_filter.cc
@@ -0,0 +1,156 @@
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "net/url_request/url_request_filter.h"
+
+#include <set>
+
+#include "net/url_request/url_request_inet_job.h"
+
+URLRequestFilter* URLRequestFilter::shared_instance_ = NULL;
+
+/* static */
+URLRequestFilter* URLRequestFilter::GetInstance() {
+ if (!shared_instance_)
+ shared_instance_ = new URLRequestFilter;
+ return shared_instance_;
+}
+
+/* static */
+URLRequestJob* URLRequestFilter::Factory(URLRequest* request,
+ const std::string& scheme) {
+ // Returning null here just means that the built-in handler will be used.
+ return GetInstance()->FindRequestHandler(request, scheme);
+}
+
+void URLRequestFilter::AddHostnameHandler(const std::string& scheme,
+ const std::string& hostname, URLRequest::ProtocolFactory* factory) {
+ hostname_handler_map_[make_pair(scheme, hostname)] = factory;
+
+ // Register with the ProtocolFactory.
+ URLRequest::RegisterProtocolFactory(scheme,
+ &URLRequestFilter::Factory);
+
+#ifndef NDEBUG
+ // Check to see if we're masking URLs in the url_handler_map_.
+ for (UrlHandlerMap::const_iterator i = url_handler_map_.begin();
+ i != url_handler_map_.end(); ++i) {
+ const GURL& url = GURL(i->first);
+ HostnameHandlerMap::iterator host_it =
+ hostname_handler_map_.find(make_pair(url.scheme(), url.host()));
+ if (host_it != hostname_handler_map_.end())
+ NOTREACHED();
+ }
+#endif // !NDEBUG
+}
+
+void URLRequestFilter::RemoveHostnameHandler(const std::string& scheme,
+ const std::string& hostname) {
+ HostnameHandlerMap::iterator iter =
+ hostname_handler_map_.find(make_pair(scheme, hostname));
+ DCHECK(iter != hostname_handler_map_.end());
+
+ hostname_handler_map_.erase(iter);
+ // Note that we don't unregister from the URLRequest ProtocolFactory as this
+  // would leave no protocol factory for the scheme. URLRequestFilter::Factory
+ // will keep forwarding the requests to the URLRequestInetJob.
+}
+
+bool URLRequestFilter::AddUrlHandler(const GURL& url,
+ URLRequest::ProtocolFactory* factory) {
+ if (!url.is_valid())
+ return false;
+ url_handler_map_[url.spec()] = factory;
+
+ // Register with the ProtocolFactory.
+ URLRequest::RegisterProtocolFactory(url.scheme(),
+ &URLRequestFilter::Factory);
+#ifndef NDEBUG
+ // Check to see if this URL is masked by a hostname handler.
+ HostnameHandlerMap::iterator host_it =
+ hostname_handler_map_.find(make_pair(url.scheme(), url.host()));
+ if (host_it != hostname_handler_map_.end())
+ NOTREACHED();
+#endif // !NDEBUG
+
+ return true;
+}
+
+void URLRequestFilter::RemoveUrlHandler(const GURL& url) {
+ UrlHandlerMap::iterator iter = url_handler_map_.find(url.spec());
+ DCHECK(iter != url_handler_map_.end());
+
+ url_handler_map_.erase(iter);
+ // Note that we don't unregister from the URLRequest ProtocolFactory as this
+  // would leave no protocol factory for the scheme. URLRequestFilter::Factory
+ // will keep forwarding the requests to the URLRequestInetJob.
+}
+
+void URLRequestFilter::ClearHandlers() {
+ // Unregister with the ProtocolFactory.
+ std::set<std::string> schemes;
+ for (UrlHandlerMap::const_iterator i = url_handler_map_.begin();
+ i != url_handler_map_.end(); ++i) {
+ schemes.insert(GURL(i->first).scheme());
+ }
+ for (HostnameHandlerMap::const_iterator i = hostname_handler_map_.begin();
+ i != hostname_handler_map_.end(); ++i) {
+ schemes.insert(i->first.first);
+ }
+ for (std::set<std::string>::const_iterator scheme = schemes.begin();
+ scheme != schemes.end(); ++scheme) {
+ URLRequest::RegisterProtocolFactory(*scheme, NULL);
+ }
+
+ url_handler_map_.clear();
+ hostname_handler_map_.clear();
+}
+
+URLRequestJob* URLRequestFilter::FindRequestHandler(URLRequest* request,
+ const std::string& scheme) {
+ URLRequestJob* job = NULL;
+ if (request->url().is_valid()) {
+ // Check the hostname map first.
+ const std::string& hostname = request->url().host();
+
+ HostnameHandlerMap::iterator i =
+ hostname_handler_map_.find(make_pair(scheme, hostname));
+ if (i != hostname_handler_map_.end())
+ job = i->second(request, scheme);
+
+ if (!job) {
+ // Not in the hostname map, check the url map.
+ const std::string& url = request->url().spec();
+ UrlHandlerMap::iterator i = url_handler_map_.find(url);
+ if (i != url_handler_map_.end())
+ job = i->second(request, scheme);
+ }
+ }
+ return job;
+}
diff --git a/net/url_request/url_request_filter.h b/net/url_request/url_request_filter.h
new file mode 100644
index 0000000..6bdb2bd
--- /dev/null
+++ b/net/url_request/url_request_filter.h
@@ -0,0 +1,105 @@
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// A class to help filter URLRequest jobs based on the URL of the request
+// rather than just the scheme. Example usage:
+//
+// // Use as an "http" handler.
+// URLRequest::RegisterProtocolFactory("http", &URLRequestFilter::Factory);
+// // Add special handling for the URL http://foo.com/
+// URLRequestFilter::GetInstance()->AddUrlHandler(
+// GURL("http://foo.com/"),
+// &URLRequestCustomJob::Factory);
+//
+// If URLRequestFilter::Factory can't find a handle for the request, it passes
+// it through to URLRequestInetJob::Factory and lets the default network stack
+// handle it.
+
+#ifndef BASE_URL_REQUEST_URL_REQUEST_FILTER_H__
+#define BASE_URL_REQUEST_URL_REQUEST_FILTER_H__
+
+#include <hash_map>
+#include <map>
+#include <string>
+
+#include "net/url_request/url_request.h"
+#include "net/url_request/url_request_job.h"
+
+class GURL;
+
+class URLRequestFilter {
+ public:
+ // scheme,hostname -> ProtocolFactory
+ typedef std::map<std::pair<std::string, std::string>,
+ URLRequest::ProtocolFactory*> HostnameHandlerMap;
+ typedef stdext::hash_map<std::string, URLRequest::ProtocolFactory*>
+ UrlHandlerMap;
+
+ // Singleton instance for use.
+ static URLRequestFilter* GetInstance();
+
+ static URLRequest::ProtocolFactory Factory;
+
+ void AddHostnameHandler(const std::string& scheme,
+ const std::string& hostname,
+ URLRequest::ProtocolFactory* factory);
+ void RemoveHostnameHandler(const std::string& scheme,
+ const std::string& hostname);
+
+ // Returns true if we successfully added the URL handler. This will replace
+ // old handlers for the URL if one existed.
+ bool AddUrlHandler(const GURL& url, URLRequest::ProtocolFactory* factory);
+
+ void RemoveUrlHandler(const GURL& url);
+
+ // Clear all the existing URL handlers and unregister with the
+ // ProtocolFactory.
+ void ClearHandlers();
+
+ protected:
+ URLRequestFilter() { }
+
+ // Helper method that looks up the request in the url_handler_map_.
+ URLRequestJob* FindRequestHandler(URLRequest* request,
+ const std::string& scheme);
+
+ // Maps hostnames to factories. Hostnames take priority over URLs.
+ HostnameHandlerMap hostname_handler_map_;
+
+ // Maps URLs to factories.
+ UrlHandlerMap url_handler_map_;
+
+ private:
+ // Singleton instance.
+ static URLRequestFilter* shared_instance_;
+
+ DISALLOW_EVIL_CONSTRUCTORS(URLRequestFilter);
+};
+
+#endif // BASE_URL_REQUEST_URL_REQUEST_FILTER_H__
diff --git a/net/url_request/url_request_ftp_job.cc b/net/url_request/url_request_ftp_job.cc
new file mode 100644
index 0000000..f619609
--- /dev/null
+++ b/net/url_request/url_request_ftp_job.cc
@@ -0,0 +1,547 @@
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "net/url_request/url_request_ftp_job.h"
+
+#include <windows.h>
+#include <wininet.h>
+
+#include "base/message_loop.h"
+#include "base/string_util.h"
+#include "net/base/auth.h"
+#include "net/base/load_flags.h"
+#include "net/base/net_util.h"
+#include "net/base/wininet_util.h"
+#include "net/url_request/url_request.h"
+#include "net/url_request/url_request_error_job.h"
+#include "net/base/escape.h"
+
+using std::string;
+
+using net::WinInetUtil;
+
// When building the directory listing, the period (in milliseconds) to wait
// before notifying the parent class that we wrote the data.
// A typed constant instead of a macro: scoped, type-checked, debugger-visible.
static const int kFtpBufferTimeMs = 50;
+
+static bool UnescapeAndValidatePath(const URLRequest* request,
+ std::string* unescaped_path) {
+ // Path in GURL is %-encoded UTF-8. FTP servers do not
+ // understand %-escaped path so that we have to unescape leading to an
+ // unescaped UTF-8 path. Then, the presence of NULL, CR and LF is checked
+ // because they're not allowed in FTP.
+ // TODO(jungshik) Even though RFC 2640 specifies that UTF-8 be used.
+ // There are many FTP servers that use legacy encodings. For them,
+ // we need to identify the encoding and convert to that encoding.
+ static const std::string kInvalidChars("\x00\x0d\x0a", 3);
+ *unescaped_path = UnescapeURLComponent(request->url().path(),
+ UnescapeRule::SPACES | UnescapeRule::PERCENTS);
+ if (unescaped_path->find_first_of(kInvalidChars) != std::string::npos) {
+ SetLastError(ERROR_INTERNET_INVALID_URL);
+ // GURL path should not contain '%00' which is NULL(0x00) when unescaped.
+ // URLRequestFtpJob should not have been invoked for an invalid GURL.
+ DCHECK(unescaped_path->find(std::string("\x00", 1)) == std::string::npos) <<
+ "Path should not contain %00.";
+ return false;
+ }
+ return true;
+}
+
// static
// Creates the job that will service an ftp:// request.  Registered with
// URLRequest as the protocol factory for the "ftp" scheme.
URLRequestJob* URLRequestFtpJob::Factory(URLRequest* request,
                                         const std::string &scheme) {
  DCHECK(scheme == "ftp");

  // Refuse explicit ports that are not allowed for FTP; serve an error job
  // instead of connecting to a potentially unsafe service.
  if (request->url().has_port() &&
      !net_util::IsPortAllowedByFtp(request->url().IntPort()))
    return new URLRequestErrorJob(request, net::ERR_UNSAFE_PORT);

  return new URLRequestFtpJob(request);
}
+
// Initializes the job in the START state with no pending read buffer.
URLRequestFtpJob::URLRequestFtpJob(URLRequest* request)
    : URLRequestInetJob(request), state_(START), is_directory_(false),
      dest_(NULL), dest_size_(0) {
}
+
// Handle cleanup is performed by the base class; nothing extra to release.
URLRequestFtpJob::~URLRequestFtpJob() {
}
+
// Kicks off the job by opening the WinInet FTP connection.
void URLRequestFtpJob::Start() {
  GURL parts(request_->url());
  const std::string& scheme = parts.scheme();

  // We should only be dealing with FTP at this point:
  DCHECK(LowerCaseEqualsASCII(scheme, "ftp"));

  SendRequest();
}
+
+bool URLRequestFtpJob::GetMimeType(std::string* mime_type) {
+ if (!is_directory_)
+ return false;
+
+ mime_type->assign("text/html");
+ return true;
+}
+
// The user declined to supply credentials.  Completion is posted through the
// message loop rather than notifying re-entrantly from the auth callback.
void URLRequestFtpJob::OnCancelAuth() {
  MessageLoop::current()->PostTask(FROM_HERE, NewRunnableMethod(
      this, &URLRequestFtpJob::ContinueNotifyHeadersComplete));
}
+
// New credentials were supplied: retry the whole connection, again via the
// message loop so the retry does not run inside the auth callback.
void URLRequestFtpJob::OnSetAuth() {
  MessageLoop::current()->PostTask(FROM_HERE, NewRunnableMethod(
      this, &URLRequestFtpJob::SendRequest));
}
+
// Opens the asynchronous WinInet FTP connection (passive mode, raw data),
// applying cached or URL-embedded credentials when available.  On synchronous
// success proceeds to OnConnect(); otherwise the completion (or error) is
// delivered to OnIOComplete() in the CONNECTING state.
void URLRequestFtpJob::SendRequest() {
  state_ = CONNECTING;

  DWORD flags =
      INTERNET_FLAG_KEEP_CONNECTION |
      INTERNET_FLAG_EXISTING_CONNECT |
      INTERNET_FLAG_PASSIVE |
      INTERNET_FLAG_RAW_DATA;

  // It doesn't make sense to ask for both a cache validation and a
  // reload at the same time.
  DCHECK(!((request_->load_flags() & net::LOAD_VALIDATE_CACHE) &&
           (request_->load_flags() & net::LOAD_BYPASS_CACHE)));

  if (request_->load_flags() & net::LOAD_BYPASS_CACHE)
    flags |= INTERNET_FLAG_RELOAD;

  // Apply authentication if we have any, otherwise authenticate
  // according to FTP defaults. (See InternetConnect documentation.)
  // First, check if we have auth in cache, then check URL.
  // That way a user can re-enter credentials, and we'll try with their
  // latest input rather than always trying what they specified
  // in the url (if anything).
  string username, password;
  bool have_auth = false;
  if (server_auth_ != NULL && server_auth_->state == AUTH_STATE_HAVE_AUTH) {
    // Add auth info to cache
    have_auth = true;
    username = WideToUTF8(server_auth_->username);
    password = WideToUTF8(server_auth_->password);
    request_->context()->ftp_auth_cache()->Add(request_->url().host(),
                                               server_auth_.get());
  } else {
    if (request_->url().has_username()) {
      username = request_->url().username();
      password = request_->url().has_password() ? request_->url().password() :
          "";
      have_auth = true;
    }
  }

  int port = request_->url().has_port() ?
      request_->url().IntPort() : INTERNET_DEFAULT_FTP_PORT;

  connection_handle_ = InternetConnectA(GetTheInternet(),
                                        request_->url().host().c_str(),
                                        port,
                                        have_auth ? username.c_str() : NULL,
                                        have_auth ? password.c_str() : NULL,
                                        INTERNET_SERVICE_FTP, flags,
                                        reinterpret_cast<DWORD_PTR>(this));

  // A non-NULL handle means the connect completed synchronously.  A NULL
  // handle is either a pending async completion or a real failure;
  // ProcessRequestError() is expected to distinguish the two — presumably it
  // treats ERROR_IO_PENDING as "wait for OnIOComplete" (TODO confirm).
  if (connection_handle_) {
    OnConnect();
  } else {
    ProcessRequestError(GetLastError());
  }
}
+
// Central async-completion dispatcher: WinInet delivers each I/O completion
// here, and the current state_ determines which step of the job just
// finished.  Completions that do not belong to this state machine are handed
// to the base class.
void URLRequestFtpJob::OnIOComplete(const AsyncResult& result) {
  if (state_ == CONNECTING) {
    switch (result.dwError) {
      case ERROR_NO_MORE_FILES:
        // url is an empty directory
        OnStartDirectoryTraversal();
        OnFinishDirectoryTraversal();
        return;
      case ERROR_INTERNET_LOGIN_FAILURE:
        // fall through
      case ERROR_INTERNET_INCORRECT_USER_NAME:
        // fall through
      case ERROR_INTERNET_INCORRECT_PASSWORD:
        if (server_auth_ != NULL &&
            server_auth_->state == AUTH_STATE_HAVE_AUTH) {
          // The cached credentials were rejected; drop them from the cache.
          request_->context()->ftp_auth_cache()->Remove(request_->url().host());
        } else {
          server_auth_ = new AuthData();
        }
        // Try again, prompting for authentication.
        server_auth_->state = AUTH_STATE_NEED_AUTH;
        // The io completed fine, the error was due to invalid auth.
        SetStatus(URLRequestStatus());
        NotifyHeadersComplete();
        return;
      case ERROR_SUCCESS:
        connection_handle_ = (HINTERNET)result.dwResult;
        OnConnect();
        return;
      case ERROR_INTERNET_EXTENDED_ERROR: {
        // Ask WinInet for the underlying server error; a 1-byte buffer is
        // deliberate since we only want the code, not the text.
        DWORD extended_err(ERROR_SUCCESS);
        DWORD size = 1;
        char buffer[1];
        if (!InternetGetLastResponseInfoA(&extended_err, buffer, &size))
          // We don't care about the error text here, so the only acceptable
          // error is one regarding insufficient buffer length.
          DCHECK(GetLastError() == ERROR_INSUFFICIENT_BUFFER);
        if (extended_err != ERROR_SUCCESS) {
          CleanupConnection();
          NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED,
              WinInetUtil::OSErrorToNetError(extended_err)));
          return;
        }
        // Fall through in the case we saw ERROR_INTERNET_EXTENDED_ERROR but
        // InternetGetLastResponseInfo gave us no additional information.
      }
      default:
        CleanupConnection();
        NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED,
            WinInetUtil::OSErrorToNetError(result.dwError)));
        return;
    }
  } else if (state_ == SETTING_CUR_DIRECTORY) {
    OnSetCurrentDirectory(result.dwError);
  } else if (state_ == FINDING_FIRST_FILE) {
    if (result.dwError != ERROR_SUCCESS) {
      DWORD result_error = result.dwError;
      CleanupConnection();
      // Fixup the error message from our directory/file guessing.
      if (!is_directory_ && result_error == ERROR_NO_MORE_FILES)
        result_error = ERROR_PATH_NOT_FOUND;
      NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED,
          WinInetUtil::OSErrorToNetError(result_error)));
      return;
    }
    request_handle_ = (HINTERNET)result.dwResult;
    OnFindFirstFile(result.dwError);
  } else if (state_ == GETTING_DIRECTORY) {
    OnFindFile(result.dwError);
  } else if (state_ == GETTING_FILE_HANDLE) {
    if (result.dwError != ERROR_SUCCESS) {
      CleanupConnection();
      NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED,
          WinInetUtil::OSErrorToNetError(result.dwError)));
      return;
    }
    // start reading file contents
    state_ = GETTING_FILE;
    request_handle_ = (HINTERNET)result.dwResult;
    NotifyHeadersComplete();
  } else {
    // we don't have IO outstanding. Pass to our base class.
    URLRequestInetJob::OnIOComplete(result);
  }
}
+
+bool URLRequestFtpJob::NeedsAuth() {
+ // Note that we only have to worry about cases where an actual FTP server
+ // requires auth (and not a proxy), because connecting to FTP via proxy
+ // effectively means the browser communicates via HTTP, and uses HTTP's
+ // Proxy-Authenticate protocol when proxy servers require auth.
+ return ((server_auth_ != NULL) &&
+ server_auth_->state == AUTH_STATE_NEED_AUTH);
+}
+
// Builds the challenge shown in the auth prompt.  FTP has no auth scheme or
// realm strings, so only the host is populated.
void URLRequestFtpJob::GetAuthChallengeInfo(
    scoped_refptr<AuthChallengeInfo>* result) {
  DCHECK((server_auth_ != NULL) &&
         (server_auth_->state == AUTH_STATE_NEED_AUTH));
  scoped_refptr<AuthChallengeInfo> auth_info = new AuthChallengeInfo;
  auth_info->is_proxy = false;
  auth_info->host = UTF8ToWide(request_->url().host());
  auth_info->scheme = L"";
  auth_info->realm = L"";
  result->swap(auth_info);
}
+
// Looks up previously-entered credentials for this host in the context's
// FTP auth cache (keyed by host only — no realm for FTP).
void URLRequestFtpJob::GetCachedAuthData(
    const AuthChallengeInfo& auth_info,
    scoped_refptr<AuthData>* auth_data) {
  *auth_data = request_->context()->ftp_auth_cache()->
      Lookup(WideToUTF8(auth_info.host));
}
+
// The WinInet connection is established; try to CWD to the URL path, which
// doubles as the file-vs-directory probe.
void URLRequestFtpJob::OnConnect() {
  DCHECK_EQ(state_, CONNECTING);

  state_ = SETTING_CUR_DIRECTORY;
  // Setting the directory lets us determine if the URL is a file,
  // and also keeps the working directory for the FTP session in sync
  // with what is being displayed in the browser.
  // NOTE(review): if the URL has no path, neither branch below executes and
  // the job appears to make no further progress — confirm GURL always
  // reports a path (at least "/") for ftp: URLs.
  if (request_->url().has_path()) {
    std::string unescaped_path;
    if (UnescapeAndValidatePath(request_, &unescaped_path) &&
        FtpSetCurrentDirectoryA(connection_handle_,
                                unescaped_path.c_str())) {
      // Synchronous success; continue immediately.
      OnSetCurrentDirectory(ERROR_SUCCESS);
    } else {
      // Async pending or real failure; ProcessRequestError decides which.
      ProcessRequestError(GetLastError());
    }
  }
}
+
// Result of the CWD probe: success means the URL names a directory.  Either
// way we issue FtpFindFirstFile next — for a directory it starts the
// listing; for a file (or bad path) it tells us whether the entry exists.
void URLRequestFtpJob::OnSetCurrentDirectory(DWORD last_error) {
  DCHECK_EQ(state_, SETTING_CUR_DIRECTORY);

  is_directory_ = (last_error == ERROR_SUCCESS);
  // if last_error is not ERROR_SUCCESS, the requested url is either
  // a file or an invalid path. We optimistically try to read as a file,
  // and if it fails, we fail.
  state_ = FINDING_FIRST_FILE;

  std::string unescaped_path;
  bool is_path_valid = true;
  if (request_->url().has_path()) {
    is_path_valid = UnescapeAndValidatePath(request_, &unescaped_path);
  }
  if (is_path_valid &&
      (request_handle_ = FtpFindFirstFileA(connection_handle_,
                                           unescaped_path.c_str(),
                                           &find_data_, 0,
                                           reinterpret_cast<DWORD_PTR>(this)))) {
    // Synchronous completion; async completions arrive via OnIOComplete.
    OnFindFirstFile(GetLastError());
  } else {
    ProcessRequestError(GetLastError());
  }
}
+
// Advances the directory enumeration by one entry.  ERROR_NO_MORE_FILES is
// the normal end-of-listing signal and is forwarded to OnFindFile.
void URLRequestFtpJob::FindNextFile() {
  DWORD last_error;
  if (InternetFindNextFileA(request_handle_, &find_data_)) {
    last_error = ERROR_SUCCESS;
  } else {
    last_error = GetLastError();
    // We'll get ERROR_NO_MORE_FILES if the directory is empty.
    if (last_error != ERROR_NO_MORE_FILES) {
      ProcessRequestError(last_error);
      return;
    }
  }
  // Use InvokeLater to call OnFindFile as it ends up calling us, so that we
  // don't blow the stack with deep mutual recursion.
  MessageLoop::current()->PostTask(FROM_HERE, NewRunnableMethod(
      this, &URLRequestFtpJob::OnFindFile, last_error));
}
+
// First enumeration result is in.  For a file URL we open the file for
// binary transfer; for a directory we begin emitting the HTML listing.
void URLRequestFtpJob::OnFindFirstFile(DWORD last_error) {
  DCHECK_EQ(state_, FINDING_FIRST_FILE);
  if (!is_directory_) {
    // Note that it is not enough to just check !is_directory_ and assume
    // the URL is a file, because is_directory_ is true iff we successfully
    // set current directory to the URL path. Therefore, the URL could just
    // be an invalid path. We proceed optimistically and fail in that case.
    state_ = GETTING_FILE_HANDLE;
    std::string unescaped_path;
    if (UnescapeAndValidatePath(request_, &unescaped_path) &&
        (request_handle_ = FtpOpenFileA(connection_handle_,
                                        unescaped_path.c_str(),
                                        GENERIC_READ,
                                        INTERNET_FLAG_TRANSFER_BINARY,
                                        reinterpret_cast<DWORD_PTR>(this)))) {
      // Start reading file contents
      state_ = GETTING_FILE;
      NotifyHeadersComplete();
    } else {
      ProcessRequestError(GetLastError());
    }
  } else {
    OnStartDirectoryTraversal();
    // If we redirect in OnStartDirectoryTraversal() then this request job
    // is cancelled.
    if (request_handle_)
      OnFindFile(last_error);
  }
}
+
// One directory entry is available in find_data_: append its HTML row to the
// listing and request the next entry.  ERROR_NO_MORE_FILES ends the listing.
void URLRequestFtpJob::OnFindFile(DWORD last_error) {
  DCHECK_EQ(state_, GETTING_DIRECTORY);

  if (last_error == ERROR_SUCCESS) {
    // TODO(jabdelmalek): need to add icons for files/folders.
    // Assemble the 64-bit size from the two 32-bit halves WinInet gives us.
    int64 size =
        (static_cast<unsigned __int64>(find_data_.nFileSizeHigh) << 32) |
        find_data_.nFileSizeLow;

    // We don't know the encoding, and can't assume utf8, so pass the 8bit
    // directly to the browser for it to decide.
    string file_entry = net_util::GetDirectoryListingEntry(
        find_data_.cFileName, find_data_.dwFileAttributes, size,
        &find_data_.ftLastWriteTime);
    WriteData(&file_entry, true);

    FindNextFile();
    return;
  }

  DCHECK(last_error == ERROR_NO_MORE_FILES);
  OnFinishDirectoryTraversal();
}
+
// Emits the listing header (and a ".." link for non-root paths), then tells
// the request that headers are ready so it can start reading the HTML.
void URLRequestFtpJob::OnStartDirectoryTraversal() {
  state_ = GETTING_DIRECTORY;

  // Unescape the URL path and pass the raw 8bit directly to the browser.
  string html = net_util::GetDirectoryListingHeader(
      UnescapeURLComponent(request_->url().path(),
          UnescapeRule::SPACES | UnescapeRule::PERCENTS));

  // If this isn't top level directory (i.e. the path isn't "/",) add a link to
  // the parent directory.
  if (request_->url().path().length() > 1)
    html.append(net_util::GetDirectoryListingEntry("..", 0, 0, NULL));

  WriteData(&html, true);

  NotifyHeadersComplete();
}
+
// Enumeration is complete; any HTML still buffered in directory_html_ will
// be drained by subsequent reads (see CallInternetRead).
void URLRequestFtpJob::OnFinishDirectoryTraversal() {
  state_ = DONE;
}
+
+int URLRequestFtpJob::WriteData(const std::string* data,
+ bool call_io_complete) {
+ int written = 0;
+
+ if (data && data->length())
+ directory_html_.append(*data);
+
+ if (dest_) {
+ size_t bytes_to_copy = std::min(static_cast<size_t>(dest_size_),
+ directory_html_.length());
+ if (bytes_to_copy) {
+ memcpy(dest_, directory_html_.c_str(), bytes_to_copy);
+ directory_html_.erase(0, bytes_to_copy);
+ dest_ = NULL;
+ dest_size_ = NULL;
+ written = static_cast<int>(bytes_to_copy);
+
+ if (call_io_complete) {
+ // Wait a little bit before telling the parent class that we wrote
+ // data. This avoids excessive cycles of us getting one file entry and
+ // telling the parent class to Read().
+ MessageLoop::current()->PostDelayedTask(FROM_HERE, NewRunnableMethod(
+ this, &URLRequestFtpJob::ContinueIOComplete, written),
+ kFtpBufferTimeMs);
+ }
+ }
+ }
+
+ return written;
+}
+
// Message-loop continuation: reports |bytes_written| to the base class as a
// successfully completed IO by synthesizing an AsyncResult.
void URLRequestFtpJob::ContinueIOComplete(int bytes_written) {
  AsyncResult result;
  result.dwResult = bytes_written;
  result.dwError = ERROR_SUCCESS;
  URLRequestInetJob::OnIOComplete(result);
}
+
// Message-loop continuation used by OnCancelAuth.
void URLRequestFtpJob::ContinueNotifyHeadersComplete() {
  NotifyHeadersComplete();
}
+
// Base-class read hook.  For directories, serves bytes from the buffered
// HTML listing (arming dest_/dest_size_ when nothing is buffered yet); for
// files, reads from the WinInet handle.  Returns a Win32 error code, with
// ERROR_IO_PENDING meaning the data will arrive via OnIOComplete.
int URLRequestFtpJob::CallInternetRead(char* dest, int dest_size,
                                       int *bytes_read) {
  int result;

  if (is_directory_) {
    // Copy the html that we created from the directory listing that we got
    // from InternetFindNextFile.
    DCHECK(dest_ == NULL);
    dest_ = dest;
    dest_size_ = dest_size;

    DCHECK(state_ == GETTING_DIRECTORY || state_ == DONE);
    int written = WriteData(NULL, false);
    if (written) {
      *bytes_read = written;
      result = ERROR_SUCCESS;
    } else {
      // Nothing buffered: if the listing is still being built, the read is
      // pending; if DONE, report success with zero bytes (EOF).
      result = state_ == GETTING_DIRECTORY ? ERROR_IO_PENDING : ERROR_SUCCESS;
    }
  } else {
    DWORD bytes_to_read = dest_size;
    bytes_read_ = 0;
    // InternetReadFileEx doesn't work for asynchronous FTP, InternetReadFile
    // must be used instead.
    if (!InternetReadFile(request_handle_, dest, bytes_to_read, &bytes_read_))
      return GetLastError();

    *bytes_read = static_cast<int>(bytes_read_);
    result = ERROR_SUCCESS;
  }

  return result;
}
+
// Extracts the byte count from an async read completion.  Returns false on
// a failed (file) read.
bool URLRequestFtpJob::GetReadBytes(const AsyncResult& result,
                                    int* bytes_read) {
  if (is_directory_) {
    // Directory reads complete via ContinueIOComplete, which stores the
    // count directly in dwResult.
    *bytes_read = static_cast<int>(result.dwResult);
  } else {
    if (!result.dwResult)
      return false;

    // IE5 and later return the number of read bytes in the
    // INTERNET_ASYNC_RESULT structure. IE4 holds on to the pointer passed in
    // to InternetReadFile and store it there.
    *bytes_read = bytes_read_;

    if (!*bytes_read)
      *bytes_read = result.dwError;
  }

  return true;
}
+
+bool URLRequestFtpJob::IsRedirectResponse(GURL* location,
+ int* http_status_code) {
+ if (is_directory_) {
+ std::string ftp_path = request_->url().path();
+ if (!ftp_path.empty() && ('/' != ftp_path[ftp_path.length() - 1])) {
+ ftp_path.push_back('/');
+ GURL::Replacements replacements;
+ replacements.SetPathStr(ftp_path);
+
+ *location = request_->url().ReplaceComponents(replacements);
+ *http_status_code = 301; // simulate a permanent redirect
+ return true;
+ }
+ }
+
+ return false;
+}
diff --git a/net/url_request/url_request_ftp_job.h b/net/url_request/url_request_ftp_job.h
new file mode 100644
index 0000000..ae47dbf
--- /dev/null
+++ b/net/url_request/url_request_ftp_job.h
@@ -0,0 +1,133 @@
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef BASE_URL_REQUEST_URL_REQUEST_FTP_JOB_H__
+#define BASE_URL_REQUEST_URL_REQUEST_FTP_JOB_H__
+
+#include "net/url_request/url_request_inet_job.h"
+
// A basic FTP job that handles download files and showing directory listings.
// Built on WinInet's asynchronous FTP API; FtpJobState tracks which async
// call is currently outstanding.
class URLRequestFtpJob : public URLRequestInetJob {
 public:
  static URLRequestJob* Factory(URLRequest* request, const std::string& scheme);

  virtual ~URLRequestFtpJob();

  // URLRequestJob methods:
  virtual void Start();
  virtual bool GetMimeType(std::string* mime_type);

  // URLRequestInetJob methods:
  virtual void OnIOComplete(const AsyncResult& result);

 protected:
  URLRequestFtpJob(URLRequest* request);

  // Starts the WinInet request.
  virtual void SendRequest();

  virtual int CallInternetRead(char* dest, int dest_size, int *bytes_read);
  virtual bool GetReadBytes(const AsyncResult& result, int* bytes_read);
  virtual void OnCancelAuth();
  virtual void OnSetAuth();
  virtual bool NeedsAuth();
  virtual void GetAuthChallengeInfo(scoped_refptr<AuthChallengeInfo>*);
  virtual void GetCachedAuthData(const AuthChallengeInfo& auth_info,
                                 scoped_refptr<AuthData>* auth_data);
  virtual bool IsRedirectResponse(GURL* location, int* http_status_code);

 private:
  // Called after InternetConnect successfully connects to server.
  void OnConnect();

  // Called after FtpSetCurrentDirectory attempts to change current dir.
  void OnSetCurrentDirectory(DWORD last_error);

  // Requests the next file in the directory listing from WinInet.
  void FindNextFile();

  // Called when the first file in a directory listing is available.
  void OnFindFirstFile(DWORD last_error);

  // Called when a file in a directory listing is available.
  void OnFindFile(DWORD last_error);

  // Call this when starting a directory listing to setup the html.
  void OnStartDirectoryTraversal();

  // Call this at the end of a directory listing to complete the html.
  void OnFinishDirectoryTraversal();

  // If given data, writes it to the directory listing html. If
  // call_io_complete is true, will also notify the parent class that we wrote
  // data in the given buffer.
  int WriteData(const std::string* data, bool call_io_complete);

  // Continuation function for calling OnIOComplete through the message loop.
  virtual void ContinueIOComplete(int bytes_written);

  // Continuation function for calling NotifyHeadersComplete through
  // the message loop.
  virtual void ContinueNotifyHeadersComplete();

  typedef enum {
    START = 0x200,          // initial state of the ftp job
    CONNECTING,             // opening the url
    SETTING_CUR_DIRECTORY,  // attempting to change current dir to match
                            // the request
    FINDING_FIRST_FILE,     // retrieving first file information in cur dir
                            // (by FtpFindFirstFile)
    GETTING_DIRECTORY,      // retrieving the directory listing (if directory)
    GETTING_FILE_HANDLE,    // initiate access to file by call to FtpOpenFile
                            // (if file)
    GETTING_FILE,           // retrieving the file (if file)
    DONE                    // URLRequestInetJob is reading the response now
  } FtpJobState;

  // The FtpJob has several asynchronous operations which happen
  // in sequence. The state keeps track of which asynchronous IO
  // is pending at any given point in time.
  FtpJobState state_;

  // In IE 4 and before, this pointer passed to asynchronous InternetReadFile
  // calls is where the number of read bytes is written to.
  DWORD bytes_read_;

  bool is_directory_;  // does the url point to a file or directory
  WIN32_FIND_DATAA find_data_;
  std::string directory_html_;  // if url is directory holds html

  // When building a directory listing, we need to temporarily hold on to the
  // buffer in between the time a Read() call comes in and we get the file
  // entry from WinInet.
  char* dest_;
  int dest_size_;


  DISALLOW_EVIL_CONSTRUCTORS(URLRequestFtpJob);
};
+
+#endif  // BASE_URL_REQUEST_URL_REQUEST_FTP_JOB_H__
diff --git a/net/url_request/url_request_http_cache_job.cc b/net/url_request/url_request_http_cache_job.cc
new file mode 100644
index 0000000..9537638
--- /dev/null
+++ b/net/url_request/url_request_http_cache_job.cc
@@ -0,0 +1,539 @@
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "net/url_request/url_request_http_cache_job.h"
+
+#include "base/message_loop.h"
+#include "base/string_util.h"
+#include "net/base/cookie_monster.h"
+#include "net/base/net_errors.h"
+#include "net/base/net_util.h"
+#include "net/http/http_response_info.h"
+#include "net/http/http_transaction.h"
+#include "net/http/http_transaction_factory.h"
+#include "net/url_request/url_request.h"
+#include "net/url_request/url_request_error_job.h"
+
+// TODO(darin): make sure the port blocking code is not lost
+
+#pragma warning(disable: 4355)
+
// static
// Creates the job that services http/https requests through the HTTP cache.
// Rejects unsafe ports and contexts without a transaction factory up front
// by returning an error job.
URLRequestJob* URLRequestHttpCacheJob::Factory(URLRequest* request,
                                               const std::string& scheme) {
  DCHECK(scheme == "http" || scheme == "https");

  if (!net_util::IsPortAllowedByDefault(request->url().IntPort()))
    return new URLRequestErrorJob(request, net::ERR_UNSAFE_PORT);

  if (!request->context() ||
      !request->context()->http_transaction_factory()) {
    NOTREACHED() << "requires a valid context";
    return new URLRequestErrorJob(request, net::ERR_INVALID_ARGUMENT);
  }

  return new URLRequestHttpCacheJob(request);
}
+
// Initializes the job with no transaction yet; the transaction is created in
// Start().  The start/read callbacks bind |this|, hence the disabled C4355
// warning at the top of the file.
URLRequestHttpCacheJob::URLRequestHttpCacheJob(URLRequest* request)
    : URLRequestJob(request),
      context_(request->context()),
      transaction_(NULL),
      response_info_(NULL),
      proxy_auth_state_(AUTH_STATE_DONT_NEED_AUTH),
      server_auth_state_(AUTH_STATE_DONT_NEED_AUTH),
      start_callback_(this, &URLRequestHttpCacheJob::OnStartCompleted),
      read_callback_(this, &URLRequestHttpCacheJob::OnReadCompleted),
      read_in_progress_(false) {
}
+
// Tears down any still-live transaction so the cache entry is released.
URLRequestHttpCacheJob::~URLRequestHttpCacheJob() {
  if (transaction_)
    DestroyTransaction();
}
+
// Attaches the request body; only legal before Start() creates the
// transaction.
void URLRequestHttpCacheJob::SetUpload(net::UploadData* upload) {
  DCHECK(!transaction_) << "cannot change once started";
  request_info_.upload_data = upload;
}
+
// Sets additional raw request headers; only legal before Start().
void URLRequestHttpCacheJob::SetExtraRequestHeaders(
    const std::string& headers) {
  DCHECK(!transaction_) << "cannot change once started";
  request_info_.extra_headers = headers;
}
+
// Populates request_info_ from the URLRequest (sanitizing the referrer) and
// starts the HTTP transaction.
void URLRequestHttpCacheJob::Start() {
  DCHECK(!transaction_);

  // TODO(darin): URLRequest::referrer() should return a GURL
  GURL referrer(request_->referrer());

  // Ensure that we do not send username and password fields in the referrer.
  if (referrer.has_username() || referrer.has_password()) {
    GURL::Replacements referrer_mods;
    referrer_mods.ClearUsername();
    referrer_mods.ClearPassword();
    referrer = referrer.ReplaceComponents(referrer_mods);
  }

  request_info_.url = request_->url();
  request_info_.referrer = referrer;
  request_info_.method = request_->method();
  request_info_.load_flags = request_->load_flags();

  if (request_->context())
    request_info_.user_agent = request_->context()->user_agent();

  AddExtraHeaders();

  StartTransaction();
}
+
// Cancels the job by destroying the transaction.
// NOTE(review): when transaction_ is already NULL we return without calling
// URLRequestJob::Kill() — presumably there is nothing left to cancel, but
// confirm the base class does not rely on being notified in that case.
void URLRequestHttpCacheJob::Kill() {
  if (!transaction_)
    return;

  DestroyTransaction();
  URLRequestJob::Kill();
}
+
+net::LoadState URLRequestHttpCacheJob::GetLoadState() const {
+ return transaction_ ? transaction_->GetLoadState() : net::LOAD_STATE_IDLE;
+}
+
+uint64 URLRequestHttpCacheJob::GetUploadProgress() const {
+ return transaction_ ? transaction_->GetUploadProgress() : 0;
+}
+
// Extracts the MIME type from the response headers; false before headers
// have arrived.
bool URLRequestHttpCacheJob::GetMimeType(std::string* mime_type) {
  DCHECK(transaction_);

  if (!response_info_)
    return false;

  return response_info_->headers->GetMimeType(mime_type);
}
+
// Extracts the charset from the response headers; false before headers have
// arrived.
bool URLRequestHttpCacheJob::GetCharset(std::string* charset) {
  DCHECK(transaction_);

  if (!response_info_)
    return false;

  return response_info_->headers->GetCharset(charset);
}
+
// Copies the full response info out to the caller; a no-op (|info| is left
// untouched) before headers have arrived.
void URLRequestHttpCacheJob::GetResponseInfo(net::HttpResponseInfo* info) {
  DCHECK(request_);
  DCHECK(transaction_);

  if (response_info_)
    *info = *response_info_;
}
+
// Hands the parsed Set-Cookie values to the caller.  Cookies are fetched
// lazily on first call and then swapped out, so response_cookies_ is left
// empty afterwards.
bool URLRequestHttpCacheJob::GetResponseCookies(
    std::vector<std::string>* cookies) {
  DCHECK(transaction_);

  if (!response_info_)
    return false;

  if (response_cookies_.empty())
    FetchResponseCookies();

  cookies->clear();
  cookies->swap(response_cookies_);
  return true;
}
+
// HTTP status code of the response, or -1 before headers have arrived.
int URLRequestHttpCacheJob::GetResponseCode() {
  DCHECK(transaction_);

  if (!response_info_)
    return -1;

  return response_info_->headers->response_code();
}
+
// Reports the first Content-Encoding header value, if any.
bool URLRequestHttpCacheJob::GetContentEncoding(std::string* encoding_type) {
  DCHECK(transaction_);

  if (!response_info_)
    return false;

  // TODO(darin): what if there are multiple content encodings?
  return response_info_->headers->EnumerateHeader(NULL, "Content-Encoding",
                                                  encoding_type);
}
+
// True when the response headers indicate a redirect; fills in the absolute
// target (Location may be relative, so resolve against the request URL) and
// the status code.
bool URLRequestHttpCacheJob::IsRedirectResponse(GURL* location,
                                                int* http_status_code) {
  if (!response_info_)
    return false;

  std::string value;
  if (!response_info_->headers->IsRedirect(&value))
    return false;

  *location = request_->url().Resolve(value);
  *http_status_code = response_info_->headers->response_code();
  return true;
}
+
+bool URLRequestHttpCacheJob::IsSafeRedirect(const GURL& location) {
+ // We only allow redirects to certain "safe" protocols. This does not
+ // restrict redirects to externally handled protocols. Our consumer would
+ // need to take care of those.
+
+ if (!URLRequest::IsHandledURL(location))
+ return true;
+
+ static const char* kSafeSchemes[] = {
+ "http",
+ "https",
+ "ftp"
+ };
+
+ for (size_t i = 0; i < arraysize(kSafeSchemes); ++i) {
+ if (location.SchemeIs(kSafeSchemes[i]))
+ return true;
+ }
+
+ return false;
+}
+
+bool URLRequestHttpCacheJob::NeedsAuth() {
+ int code = GetResponseCode();
+ if (code == -1)
+ return false;
+
+ // Check if we need either Proxy or WWW Authentication. This could happen
+ // because we either provided no auth info, or provided incorrect info.
+ switch (code) {
+ case 407:
+ if (proxy_auth_state_ == AUTH_STATE_CANCELED)
+ return false;
+ proxy_auth_state_ = AUTH_STATE_NEED_AUTH;
+ return true;
+ case 401:
+ if (server_auth_state_ == AUTH_STATE_CANCELED)
+ return false;
+ server_auth_state_ = AUTH_STATE_NEED_AUTH;
+ return true;
+ }
+ return false;
+}
+
// Returns the auth challenge that the transaction attached to the response.
void URLRequestHttpCacheJob::GetAuthChallengeInfo(
    scoped_refptr<AuthChallengeInfo>* result) {
  DCHECK(transaction_);
  DCHECK(response_info_);

  // sanity checks:
  DCHECK(proxy_auth_state_ == AUTH_STATE_NEED_AUTH ||
         server_auth_state_ == AUTH_STATE_NEED_AUTH);
  DCHECK(response_info_->headers->response_code() == 401 ||
         response_info_->headers->response_code() == 407);

  *result = response_info_->auth_challenge;
}
+
// Looks up previously-entered credentials for this challenge in the
// transaction factory's auth cache; sets |auth_data| to NULL when there is
// no cache.
void URLRequestHttpCacheJob::GetCachedAuthData(
    const AuthChallengeInfo& auth_info,
    scoped_refptr<AuthData>* auth_data) {
  AuthCache* auth_cache =
      request_->context()->http_transaction_factory()->GetAuthCache();
  if (!auth_cache) {
    *auth_data = NULL;
    return;
  }
  std::string auth_cache_key = AuthCache::HttpKey(request_->url(),
                                                  auth_info);
  *auth_data = auth_cache->Lookup(auth_cache_key);
}
+
// Restarts the transaction with credentials supplied by the consumer.  The
// proxy challenge (if pending) is satisfied before the server challenge.
void URLRequestHttpCacheJob::SetAuth(const std::wstring& username,
                                     const std::wstring& password) {
  DCHECK(transaction_);

  // Proxy gets set first, then WWW.
  if (proxy_auth_state_ == AUTH_STATE_NEED_AUTH) {
    proxy_auth_state_ = AUTH_STATE_HAVE_AUTH;
  } else {
    DCHECK(server_auth_state_ == AUTH_STATE_NEED_AUTH);
    server_auth_state_ = AUTH_STATE_HAVE_AUTH;
  }

  // These will be reset in OnStartCompleted.
  response_info_ = NULL;
  response_cookies_.clear();

  // No matter what, we want to report our status as IO pending since we will
  // be notifying our consumer asynchronously via OnStartCompleted.
  SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));

  int rv = transaction_->RestartWithAuth(username, password,
                                         &start_callback_);
  if (rv == net::ERR_IO_PENDING)
    return;

  // The transaction started synchronously, but we need to notify the
  // URLRequest delegate via the message loop (avoids re-entering the caller).
  MessageLoop::current()->PostTask(FROM_HERE, NewRunnableMethod(
      this, &URLRequestHttpCacheJob::OnStartCompleted, rv));
}
+
// Marks the pending challenge as canceled and lets the consumer read the
// 401/407 error page as a normal response.
void URLRequestHttpCacheJob::CancelAuth() {
  // Proxy gets set first, then WWW.
  if (proxy_auth_state_ == AUTH_STATE_NEED_AUTH) {
    proxy_auth_state_ = AUTH_STATE_CANCELED;
  } else {
    DCHECK(server_auth_state_ == AUTH_STATE_NEED_AUTH);
    server_auth_state_ = AUTH_STATE_CANCELED;
  }

  // These will be reset in OnStartCompleted.
  response_info_ = NULL;
  response_cookies_.clear();

  // OK, let the consumer read the error page...
  //
  // Because we set the AUTH_STATE_CANCELED flag, NeedsAuth will return false,
  // which will cause the consumer to receive OnResponseStarted instead of
  // OnAuthRequired.
  //
  // We have to do this via InvokeLater to avoid "recursing" the consumer.
  //
  MessageLoop::current()->PostTask(FROM_HERE, NewRunnableMethod(
      this, &URLRequestHttpCacheJob::OnStartCompleted, net::OK));
}
+
// Called when the consumer chooses to proceed despite the last error (e.g.
// an SSL certificate error surfaced via OnSSLCertificateError).  Restarts
// the transaction, ignoring that error.
void URLRequestHttpCacheJob::ContinueDespiteLastError() {
  DCHECK(transaction_);
  DCHECK(!response_info_) << "should not have a response yet";

  // No matter what, we want to report our status as IO pending since we will
  // be notifying our consumer asynchronously via OnStartCompleted.
  SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));

  int rv = transaction_->RestartIgnoringLastError(&start_callback_);
  if (rv == net::ERR_IO_PENDING)
    return;

  // The transaction started synchronously, but we need to notify the
  // URLRequest delegate via the message loop.
  MessageLoop::current()->PostTask(FROM_HERE, NewRunnableMethod(
      this, &URLRequestHttpCacheJob::OnStartCompleted, rv));
}
+
+bool URLRequestHttpCacheJob::GetMoreData() {
+ return transaction_ && !read_in_progress_;
+}
+
// Reads up to |buf_size| bytes of response body into |buf|.  Returns true
// with *bytes_read set on synchronous completion (0 meaning EOF); returns
// false when the read went asynchronous (OnReadCompleted fires later) or
// failed (the failure is reported via NotifyDone).
bool URLRequestHttpCacheJob::ReadRawData(char* buf, int buf_size,
                                         int *bytes_read) {
  DCHECK_NE(buf_size, 0);
  DCHECK(bytes_read);
  DCHECK(!read_in_progress_);

  int rv = transaction_->Read(buf, buf_size, &read_callback_);
  if (rv >= 0) {
    // Synchronous completion.
    *bytes_read = rv;
    return true;
  }

  if (rv == net::ERR_IO_PENDING) {
    read_in_progress_ = true;
    SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));
  } else {
    NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, rv));
  }

  return false;
}
+
// Completion callback for Start/RestartWithAuth/RestartIgnoringLastError.
// Dispatches to headers-complete, SSL-error, or start-error handling.
void URLRequestHttpCacheJob::OnStartCompleted(int result) {
  // If the request was destroyed, then there is no more work to do.
  if (!request_ || !request_->delegate())
    return;

  // If the transaction was destroyed, then the job was cancelled, and
  // we can just ignore this notification.
  if (!transaction_)
    return;

  // Clear the IO_PENDING status
  SetStatus(URLRequestStatus());

  if (result == net::OK) {
    NotifyHeadersComplete();
  } else if (net::IsCertificateError(result)) {
    // We encountered an SSL certificate error.  Ask our delegate to decide
    // what we should do.
    // TODO(wtc): also pass ssl_info.cert_status, or just pass the whole
    // ssl_info.
    request_->delegate()->OnSSLCertificateError(
        request_, result, transaction_->GetResponseInfo()->ssl_info.cert);
  } else {
    NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED, result));
  }
}
+
+void URLRequestHttpCacheJob::OnReadCompleted(int result) {
+ read_in_progress_ = false;
+
+ if (result == 0) {
+ NotifyDone(URLRequestStatus());
+ } else if (result < 0) {
+ NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, result));
+ } else {
+ // Clear the IO_PENDING status
+ SetStatus(URLRequestStatus());
+ }
+
+ NotifyReadComplete(result);
+}
+
// Shadows URLRequestJob::NotifyHeadersComplete so we can capture the
// response info and persist Set-Cookie values (subject to the cookie
// policy) before the consumer is notified.
void URLRequestHttpCacheJob::NotifyHeadersComplete() {
  DCHECK(!response_info_);

  response_info_ = transaction_->GetResponseInfo();

  // Get the Set-Cookie values, and send them to our cookie database.

  FetchResponseCookies();

  URLRequestContext* ctx = request_->context();
  if (ctx && ctx->cookie_store() &&
      ctx->cookie_policy()->CanSetCookie(request_->url(),
                                         request_->policy_url()))
    ctx->cookie_store()->SetCookies(request_->url(), response_cookies_);

  URLRequestJob::NotifyHeadersComplete();
}
+
+void URLRequestHttpCacheJob::DestroyTransaction() {
+ DCHECK(transaction_);
+
+ transaction_->Destroy();
+ transaction_ = NULL;
+ response_info_ = NULL;
+}
+
// Creates the HTTP transaction and starts it.  The job is always left in
// IO_PENDING state; a synchronous result (including factory failure) is
// re-posted through the message loop so the delegate is never re-entered.
void URLRequestHttpCacheJob::StartTransaction() {
  // NOTE: This method assumes that request_info_ is already setup properly.

  // Create a transaction.
  DCHECK(!transaction_);

  DCHECK(request_->context());
  DCHECK(request_->context()->http_transaction_factory());

  transaction_ =
      request_->context()->http_transaction_factory()->CreateTransaction();

  // No matter what, we want to report our status as IO pending since we will
  // be notifying our consumer asynchronously via OnStartCompleted.
  SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));

  int rv;
  if (transaction_) {
    rv = transaction_->Start(&request_info_, &start_callback_);
    if (rv == net::ERR_IO_PENDING)
      return;
  } else {
    // The factory refused to create a transaction.
    rv = net::ERR_FAILED;
  }

  // The transaction started synchronously, but we need to notify the
  // URLRequest delegate via the message loop.
  MessageLoop::current()->PostTask(FROM_HERE, NewRunnableMethod(
      this, &URLRequestHttpCacheJob::OnStartCompleted, rv));
}
+
+void URLRequestHttpCacheJob::AddExtraHeaders() {
+ URLRequestContext* context = request_->context();
+ if (context) {
+ // Add in the cookie header. TODO might we need more than one header?
+ if (context->cookie_store() &&
+ context->cookie_policy()->CanGetCookies(request_->url(),
+ request_->policy_url())) {
+ std::string cookies = request_->context()->cookie_store()->
+ GetCookiesWithOptions(request_->url(),
+ CookieMonster::INCLUDE_HTTPONLY);
+ if (!cookies.empty())
+ request_info_.extra_headers += "Cookie: " + cookies + "\r\n";
+ }
+ if (!context->accept_language().empty())
+ request_info_.extra_headers += "Accept-Language: " +
+ context->accept_language() + "\r\n";
+ if (!context->accept_charset().empty())
+ request_info_.extra_headers += "Accept-Charset: " +
+ context->accept_charset() + "\r\n";
+ }
+
+#ifdef CHROME_LAST_MINUTE
+ // Tell the server what compression formats we support.
+ request_info_.extra_headers += "Accept-Encoding: gzip,deflate,bzip2\r\n";
+#else
+ // Tell the server that we support gzip/deflate encoding.
+ request_info_.extra_headers += "Accept-Encoding: gzip,deflate";
+
+ // const string point to google domain
+ static const char kGoogleDomain[] = "google.com";
+ static const unsigned int kGoogleDomainLen = arraysize(kGoogleDomain) - 1;
+ static const char kLocalHostName[] = "localhost";
+
+ // At now, only support bzip2 feature for those requests which are
+ // sent to google domain or localhost.
+ // TODO(jnd) : we will remove the "google.com" domain check before launch.
+ // See bug : 861940
+ const std::string &host = request_->url().host();
+
+ if (host == kLocalHostName ||
+ request_->url().DomainIs(kGoogleDomain, kGoogleDomainLen)) {
+ request_info_.extra_headers += ",bzip2\r\n";
+ } else {
+ request_info_.extra_headers += "\r\n";
+ }
+#endif
+}
+
+void URLRequestHttpCacheJob::FetchResponseCookies() {
+ DCHECK(response_info_);
+ DCHECK(response_cookies_.empty());
+
+ std::string name = "Set-Cookie";
+ std::string value;
+
+ void* iter = NULL;
+ while (response_info_->headers->EnumerateHeader(&iter, name, &value))
+ response_cookies_.push_back(value);
+}
diff --git a/net/url_request/url_request_http_cache_job.h b/net/url_request/url_request_http_cache_job.h
new file mode 100644
index 0000000..261d66b
--- /dev/null
+++ b/net/url_request/url_request_http_cache_job.h
@@ -0,0 +1,112 @@
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef NET_URL_REQUEST_URL_REQUEST_HTTP_CACHE_JOB_H__
+#define NET_URL_REQUEST_URL_REQUEST_HTTP_CACHE_JOB_H__
+
+#include "net/base/completion_callback.h"
+#include "net/http/http_request_info.h"
+#include "net/url_request/url_request_job.h"
+
+namespace net {
+class HttpResponseInfo;
+class HttpTransaction;
+}
+class URLRequestContext;
+
// A URLRequestJob subclass that is built on top of the HttpCache. It provides
// an implementation for both HTTP and HTTPS.
class URLRequestHttpCacheJob : public URLRequestJob {
 public:
  // URLRequestJob factory hook for http/https requests.
  static URLRequestJob* Factory(URLRequest* request, const std::string& scheme);

  virtual ~URLRequestHttpCacheJob();

 protected:
  URLRequestHttpCacheJob(URLRequest* request);

  // URLRequestJob methods:
  virtual void SetUpload(net::UploadData* upload);
  virtual void SetExtraRequestHeaders(const std::string& headers);
  virtual void Start();
  virtual void Kill();
  virtual net::LoadState GetLoadState() const;
  virtual uint64 GetUploadProgress() const;
  virtual bool GetMimeType(std::string* mime_type);
  virtual bool GetCharset(std::string* charset);
  virtual void GetResponseInfo(net::HttpResponseInfo* info);
  virtual bool GetResponseCookies(std::vector<std::string>* cookies);
  virtual int GetResponseCode();
  virtual bool GetContentEncoding(std::string* encoding_type);
  virtual bool IsRedirectResponse(GURL* location, int* http_status_code);
  virtual bool IsSafeRedirect(const GURL& location);
  virtual bool NeedsAuth();
  virtual void GetAuthChallengeInfo(scoped_refptr<AuthChallengeInfo>*);
  virtual void GetCachedAuthData(const AuthChallengeInfo& auth_info,
                                 scoped_refptr<AuthData>* auth_data);
  virtual void SetAuth(const std::wstring& username,
                       const std::wstring& password);
  virtual void CancelAuth();
  virtual void ContinueDespiteLastError();
  virtual bool GetMoreData();
  virtual bool ReadRawData(char* buf, int buf_size, int *bytes_read);

  // Shadows URLRequestJob's version of this method so we can grab cookies.
  void NotifyHeadersComplete();

  // Tears down |transaction_|; also clears |response_info_|, which points
  // into the transaction.
  void DestroyTransaction();
  // Creates and starts the transaction; always leaves status IO_PENDING.
  void StartTransaction();
  // Appends Cookie / Accept-* headers to |request_info_.extra_headers|.
  void AddExtraHeaders();
  // Copies Set-Cookie response values into |response_cookies_|.
  void FetchResponseCookies();

  // Completion callbacks for transaction start/restart and read.
  void OnStartCompleted(int result);
  void OnReadCompleted(int result);

  net::HttpRequestInfo request_info_;
  // Destroyed via transaction_->Destroy() in DestroyTransaction().
  net::HttpTransaction* transaction_;
  // Owned by |transaction_| (obtained from GetResponseInfo()).
  const net::HttpResponseInfo* response_info_;
  std::vector<std::string> response_cookies_;

  // Auth states for proxy and origin server.
  AuthState proxy_auth_state_;
  AuthState server_auth_state_;

  net::CompletionCallbackImpl<URLRequestHttpCacheJob> start_callback_;
  net::CompletionCallbackImpl<URLRequestHttpCacheJob> read_callback_;

  // True while an asynchronous transaction_->Read is outstanding.
  bool read_in_progress_;

  // Keep a reference to the url request context to be sure it's not
  // deleted before us.
  scoped_refptr<URLRequestContext> context_;

  DISALLOW_EVIL_CONSTRUCTORS(URLRequestHttpCacheJob);
};
+
+#endif // NET_URL_REQUEST_URL_REQUEST_HTTP_CACHE_JOB_H__
diff --git a/net/url_request/url_request_inet_job.cc b/net/url_request/url_request_inet_job.cc
new file mode 100644
index 0000000..ab4f91b
--- /dev/null
+++ b/net/url_request/url_request_inet_job.cc
@@ -0,0 +1,462 @@
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "net/url_request/url_request_inet_job.h"
+
+#include <algorithm>
+
+#include "base/message_loop.h"
+#include "base/string_util.h"
+#include "googleurl/src/gurl.h"
+#include "net/base/auth.h"
+#include "net/base/net_errors.h"
+#include "net/base/net_util.h"
+#include "net/base/wininet_util.h"
+#include "net/url_request/url_request_error_job.h"
+#include "net/url_request/url_request_ftp_job.h"
+#include "net/url_request/url_request_job_metrics.h"
+#include "net/url_request/url_request_job_tracker.h"
+
+//
+// HOW ASYNC IO WORKS
+//
+// The URLRequestInet* classes are now fully asynchronous. This means that
+// all IO operations pass buffers into WinInet, and as WinInet completes those
+// IO requests, it will fill the buffer, and then callback to the client.
+// Asynchronous IO Operations include:
+// HttpSendRequestEx
+// InternetWriteFile
+// HttpEndRequest
+// InternetOpenUrl
+// InternetReadFile (for FTP)
+// InternetReadFileEx (for HTTP)
+// InternetCloseHandle
+//
+// To understand how this works, you need to understand the basic class
+// hierarchy for the URLRequestJob classes:
+//
+// URLRequestJob
+// |
+// +--------------+-------------------+
+// | |
+// (Other Job Types) URLRequestInetJob
+// e.g. | |
+// URLRequestFileJob URLRequestFtpJob URLRequestHttpJob
+// |
+// URLRequestHttpUploadJob
+//
+//
+// To make this work, each URLRequestInetJob has a virtual method called
+// OnIOComplete(). If a derived URLRequestInetJob class issues
+// an asynchronous IO, it must override the OnIOComplete method
+// to handle the IO completion. Once it has overridden this method,
+// *all* asynchronous IO completions will come to this method, even
+// those asynchronous IOs which may have been issued by a base class.
+// For example, URLRequestInetJob has methods which Read from the
+// connection asynchronously. Once URLRequestHttpJob overrides
+// OnIOComplete (so that it can receive its own async IO callbacks)
+// it will also receive the URLRequestInetJob async IO callbacks. To
+// make this work, the derived class must track its own state, and call
+// the base class' version of OnIOComplete if appropriate.
+//
+
+
// NOTE(review): a file-scope using-directive is legacy style (unqualified
// wstring etc. below depend on it); new code should qualify std:: names.
using namespace std;

using net::WinInetUtil;

// Window class name for the hidden message window that receives WinInet
// completion notifications (see InitializeTheInternet / URLRequestWndProc).
static const wchar_t kWndClass[] = L"URLRequestMessageWnd";

// Custom message types for use with message_hwnd
enum {
  MSG_REQUEST_COMPLETE = WM_USER + 1
};

// Process-wide WinInet session handle and message window, lazily created by
// InitializeTheInternet() on first job construction.
HINTERNET URLRequestInetJob::the_internet_ = NULL;
HWND URLRequestInetJob::message_hwnd_ = NULL;
#ifndef NDEBUG
// Debug-only: the single message loop all URLRequests must run on.
MessageLoop* URLRequestInetJob::my_message_loop_ = NULL;
#endif
+
// Lazily initializes the process-wide WinInet session on first use.  In
// debug builds, verifies that all jobs are created on the same thread.
URLRequestInetJob::URLRequestInetJob(URLRequest* request)
    : URLRequestJob(request),
      connection_handle_(NULL),
      request_handle_(NULL),
      last_error_(ERROR_SUCCESS),
      is_waiting_(false),
      read_in_progress_(false) {
  // TODO(darin): we should re-create the internet if the UA string changes,
  // but we have to be careful about existing users of this internet.
  if (!the_internet_) {
    InitializeTheInternet(
        request->context() ? request->context()->user_agent() : std::string());
  }
#ifndef NDEBUG
  DCHECK(MessageLoop::current() == my_message_loop_) <<
      "All URLRequests should happen on the same thread";
#endif
}
+
// The request must already be detached; closes any handles still open.
URLRequestInetJob::~URLRequestInetJob() {
  DCHECK(!request_) << "request should be detached at this point";

  // The connections may have already been cleaned up.  It is ok to call
  // CleanupConnection again to make sure the resource is properly released.
  // See bug 684997.
  CleanupConnection();
}
+
// Cancels the job: closes the WinInet handles, then lets the base class
// notify the URLRequest.
void URLRequestInetJob::Kill() {
  CleanupConnection();

  // Dispatch the NotifyDone message to the URLRequest
  URLRequestJob::Kill();
}
+
// Stores the credentials on whichever challenge is pending (proxy before
// server) and resends the request via OnSetAuth().
void URLRequestInetJob::SetAuth(const wstring& username,
                                const wstring& password) {
  DCHECK((proxy_auth_ != NULL && proxy_auth_->state == AUTH_STATE_NEED_AUTH) ||
         (server_auth_ != NULL &&
          (server_auth_->state == AUTH_STATE_NEED_AUTH)));

  // Proxy gets set first, then WWW.
  AuthData* auth =
      (proxy_auth_ != NULL && proxy_auth_->state == AUTH_STATE_NEED_AUTH ?
       proxy_auth_.get() : server_auth_.get());

  if (auth) {
    auth->state = AUTH_STATE_HAVE_AUTH;
    auth->username = username;
    auth->password = password;
  }

  // Resend the request with the new username and password.
  // Do this asynchronously in case we were called from within a
  // NotifyDataAvailable callback.
  // TODO(mpcomplete): hmm... is it possible 'this' gets deleted before the task
  // is run?
  OnSetAuth();
}
+
// Marks the pending challenge (proxy before server) as canceled and
// proceeds with the request as though there were no auth.
void URLRequestInetJob::CancelAuth() {
  DCHECK((proxy_auth_ != NULL && proxy_auth_->state == AUTH_STATE_NEED_AUTH) ||
         (server_auth_ != NULL &&
          (server_auth_->state == AUTH_STATE_NEED_AUTH)));

  // Proxy gets set first, then WWW.
  AuthData* auth =
      (proxy_auth_ != NULL && proxy_auth_->state == AUTH_STATE_NEED_AUTH ?
       proxy_auth_.get() : server_auth_.get());

  if (auth) {
    auth->state = AUTH_STATE_CANCELED;
  }

  // Once the auth is cancelled, we proceed with the request as though
  // there were no auth.  So, send the OnResponseStarted.  Schedule this
  // for later so that we don't cause any recursing into the caller
  // as a result of this call.
  OnCancelAuth();
}
+
+void URLRequestInetJob::OnIOComplete(const AsyncResult& result) {
+ URLRequestStatus status;
+
+ if (read_in_progress_) {
+ read_in_progress_ = false;
+ int bytes_read = 0;
+ if (GetReadBytes(result, &bytes_read)) {
+ SetStatus(status);
+ if (bytes_read == 0) {
+ NotifyDone(status);
+ CleanupConnection();
+ }
+ } else {
+ bytes_read = -1;
+ URLRequestStatus status;
+ status.set_status(URLRequestStatus::FAILED);
+ status.set_os_error(WinInetUtil::OSErrorToNetError(result.dwError));
+ NotifyDone(status);
+ CleanupConnection();
+ }
+ NotifyReadComplete(bytes_read);
+ } else {
+ // If we get here, an IO is completing which we didn't
+ // start or we lost track of our state.
+ NOTREACHED();
+ }
+}
+
+bool URLRequestInetJob::ReadRawData(char* dest, int dest_size,
+ int *bytes_read) {
+ if (is_done())
+ return 0;
+
+ DCHECK_NE(dest_size, 0);
+ DCHECK_NE(bytes_read, (int*)NULL);
+ DCHECK(!read_in_progress_);
+
+ *bytes_read = 0;
+
+ int result = CallInternetRead(dest, dest_size, bytes_read);
+ if (result == ERROR_SUCCESS) {
+ DLOG(INFO) << "read " << *bytes_read << " bytes";
+ if (*bytes_read == 0)
+ CleanupConnection(); // finished reading all the data
+ return true;
+ }
+
+ if (ProcessRequestError(result))
+ read_in_progress_ = true;
+
+ // Whether we had an error or the request is pending.
+ // Both of these cases return false.
+ return false;
+}
+
// Trampoline from the message-window dispatch into OnIOComplete.  Releases
// the reference taken when the IO went pending (see ProcessRequestError),
// so this call may destroy |this| on return.
void URLRequestInetJob::CallOnIOComplete(const AsyncResult& result) {
  // It's important to clear this flag before calling OnIOComplete
  is_waiting_ = false;

  // the job could have completed with an error while the message was pending
  if (is_done()) {
    Release();  // may destroy self if last reference
    return;
  }

  // Verify that our status is currently set to IO_PENDING and
  // reset it on success.
  DCHECK(GetStatus().is_io_pending());
  if (result.dwResult && result.dwError == 0)
    SetStatus(URLRequestStatus());

  OnIOComplete(result);

  Release();  // may destroy self if last reference
}
+
// Classifies the result of a WinInet call.  For ERROR_IO_PENDING, takes a
// reference (balanced in CallOnIOComplete), marks the job IO_PENDING and
// returns true.  Any other error tears down the connection, reports the
// mapped net error via NotifyDone, and returns false.
bool URLRequestInetJob::ProcessRequestError(int error) {
  if (error == ERROR_IO_PENDING) {
    DLOG(INFO) << "waiting for WinInet call to complete";
    AddRef();  // balanced in CallOnIOComplete
    is_waiting_ = true;
    SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));
    return true;
  }
  DLOG(ERROR) << "WinInet call failed: " << error;
  CleanupConnection();
  NotifyDone(URLRequestStatus(URLRequestStatus::FAILED,
                              WinInetUtil::OSErrorToNetError(error)));
  return false;
}
+
+bool URLRequestInetJob::GetMoreData() {
+ if (!is_waiting_ && !is_done()) {
+ // The connection is still in the middle of transmission.
+ // Return true so InternetReadFileExA can be called again.
+ return true;
+ } else {
+ return false;
+ }
+}
+
// Closes the request and connection handles, if any are still open.  Safe
// to call repeatedly (see the destructor).
void URLRequestInetJob::CleanupConnection() {
  if (!request_handle_ && !connection_handle_)
    return;  // nothing to clean up

  if (request_handle_) {
    CleanupHandle(request_handle_);
    request_handle_ = NULL;
  }
  if (connection_handle_) {
    CleanupHandle(connection_handle_);
    connection_handle_ = NULL;
  }
}
+
+void URLRequestInetJob::CleanupHandle(HINTERNET handle) {
+ // We no longer need notifications from this connection.
+ InternetSetStatusCallback(handle, NULL);
+
+ if (!InternetCloseHandle(handle)) {
+ // InternetCloseHandle is evil. The documentation specifies that it
+ // either succeeds immediately or returns ERROR_IO_PENDING if there is
+ // something outstanding, in which case the close will happen automagically
+ // later. In either of these cases, it will call us back with
+ // INTERNET_STATUS_HANDLE_CLOSING (because we set up the async callbacks)
+ // and we simply do nothing for the message.
+ //
+ // However, sometimes it also seems to fail with ERROR_INVALID_HANDLE.
+ // This seems to happen when we cancel before it has called us back with
+ // data. For example, if we cancel during DNS resolution or while waiting
+ // for a slow server.
+ //
+ // Our speculation is that in these cases WinInet creates a handle for
+ // us with an internal structure, but that the driver has not yet called
+ // it back with a "real" handle (the driver level is probably what
+ // generates IO_PENDING). The driver has not yet specified a handle, which
+ // causes WinInet to barf.
+ //
+ // However, in this case, the cancel seems to work. The TCP connection is
+ // closed and we still get a callback that the handle is being closed. Yay.
+ //
+ // We assert that the error is either of these two because we aren't sure
+ // if any other error values could also indicate this bogus condition, and
+ // we want to notice if we do something wrong that causes a real error.
+ DWORD last_error = GetLastError();
+ DCHECK(last_error == ERROR_INVALID_HANDLE) <<
+ "Unknown error when closing handle, possibly leaking job";
+ if (ERROR_IO_PENDING == last_error) {
+ SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));
+
+ async_result_.dwError = ERROR_INTERNET_CONNECTION_ABORTED;
+ async_result_.dwResult = reinterpret_cast<DWORD_PTR>(handle);
+ MessageLoop::current()->PostTask(FROM_HERE, NewRunnableMethod(
+ this, &URLRequestInetJob::CallOnIOComplete, async_result_));
+ }
+ }
+}
+
// static
// Accessor for the shared WinInet session handle (NULL until the first job
// triggers InitializeTheInternet).
HINTERNET URLRequestInetJob::GetTheInternet() {
  return the_internet_;
}
+
+// static
+void URLRequestInetJob::InitializeTheInternet(const std::string& user_agent) {
+ // construct message window for processsing
+ HINSTANCE hinst = GetModuleHandle(NULL);
+
+ WNDCLASSEX wc = {0};
+ wc.cbSize = sizeof(wc);
+ wc.lpfnWndProc = URLRequestWndProc;
+ wc.hInstance = hinst;
+ wc.lpszClassName = kWndClass;
+ RegisterClassEx(&wc);
+
+ message_hwnd_ = CreateWindow(kWndClass, 0, 0, 0, 0, 0, 0, HWND_MESSAGE, 0,
+ hinst, 0);
+ if (!message_hwnd_) {
+ NOTREACHED() << "error: " << GetLastError();
+ return;
+ }
+
+ // Hack attack. We are hitting a deadlock in wininet deinitialization.
+ // What is happening is that when we deinitialize, FreeLibrary will be
+ // called on wininet. The loader lock is held, and wininet!DllMain is
+ // called. The problem is that wininet tries to do a bunch of cleanup
+ // in their DllMain, including calling ICAsyncThread::~ICASyncThread.
+ // This tries to shutdown the "select thread", and then does a
+ // WaitForSingleObject on the thread with a 5 sec timeout. However the
+ // thread they are waiting for cannot exit because the thread shutdown
+ // routine (LdrShutdownThread) is trying to acquire the loader lock.
+ // This causes chrome.exe to hang for 5 seconds on shutdown before the
+ // process will exit. Making sure we close our wininet handles did not help.
+ //
+ // Since DLLs are reference counted, we inflate the reference count on
+ // wininet so that it will never be deinitialized :)
+ LoadLibraryA("wininet");
+
+ the_internet_ = InternetOpenA(user_agent.c_str(),
+ INTERNET_OPEN_TYPE_PRECONFIG,
+ NULL, // no proxy override
+ NULL, // no proxy bypass list
+ INTERNET_FLAG_ASYNC);
+ InternetSetStatusCallback(the_internet_, URLRequestStatusCallback);
+
+ // Keep track of this message loop so we can catch callers who don't make
+ // requests on the same thread. Only do this in debug mode; in release mode
+ // my_message_loop_ doesn't exist.
+#ifndef NDEBUG
+ DCHECK(!my_message_loop_) << "InitializeTheInternet() called twice";
+ DCHECK(my_message_loop_ = MessageLoop::current());
+#endif
+}
+
// static
// Window procedure for the hidden message window.  |wparam| carries the job
// pointer and |lparam| the WinInet handle (see URLRequestStatusCallback).
LRESULT CALLBACK URLRequestInetJob::URLRequestWndProc(HWND hwnd,
                                                      UINT message,
                                                      WPARAM wparam,
                                                      LPARAM lparam) {
  URLRequestInetJob* job = reinterpret_cast<URLRequestInetJob*>(wparam);
  HINTERNET handle = reinterpret_cast<HINTERNET>(lparam);

  switch (message) {
    case MSG_REQUEST_COMPLETE: {
      // The callback will be reset if we have closed the handle and deleted
      // the job instance.  Call CallOnIOComplete only if the handle still
      // has a valid callback.
      INTERNET_STATUS_CALLBACK callback = NULL;
      DWORD option_buffer_size = sizeof(callback);
      if (InternetQueryOption(handle, INTERNET_OPTION_CALLBACK,
                              &callback, &option_buffer_size)
          && (NULL != callback)) {
        const AsyncResult& r = job->async_result_;
        DLOG(INFO) << "REQUEST_COMPLETE: job=" << job << ", result=" <<
            (void*) r.dwResult << ", error=" << r.dwError;
        job->CallOnIOComplete(r);
      }
      break;
    }
    default:
      return DefWindowProc(hwnd, message, wparam, lparam);
  }

  return 0;
}
+
// static
// WinInet status callback for the shared session.  Records the async result
// on the job and forwards the notification to the message window via
// PostMessage so it is handled on the request thread (NOTE(review): WinInet
// presumably invokes this on one of its worker threads — hence the post).
void CALLBACK URLRequestInetJob::URLRequestStatusCallback(
    HINTERNET handle, DWORD_PTR job_id, DWORD status, LPVOID status_info,
    DWORD status_info_len) {
  UINT message = 0;
  LPARAM message_param = 0;
  switch (status) {
    case INTERNET_STATUS_REQUEST_COMPLETE: {
      message = MSG_REQUEST_COMPLETE;
      DCHECK(status_info_len == sizeof(INTERNET_ASYNC_RESULT));
      LPINTERNET_ASYNC_RESULT r =
          static_cast<LPINTERNET_ASYNC_RESULT>(status_info);
      // |job_id| is the context value registered for the handle: the job.
      URLRequestInetJob* job = reinterpret_cast<URLRequestInetJob*>(job_id);
      job->async_result_.dwResult = r->dwResult;
      job->async_result_.dwError = r->dwError;
      message_param = reinterpret_cast<LPARAM>(handle);
      break;
    }
    case INTERNET_STATUS_USER_INPUT_REQUIRED:
    case INTERNET_STATUS_STATE_CHANGE:
      // TODO(darin): This is probably a security problem. Do something better.
      ResumeSuspendedDownload(handle, 0);
      break;
  }

  if (message)
    PostMessage(URLRequestInetJob::message_hwnd_, message,
                static_cast<WPARAM>(job_id), message_param);
}
diff --git a/net/url_request/url_request_inet_job.h b/net/url_request/url_request_inet_job.h
new file mode 100644
index 0000000..a82d754
--- /dev/null
+++ b/net/url_request/url_request_inet_job.h
@@ -0,0 +1,184 @@
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef NET_URL_REQUEST_URL_REQUEST_INET_JOB_H__
+#define NET_URL_REQUEST_URL_REQUEST_INET_JOB_H__
+
+#include <windows.h>
+#include <wininet.h>
+
+#include "net/url_request/url_request.h"
+#include "net/url_request/url_request_job.h"
+
+class AuthData;
+class MessageLoop;
+
+// For all WinInet-based URL requests
+class URLRequestInetJob : public URLRequestJob {
+ public:
+ URLRequestInetJob(URLRequest* request);
+ virtual ~URLRequestInetJob();
+
+ virtual void SetExtraRequestHeaders(const std::string& headers) {
+ extra_request_headers_ = headers;
+ }
+
+ virtual void Kill();
+ virtual bool ReadRawData(char* buf, int buf_size, int *bytes_read);
+
+ // URLRequestJob Authentication methods
+ virtual void SetAuth(const std::wstring& username,
+ const std::wstring& password);
+ virtual void CancelAuth();
+
+ // A structure holding the result and error code of an asynchronous IO.
+ // This is a copy of INTERNET_ASYNC_RESULT.
+ struct AsyncResult {
+ DWORD_PTR dwResult;
+ DWORD dwError;
+ };
+
+ // A virtual method to handle WinInet callbacks. If this class
+ // issues asynchronous IO, it will need to override this method
+ // to receive completions of those asynchronous IOs. The class
+ // must track whether it has an async IO outstanding, and if it
+ // does not it must call the base class' OnIOComplete.
+ virtual void OnIOComplete(const AsyncResult& result) = 0;
+
+ // Used internally to setup the OnIOComplete call. Public because this
+ // is called from the Windows procedure, and we don't want to make it a
+ // friend so we can avoid the Windows headers for this header file.
+ void CallOnIOComplete(const AsyncResult& result);
+
+ HINTERNET request_handle() const { return request_handle_; }
+
+protected:
+ // Called by this class and subclasses to send or resend this request.
+ virtual void SendRequest() = 0;
+
+ // Calls InternetReadFile(Ex) depending on the derived class.
+ // Returns ERROR_SUCCESS on success, or else a standard Windows error code
+ // on failure (from GetLastError()).
+ virtual int CallInternetRead(char* dest, int dest_size, int *bytes_read) = 0;
+
+ // After the base class calls CallInternetRead and the result is available,
+ // it will call this method to get the number of received bytes.
+ virtual bool GetReadBytes(const AsyncResult& result, int* bytes_read) = 0;
+
+ // Called by this class and subclasses whenever a WinInet call fails. This
+ // method returns true if the error just means that we have to wait for
+ // OnIOComplete to be called.
+ bool ProcessRequestError(int error);
+
+ // Called by URLRequestJob to get more data from the data stream of this job.
+ virtual bool GetMoreData();
+
+ // Cleans up the connection, if necessary, and closes the connection and
+ // request handles. May be called multiple times, it will be a NOP if
+ // there is nothing to do.
+ void CleanupConnection();
+
+ // Closes the given handle.
+ void CleanupHandle(HINTERNET handle);
+
+ // Returns the global handle to the internet (NOT the same as the connection
+ // or request handle below)
+ static HINTERNET GetTheInternet();
+
+ // Makes the appropriate async call to re-send a request, based on the
+ // dynamic scheme type and the user's action at the authentication prompt
+ // (OK or Cancel).
+ virtual void OnCancelAuth() = 0;
+ virtual void OnSetAuth() = 0;
+
+ // Handle of the connection for this request. This handle is created
+ // by subclasses that create the connection according to their requirements.
+ // It will be automatically destroyed by this class when the connection is
+ // being closed. See also 'request_handle_'
+ HINTERNET connection_handle_;
+
+ // Handle of the specific request created by subclasses to meet their own
+ // requirements. This handle has a more narrow scope than the connection
+ // handle. If non-null, it will be automatically destroyed by this class
+ // when the connection is being closed. It will be destroyed before the
+ // connection handle.
+ HINTERNET request_handle_;
+
+ // The last error that occurred. Used by ContinueDespiteLastError to adjust
+ // the request's load_flags to ignore this error.
+ DWORD last_error_;
+
+ // Any extra request headers (\n-delimited) that should be included in the
+ // request.
+ std::string extra_request_headers_;
+
+ // Authentication information.
+ scoped_refptr<AuthData> proxy_auth_;
+ scoped_refptr<AuthData> server_auth_;
+
+ private:
+
+ // One-time global state setup
+ static void InitializeTheInternet(const std::string& user_agent);
+
+ // Runs on the thread where the first URLRequest was created
+ static LRESULT CALLBACK URLRequestWndProc(HWND hwnd, UINT message,
+ WPARAM wparam, LPARAM lparam);
+
+ // Runs on some background thread (called by WinInet)
+ static void CALLBACK URLRequestStatusCallback(HINTERNET handle,
+ DWORD_PTR job_id,
+ DWORD status,
+ LPVOID status_info,
+ DWORD status_info_len);
+
+ static HINTERNET the_internet_;
+ // Message-only window used to bounce WinInet callbacks back to the thread
+ // that owns the jobs (see URLRequestStatusCallback/URLRequestWndProc).
+ static HWND message_hwnd_;
+#ifndef NDEBUG
+ static MessageLoop* my_message_loop_; // Used to sanity-check that all
+ // requests are made on the same
+ // thread
+#endif
+
+ // true if waiting for OnIOComplete to be called
+ bool is_waiting_;
+
+ // debugging state - is there a read already in progress
+ bool read_in_progress_;
+
+ // The result and error code of asynchronous IO. It is modified by the
+ // status callback functions on asynchronous IO completion and passed to
+ // CallOnIOComplete. Since there is at most one pending IO, the object
+ // can reuse the async_result_ member for all its asynchronous IOs.
+ AsyncResult async_result_;
+
+ DISALLOW_EVIL_CONSTRUCTORS(URLRequestInetJob);
+};
+
+#endif // NET_URL_REQUEST_URL_REQUEST_INET_JOB_H__
diff --git a/net/url_request/url_request_job.cc b/net/url_request/url_request_job.cc
new file mode 100644
index 0000000..6485489
--- /dev/null
+++ b/net/url_request/url_request_job.cc
@@ -0,0 +1,497 @@
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "net/url_request/url_request_job.h"
+
+#include "base/message_loop.h"
+#include "base/string_util.h"
+#include "googleurl/src/gurl.h"
+#include "net/base/auth.h"
+#include "net/base/net_errors.h"
+#include "net/url_request/url_request.h"
+#include "net/url_request/url_request_job_metrics.h"
+#include "net/url_request/url_request_job_tracker.h"
+
+// Buffer size allocated when de-compressing data.
+static const int kFilterBufSize = 32 * 1024;
+
+URLRequestJob::URLRequestJob(URLRequest* request)
+ : request_(request),
+ read_buffer_(NULL),
+ read_buffer_len_(0),
+ has_handled_response_(false),
+ done_(false),
+ expected_content_size_(-1) {
+ // When profiling is enabled on the request, allocate the metrics object up
+ // front and stamp the start time; NotifyDone/RetrieveMetrics consume it.
+ is_profiling_ = request->enable_profiling();
+ if (is_profiling()) {
+ metrics_.reset(new URLRequestJobMetrics());
+ metrics_->start_time_ = TimeTicks::Now();
+ }
+ g_url_request_job_tracker.AddNewJob(this);
+}
+
+URLRequestJob::~URLRequestJob() {
+ // Unregister from the global tracker; mirrors AddNewJob in the constructor.
+ g_url_request_job_tracker.RemoveJob(this);
+}
+
+void URLRequestJob::Kill() {
+ // Make sure the request is notified that we are done. We assume that the
+ // request took care of setting its error status before calling Kill.
+ // NotifyCanceled is a no-op if a done notification was already sent.
+ if (request_)
+ NotifyCanceled();
+}
+
+// The request is going away: drop our back-pointer so the job never touches
+// it again. All notification paths null-check request_ for this reason.
+void URLRequestJob::DetachRequest() {
+ request_ = NULL;
+}
+
+// Installs a stream filter (e.g. content decoding) when the response declares
+// a content encoding. If the factory yields no filter, filter_ stays null and
+// reads go through the raw path (see Read).
+void URLRequestJob::SetupFilter() {
+ std::string encoding_type;
+ if (GetContentEncoding(&encoding_type)) {
+ std::string mime_type;
+ GetMimeType(&mime_type);
+ filter_.reset(Filter::Factory(encoding_type, mime_type, kFilterBufSize));
+ }
+}
+
+// This function calls ReadData to get stream data. If a filter exists, passes
+// the data to the attached filter. Then returns the output from filter back to
+// the caller. Returns true with *bytes_read == 0 to signal EOF (which also
+// sends the success done-notification below); returns false on error or when
+// an async IO is now pending.
+bool URLRequestJob::Read(char* buf, int buf_size, int *bytes_read) {
+ bool rv = false;
+
+ DCHECK_LT(buf_size, 1000000); // sanity check
+ DCHECK(buf);
+ DCHECK(bytes_read);
+
+ *bytes_read = 0;
+
+ // Skip Filter if not present
+ if (!filter_.get()) {
+ rv = ReadRawData(buf, buf_size, bytes_read);
+ if (rv && *bytes_read > 0)
+ RecordBytesRead(*bytes_read);
+ } else {
+ // Get more pre-filtered data if needed.
+ // NOTE(review): filtered_data_read is never used here; the filtered path
+ // below fills |bytes_read| directly. Candidate for removal.
+ int filtered_data_read = 0;
+
+ // Save the caller's buffers while we do IO
+ // in the filter's buffers.
+ read_buffer_ = buf;
+ read_buffer_len_ = buf_size;
+
+ if (ReadFilteredData(bytes_read)) {
+ rv = true; // we have data to return
+ } else {
+ rv = false; // error, or a new IO is pending
+ }
+ }
+ if (rv && *bytes_read == 0)
+ NotifyDone(URLRequestStatus());
+ return rv;
+}
+
+// Reads raw (pre-filter) bytes into the filter's own input buffer. Only
+// issues a read when the filter has no buffered data and the job is not done;
+// otherwise returns false with *bytes_read left at 0.
+bool URLRequestJob::ReadRawDataForFilter(int *bytes_read) {
+ bool rv = false;
+
+ DCHECK(bytes_read);
+ DCHECK(filter_.get());
+
+ *bytes_read = 0;
+
+ // Get more pre-filtered data if needed.
+ // TODO(mbelshe): is it possible that the filter needs *MORE* data
+ // when there is some data already in the buffer?
+ if (!filter_->stream_data_len() && !is_done()) {
+ char* stream_buffer = filter_->stream_buffer();
+ int stream_buffer_size = filter_->stream_buffer_size();
+ rv = ReadRawData(stream_buffer, stream_buffer_size, bytes_read);
+ if (rv && *bytes_read > 0)
+ RecordBytesRead(*bytes_read);
+ }
+ return rv;
+}
+
+// Tells the filter how many raw bytes were just deposited into its input
+// buffer so it can account for them before the next ReadFilteredData.
+void URLRequestJob::FilteredDataRead(int bytes_read) {
+ DCHECK(filter_.get()); // don't add data if there is no filter
+ filter_->FlushStreamBuffer(bytes_read);
+}
+
+// Produces post-filter data into the caller's buffer saved in read_buffer_.
+// Returns true when data (or EOF, *bytes_read == 0) is available now; false
+// when an async raw read is pending or an error occurred.
+bool URLRequestJob::ReadFilteredData(int *bytes_read) {
+ DCHECK(filter_.get()); // don't add data if there is no filter
+ DCHECK(read_buffer_ != NULL); // we need to have a buffer to fill
+ DCHECK(read_buffer_len_ > 0); // sanity check
+ DCHECK(read_buffer_len_ < 1000000); // sanity check
+
+ bool rv = false;
+ *bytes_read = 0;
+
+ if (is_done())
+ return true;
+
+ if (!filter_->stream_data_len()) {
+ // We don't have any raw data to work with, so
+ // read from the socket.
+
+ int filtered_data_read;
+ if (ReadRawDataForFilter(&filtered_data_read)) {
+ if (filtered_data_read > 0) {
+ filter_->FlushStreamBuffer(filtered_data_read);
+ } else {
+ return true; // EOF
+ }
+ } else {
+ return false; // IO Pending (or error)
+ }
+ }
+
+ if (filter_->stream_data_len() && !is_done()) {
+ // Get filtered data
+ int filtered_data_len = read_buffer_len_;
+ Filter::FilterStatus status;
+ status = filter_->ReadFilteredData(read_buffer_, &filtered_data_len);
+ switch (status) {
+ case Filter::FILTER_DONE: {
+ *bytes_read = filtered_data_len;
+ rv = true;
+ break;
+ }
+ case Filter::FILTER_NEED_MORE_DATA: {
+ // We have finished filtering all data currently in the buffer.
+ // There might be some space left in the output buffer. One can
+ // consider reading more data from the stream to feed the filter
+ // and filling up the output buffer. This leads to more complicated
+ // buffer management and data notification mechanisms.
+ // We can revisit this issue if there is a real perf need.
+ if (filtered_data_len > 0) {
+ *bytes_read = filtered_data_len;
+ rv = true;
+ } else {
+ // Read again since we haven't received enough data yet (e.g., we may
+ // not have a complete gzip header yet). Note this recursion can
+ // return false if the underlying raw read goes async.
+ rv = ReadFilteredData(bytes_read);
+ }
+ break;
+ }
+ case Filter::FILTER_OK: {
+ *bytes_read = filtered_data_len;
+ rv = true;
+ break;
+ }
+ case Filter::FILTER_ERROR: {
+ // TODO: Figure out a better error code.
+ NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, net::ERR_FAILED));
+ rv = false;
+ break;
+ }
+ default: {
+ NOTREACHED();
+ rv = false;
+ break;
+ }
+ }
+ } else {
+ // we are done, or there is no data left.
+ rv = true;
+ }
+
+ if (rv) {
+ // When we successfully finished a read, we no longer need to
+ // save the caller's buffers. For debugging purposes, we clear
+ // them out.
+ read_buffer_ = NULL;
+ read_buffer_len_ = 0;
+ }
+ return rv;
+}
+
+// Base implementation: a job with no data source reports immediate EOF and
+// completes with the default (success) status. Data-producing jobs override
+// this (e.g. URLRequestInetJob::ReadRawData).
+bool URLRequestJob::ReadRawData(char* buf, int buf_size, int *bytes_read) {
+ DCHECK(bytes_read);
+ *bytes_read = 0;
+ NotifyDone(URLRequestStatus());
+ return false;
+}
+
+// Transfers ownership of the profiling metrics to the caller; returns NULL
+// when profiling is disabled for this job.
+URLRequestJobMetrics* URLRequestJob::RetrieveMetrics() {
+ if (is_profiling())
+ return metrics_.release();
+ else
+ return NULL;
+}
+
+// Dispatches the "headers are in" event: handles redirects and auth
+// challenges internally, otherwise sets up the filter and tells the delegate
+// the response has started.
+void URLRequestJob::NotifyHeadersComplete() {
+ if (!request_ || !request_->delegate())
+ return; // The request was destroyed, so there is no more work to do.
+
+ if (has_handled_response_)
+ return;
+
+ DCHECK(!request_->status().is_io_pending());
+
+ // Initialize to the current time, and let the subclass optionally override
+ // the time stamps if it has that information. The default request_time is
+ // set by URLRequest before it calls our Start method.
+ request_->response_info_.response_time = Time::Now();
+ GetResponseInfo(&request_->response_info_);
+
+ // When notifying the delegate, the delegate can release the request
+ // (and thus release 'this'). After calling to the delegate, we must
+ // check the request pointer to see if it still exists, and return
+ // immediately if it has been destroyed. self_preservation ensures our
+ // survival until we can get out of this method.
+ scoped_refptr<URLRequestJob> self_preservation = this;
+
+ int http_status_code;
+ GURL new_location;
+ if (IsRedirectResponse(&new_location, &http_status_code)) {
+ const GURL& url = request_->url();
+
+ // Move the reference fragment of the old location to the new one if the
+ // new one has none. This duplicates mozilla's behavior.
+ if (url.is_valid() && url.has_ref() && !new_location.has_ref()) {
+ GURL::Replacements replacements;
+ // Reference the |ref| directly out of the original URL to avoid a
+ // malloc.
+ replacements.SetRef(url.spec().data(),
+ url.parsed_for_possibly_invalid_spec().ref);
+ new_location = new_location.ReplaceComponents(replacements);
+ }
+
+ // Toggle this flag to true so the consumer can access response headers.
+ // Then toggle it back if we choose to follow the redirect.
+ has_handled_response_ = true;
+ request_->delegate()->OnReceivedRedirect(request_, new_location);
+
+ // Ensure that the request wasn't destroyed in OnReceivedRedirect
+ if (!request_ || !request_->delegate())
+ return;
+
+ // If we were not cancelled, then follow the redirect.
+ if (request_->status().is_success()) {
+ has_handled_response_ = false;
+ FollowRedirect(new_location, http_status_code);
+ return;
+ }
+ } else if (NeedsAuth()) {
+ scoped_refptr<AuthChallengeInfo> auth_info;
+ GetAuthChallengeInfo(&auth_info);
+ // Need to check for a NULL auth_info because the server may have failed
+ // to send a challenge with the 401 response.
+ if (auth_info) {
+ scoped_refptr<AuthData> auth_data;
+ GetCachedAuthData(*auth_info, &auth_data);
+ if (auth_data) {
+ SetAuth(auth_data->username, auth_data->password);
+ return;
+ }
+ request_->delegate()->OnAuthRequired(request_, auth_info);
+ // Wait for SetAuth or CancelAuth to be called.
+ return;
+ }
+ }
+
+ has_handled_response_ = true;
+ if (request_->status().is_success())
+ SetupFilter();
+
+ // Without a filter, the raw content-length is also the expected size of
+ // the data we hand out; with a filter the decoded size is unknown.
+ if (!filter_.get()) {
+ std::string content_length;
+ request_->GetResponseHeaderByName("content-length", &content_length);
+ if (!content_length.empty())
+ expected_content_size_ = StringToInt64(content_length);
+ }
+
+ request_->delegate()->OnResponseStarted(request_);
+}
+
+// Reports a failure to start: records the status on the request and invokes
+// OnResponseStarted directly; the delegate observes the failure via
+// request->status().
+void URLRequestJob::NotifyStartError(const URLRequestStatus &status) {
+ DCHECK(!has_handled_response_);
+ has_handled_response_ = true;
+ if (request_) {
+ request_->set_status(status);
+ if (request_->delegate())
+ request_->delegate()->OnResponseStarted(request_);
+ }
+}
+
+// Called when an async raw read completes; runs the data through the filter
+// (if any) before handing the byte count to the delegate.
+void URLRequestJob::NotifyReadComplete(int bytes_read) {
+ if (!request_ || !request_->delegate())
+ return; // The request was destroyed, so there is no more work to do.
+
+ // TODO(darin): Bug 1004233. Re-enable this test once all of the chrome
+ // unit_tests have been fixed to not trip this.
+ //DCHECK(!request_->status().is_io_pending());
+
+ // The headers should be complete before reads complete
+ DCHECK(has_handled_response_);
+
+ if (bytes_read > 0)
+ RecordBytesRead(bytes_read);
+
+ // Don't notify if we had an error.
+ if (!request_->status().is_success())
+ return;
+
+ // When notifying the delegate, the delegate can release the request
+ // (and thus release 'this'). After calling to the delegate, we must
+ // check the request pointer to see if it still exists, and return
+ // immediately if it has been destroyed. self_preservation ensures our
+ // survival until we can get out of this method.
+ scoped_refptr<URLRequestJob> self_preservation = this;
+
+ if (filter_.get()) {
+ // Tell the filter that it has more data
+ FilteredDataRead(bytes_read);
+
+ // Filter the data.
+ int filter_bytes_read = 0;
+ if (ReadFilteredData(&filter_bytes_read))
+ request_->delegate()->OnReadCompleted(request_, filter_bytes_read);
+ } else {
+ request_->delegate()->OnReadCompleted(request_, bytes_read);
+ }
+}
+
+void URLRequestJob::NotifyDone(const URLRequestStatus &status) {
+ DCHECK(!done_) << "Job sending done notification twice";
+ if (done_)
+ return;
+ done_ = true;
+
+ if (is_profiling() && metrics_->total_bytes_read_ > 0) {
+ // There are valid IO statistics. Fill in other fields of metrics for
+ // profiling consumers to retrieve information.
+ metrics_->original_url_.reset(new GURL(request_->original_url()));
+ metrics_->end_time_ = TimeTicks::Now();
+ metrics_->success_ = status.is_success();
+
+ if (!(request_->original_url() == request_->url())) {
+ metrics_->url_.reset(new GURL(request_->url()));
+ }
+ } else {
+ metrics_.reset();
+ }
+
+
+ // Unless there was an error, we should have at least tried to handle
+ // the response before getting here.
+ DCHECK(has_handled_response_ || !status.is_success());
+
+ // In the success case, we cannot send the NotifyDone now. It can only be
+ // sent after the request is completed, because otherwise we can get the
+ // NotifyDone() called while the delegate is still accessing the request.
+ // In the case of an error, we are free to notify right away (via the
+ // posted CompleteNotifyDone below).
+ if (status.is_success()) {
+ // If there is data left in the filter, then something is probably wrong.
+ DCHECK(!FilterHasData());
+ }
+
+ // As with NotifyReadComplete, we need to take care to notice if we were
+ // destroyed during a delegate callback.
+ if (request_) {
+ request_->set_is_pending(false);
+ // With async IO, it's quite possible to have a few outstanding
+ // requests. We could receive a request to Cancel, followed shortly
+ // by a successful IO. For tracking the status(), once there is
+ // an error, we do not change the status back to success. To
+ // enforce this, only set the status if the job is so far
+ // successful.
+ if (request_->status().is_success())
+ request_->set_status(status);
+ }
+
+ g_url_request_job_tracker.OnJobDone(this, status);
+
+ // Complete this notification later. This prevents us from re-entering the
+ // delegate if we're done because of a synchronous call.
+ MessageLoop::current()->PostTask(FROM_HERE, NewRunnableMethod(
+ this, &URLRequestJob::CompleteNotifyDone));
+}
+
+// Runs as a task posted by NotifyDone, so the delegate is never re-entered
+// from inside the call that finished the job.
+void URLRequestJob::CompleteNotifyDone() {
+ // Check if we should notify the delegate that we're done because of an error.
+ if (request_ &&
+ !request_->status().is_success() &&
+ request_->delegate()) {
+ // We report the error differently depending on whether we've called
+ // OnResponseStarted yet.
+ if (has_handled_response_) {
+ // We signal the error by calling OnReadComplete with a bytes_read of -1.
+ request_->delegate()->OnReadCompleted(request_, -1);
+ } else {
+ has_handled_response_ = true;
+ request_->delegate()->OnResponseStarted(request_);
+ }
+ }
+}
+
+// Sends the canceled done-notification, but only if no done notification has
+// been sent yet (NotifyDone DCHECKs against running twice).
+void URLRequestJob::NotifyCanceled() {
+ if (!done_) {
+ NotifyDone(URLRequestStatus(URLRequestStatus::CANCELED,
+ net::ERR_ABORTED));
+ }
+}
+
+// True when a filter is installed and it still holds unconsumed input data.
+bool URLRequestJob::FilterHasData() {
+ return filter_.get() && filter_->stream_data_len();
+}
+
+// Kills this job and asks the request to restart at |location|. Redirect()
+// returns a net error code; anything other than net::OK is surfaced as a
+// start error.
+void URLRequestJob::FollowRedirect(const GURL& location,
+ int http_status_code) {
+ g_url_request_job_tracker.OnJobRedirect(this, location, http_status_code);
+ Kill();
+ // Kill could have notified the Delegate and destroyed the request.
+ if (!request_)
+ return;
+
+ int rv = request_->Redirect(location, http_status_code);
+ if (rv != net::OK)
+ NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED, rv));
+}
+
+// Accounts for freshly read raw bytes: bumps the profiling counters when
+// enabled and always informs the global job tracker.
+void URLRequestJob::RecordBytesRead(int bytes_read) {
+ if (is_profiling()) {
+ ++(metrics_->number_of_read_IO_);
+ metrics_->total_bytes_read_ += bytes_read;
+ }
+ g_url_request_job_tracker.OnBytesRead(this, bytes_read);
+}
+
+// Returns the owning request's status, or a canceled status once the request
+// has been detached/destroyed.
+const URLRequestStatus URLRequestJob::GetStatus() {
+ if (request_)
+ return request_->status();
+ // If the request is gone, we must be cancelled.
+ return URLRequestStatus(URLRequestStatus::CANCELED,
+ net::ERR_ABORTED);
+}
+
+// Forwards the status to the request, if it is still attached.
+void URLRequestJob::SetStatus(const URLRequestStatus &status) {
+ if (request_)
+ request_->set_status(status);
+}
diff --git a/net/url_request/url_request_job.h b/net/url_request/url_request_job.h
new file mode 100644
index 0000000..60a870d
--- /dev/null
+++ b/net/url_request/url_request_job.h
@@ -0,0 +1,336 @@
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef BASE_URL_REQUEST_URL_REQUEST_JOB_H__
+#define BASE_URL_REQUEST_URL_REQUEST_JOB_H__
+
+#include <vector>
+
+#include "base/basictypes.h"
+#include "base/ref_counted.h"
+#include "net/base/auth.h"
+#include "net/base/filter.h"
+#include "net/base/load_states.h"
+#include "net/url_request/url_request_status.h"
+
+namespace net {
+class HttpResponseInfo;
+class UploadData;
+}
+
+class GURL;
+class URLRequest;
+class URLRequestJobMetrics;
+
+class URLRequestJob : public base::RefCounted<URLRequestJob> {
+ public:
+ URLRequestJob(URLRequest* request);
+ virtual ~URLRequestJob();
+
+ // Returns the request that owns this job. THIS POINTER MAY BE NULL if the
+ // request was destroyed.
+ URLRequest* request() const {
+ return request_;
+ }
+
+ // Sets the upload data, most requests have no upload data, so this is a NOP.
+ // Job types supporting upload data will override this.
+ virtual void SetUpload(net::UploadData* upload) { }
+
+ // Sets extra request headers for Job types that support request headers.
+ virtual void SetExtraRequestHeaders(const std::string& headers) { }
+
+ // If any error occurs while starting the Job, NotifyStartError should be called.
+ // This helps ensure that all errors follow more similar notification code
+ // paths, which should simplify testing.
+ virtual void Start() = 0;
+
+ // This function MUST somehow call NotifyDone/NotifyCanceled or some requests
+ // will get leaked. Certain callers use that message to know when they can
+ // delete their URLRequest object, even when doing a cancel.
+ //
+ // The job should endeavor to stop working as soon as is convenient, but must
+ // not send and complete notifications from inside this function. Instead,
+ // complete notifications (including "canceled") should be sent from a
+ // callback run from the message loop.
+ //
+ // The job is not obliged to immediately stop sending data in response to
+ // this call, nor is it obliged to fail with "canceled" unless not all data
+ // was sent as a result. A typical case would be where the job is almost
+ // complete and can succeed before the canceled notification can be
+ // dispatched (from the message loop).
+ //
+ // The job should be prepared to receive multiple calls to kill it, but only
+ // one notification must be issued.
+ virtual void Kill();
+
+ // Called to detach the request from this Job. Results in the Job being
+ // killed off eventually. The job must not use the request pointer any more.
+ void DetachRequest();
+
+ // Called to read post-filtered data from this Job, returning the number of
+ // bytes read, 0 when there is no more data, or -1 if there was an error.
+ // This is just the backend for URLRequest::Read, see that function for more
+ // info.
+ bool Read(char* buf, int buf_size, int *bytes_read);
+
+ // Called to fetch the current load state for the job.
+ virtual net::LoadState GetLoadState() const { return net::LOAD_STATE_IDLE; }
+
+ // Called to get the upload progress in bytes.
+ virtual uint64 GetUploadProgress() const { return 0; }
+
+ // Called to fetch the mime_type for this request. Only makes sense for some
+ // types of requests. Returns true on success. Calling this on a type that
+ // doesn't have a mime type will return false.
+ virtual bool GetMimeType(std::string* mime_type) { return false; }
+
+ // Called to fetch the charset for this request. Only makes sense for some
+ // types of requests. Returns true on success. Calling this on a type that
+ // doesn't have a charset will return false.
+ virtual bool GetCharset(std::string* charset) { return false; }
+
+ // Called to get response info.
+ virtual void GetResponseInfo(net::HttpResponseInfo* info) {}
+
+ // Returns the cookie values included in the response, if applicable.
+ // Returns true if applicable.
+ // NOTE: This removes the cookies from the job, so it will only return
+ // useful results once per job.
+ virtual bool GetResponseCookies(std::vector<std::string>* cookies) {
+ return false;
+ }
+
+ // Returns the HTTP response code for the request.
+ virtual int GetResponseCode() { return -1; }
+
+ // Called to fetch the encoding type for this request. Only makes sense for
+ // some types of requests. Returns true on success. Calling this on a request
+ // that doesn't have or specify an encoding type will return false.
+ virtual bool GetContentEncoding(std::string* encoding_type) { return false; }
+
+ // Called to setup stream filter for this request. An example of filter is
+ // content encoding/decoding.
+ void SetupFilter();
+
+ // Called to determine if this response is a redirect. Only makes sense
+ // for some types of requests. This method returns true if the response
+ // is a redirect, and fills in the location param with the URL of the
+ // redirect. The HTTP status code (e.g., 302) is filled into
+ // |*http_status_code| to signify the type of redirect.
+ //
+ // The caller is responsible for following the redirect by setting up an
+ // appropriate replacement Job. Note that the redirected location may be
+ // invalid, the caller should be sure it can handle this.
+  virtual bool IsRedirectResponse(GURL* location,
+                                  int* http_status_code) {
+    return false;
+  }
+
+  // Called to determine if it is okay to redirect this job to the specified
+  // location.  This may be used to implement protocol-specific restrictions.
+  // If this function returns false, then the URLRequest will fail reporting
+  // net::ERR_UNSAFE_REDIRECT.
+  virtual bool IsSafeRedirect(const GURL& location) {
+    return true;
+  }
+
+  // Called to determine if this response is asking for authentication.  Only
+  // makes sense for some types of requests.  The caller is responsible for
+  // obtaining the credentials and passing them to SetAuth.
+  virtual bool NeedsAuth() { return false; }
+
+  // Fills the authentication info with the server's response.
+  virtual void GetAuthChallengeInfo(
+      scoped_refptr<AuthChallengeInfo>* auth_info) {
+    // This will only be called if NeedsAuth() returns true, in which
+    // case the derived class should implement this!
+    NOTREACHED();
+  }
+
+  // Retrieves cached auth data for the auth challenge.  Sets |*auth_data| to
+  // NULL if there is no auth cache or if the auth cache doesn't have the
+  // auth data for the auth challenge.
+  virtual void GetCachedAuthData(const AuthChallengeInfo& auth_info,
+                                 scoped_refptr<AuthData>* auth_data) {
+    *auth_data = NULL;
+  }
+
+  // Resend the request with authentication credentials.
+  virtual void SetAuth(const std::wstring& username,
+                       const std::wstring& password) {
+    // This will only be called if NeedsAuth() returns true, in which
+    // case the derived class should implement this!
+    NOTREACHED();
+  }
+
+  // Display the error page without asking for credentials again.
+  virtual void CancelAuth() {
+    // This will only be called if NeedsAuth() returns true, in which
+    // case the derived class should implement this!
+    NOTREACHED();
+  }
+
+  // Continue processing the request ignoring the last error.
+  virtual void ContinueDespiteLastError() {
+    // Implementations should know how to recover from errors they generate.
+    // If this code was reached, we are trying to recover from an error that
+    // we don't know how to recover from.
+    NOTREACHED();
+  }
+
+  // Returns true if the Job is done producing response data and has called
+  // NotifyDone on the request.
+  bool is_done() const { return done_; }
+
+  // Returns true if the job is doing performance profiling.
+  bool is_profiling() const { return is_profiling_; }
+
+  // Retrieve the performance measurement of the job.  The data is
+  // encapsulated in a URLRequestJobMetrics object.  The caller owns this
+  // object from now on.
+  URLRequestJobMetrics* RetrieveMetrics();
+
+  // Get/Set the expected content size (in bytes) for this job's response.
+  int64 expected_content_size() const { return expected_content_size_; }
+  void set_expected_content_size(const int64& size) {
+    expected_content_size_ = size;
+  }
+
+ protected:
+ // Notifies the job that headers have been received.
+ void NotifyHeadersComplete();
+
+ // Notifies the request that the job has completed a Read operation.
+ void NotifyReadComplete(int bytes_read);
+
+ // Notifies the request that a start error has occurred.
+ void NotifyStartError(const URLRequestStatus &status);
+
+ // NotifyDone marks when we are done with a request. It is really
+ // a glorified set_status, but also does internal state checking and
+ // job tracking. It should be called once per request, when the job is
+ // finished doing all IO.
+ void NotifyDone(const URLRequestStatus &status);
+
+ // Some work performed by NotifyDone must be completed on a separate task
+ // so as to avoid re-entering the delegate. This method exists to perform
+ // that work.
+ void CompleteNotifyDone();
+
+ // Used as an asynchronous callback for Kill to notify the URLRequest that
+ // we were canceled.
+ void NotifyCanceled();
+
+  // Called to get more data from the request response.  Returns true if
+  // there is data immediately available to read.  Returns false otherwise
+  // (the default implementation never has data available).
+  // Internally this function may initiate I/O operations to get more data.
+  virtual bool GetMoreData() { return false; }
+
+ // Called to read raw (pre-filtered) data from this Job.
+ // If returning true, data was read from the job. buf will contain
+ // the data, and bytes_read will receive the number of bytes read.
+ // If returning true, and bytes_read is returned as 0, there is no
+ // additional data to be read.
+ // If returning false, an error occurred or an async IO is now pending.
+ // If async IO is pending, the status of the request will be
+ // URLRequestStatus::IO_PENDING, and buf must remain available until the
+ // operation is completed. See comments on URLRequest::Read for more info.
+ virtual bool ReadRawData(char* buf, int buf_size, int *bytes_read);
+
+ // Informs the filter that data has been read into its buffer
+ void FilteredDataRead(int bytes_read);
+
+ // Reads filtered data from the request. Returns true if successful,
+ // false otherwise. Note, if there is not enough data received to
+ // return data, this call can issue a new async IO request under
+ // the hood.
+ bool ReadFilteredData(int *bytes_read);
+
+ // The request that initiated this job. This value MAY BE NULL if the
+ // request was released by DetachRequest().
+ URLRequest* request_;
+
+ // The status of the job.
+ const URLRequestStatus GetStatus();
+
+ // Set the status of the job.
+ void SetStatus(const URLRequestStatus &status);
+
+ // Whether the job is doing performance profiling
+ bool is_profiling_;
+
+ // Contains IO performance measurement when profiling is enabled.
+ scoped_ptr<URLRequestJobMetrics> metrics_;
+
+ private:
+ // When data filtering is enabled, this function is used to read data
+ // for the filter. Returns true if raw data was read. Returns false if
+ // an error occurred (or we are waiting for IO to complete).
+ bool ReadRawDataForFilter(int *bytes_read);
+
+ // Called in response to a redirect that was not canceled to follow the
+ // redirect. The current job will be replaced with a new job loading the
+ // given redirect destination.
+ void FollowRedirect(const GURL& location, int http_status_code);
+
+ // Updates the profiling info and notifies observers that bytes_read bytes
+ // have been read.
+ void RecordBytesRead(int bytes_read);
+
+ private:
+ // Called to query whether there is data available in the filter to be read
+ // out.
+ bool FilterHasData();
+
+ // Indicates that the job is done producing data, either it has completed
+ // all the data or an error has been encountered. Set exclusively by
+ // NotifyDone so that it is kept in sync with the request.
+ bool done_;
+
+ // The data stream filter which is enabled on demand.
+ scoped_ptr<Filter> filter_;
+ // When we filter data, we receive data into the filter buffers. After
+ // processing the filtered data, we return the data in the caller's buffer.
+ // While the async IO is in progress, we save the user buffer here, and
+ // when the IO completes, we fill this in.
+ char *read_buffer_;
+ int read_buffer_len_;
+
+ // Used by HandleResponseIfNecessary to track whether we've sent the
+ // OnResponseStarted callback and potentially redirect callbacks as well.
+ bool has_handled_response_;
+
+ // Expected content size
+ int64 expected_content_size_;
+
+ DISALLOW_EVIL_CONSTRUCTORS(URLRequestJob);
+};
+
+#endif // BASE_URL_REQUEST_URL_REQUEST_JOB_H__
diff --git a/net/url_request/url_request_job_manager.cc b/net/url_request/url_request_job_manager.cc
new file mode 100644
index 0000000..4d03ccf
--- /dev/null
+++ b/net/url_request/url_request_job_manager.cc
@@ -0,0 +1,178 @@
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "net/url_request/url_request_job_manager.h"
+
+#include "base/string_util.h"
+#include "net/base/load_flags.h"
+#include "net/url_request/url_request_about_job.h"
+#include "net/url_request/url_request_error_job.h"
+#include "net/url_request/url_request_file_job.h"
+#include "net/url_request/url_request_ftp_job.h"
+#include "net/url_request/url_request_http_cache_job.h"
+#include "net/url_request/url_request_view_cache_job.h"
+
+// The built-in set of protocol factories
+static const struct {
+ const char* scheme;
+ URLRequest::ProtocolFactory* factory;
+} kBuiltinFactories[] = {
+ { "http", URLRequestHttpCacheJob::Factory },
+ { "https", URLRequestHttpCacheJob::Factory },
+ { "file", URLRequestFileJob::Factory },
+ { "ftp", URLRequestFtpJob::Factory },
+ { "about", URLRequestAboutJob::Factory },
+ { "view-cache", URLRequestViewCacheJob::Factory },
+};
+
+URLRequestJobManager::URLRequestJobManager() {
+#ifndef NDEBUG
+  // Left NULL so that the first thread to call a member that checks
+  // IsAllowedThread() claims ownership; see IsAllowedThread() in the header.
+  allowed_thread_ = NULL;
+#endif
+}
+
+URLRequestJob* URLRequestJobManager::CreateJob(URLRequest* request) const {
+#ifndef NDEBUG
+  DCHECK(IsAllowedThread());
+#endif
+
+  // If we are given an invalid URL, then don't even try to inspect the scheme.
+  if (!request->url().is_valid())
+    return new URLRequestErrorJob(request, net::ERR_INVALID_URL);
+
+  const std::string& scheme = request->url().scheme(); // already lowercase
+
+  // We do this here to avoid asking interceptors about unsupported schemes.
+  if (!SupportsScheme(scheme))
+    return new URLRequestErrorJob(request, net::ERR_UNKNOWN_URL_SCHEME);
+
+  // THREAD-SAFETY NOTICE:
+  // We do not need to acquire the lock here since we are only reading our
+  // data structures. They should only be modified on the current thread.
+
+  // See if the request should be intercepted.  Interceptors get first crack
+  // at a request unless the caller set LOAD_DISABLE_INTERCEPT.
+  if (!(request->load_flags() & net::LOAD_DISABLE_INTERCEPT)) {
+    InterceptorList::const_iterator i;
+    for (i = interceptors_.begin(); i != interceptors_.end(); ++i) {
+      URLRequestJob* job = (*i)->MaybeIntercept(request);
+      if (job)
+        return job;
+    }
+  }
+
+  // See if the request should be handled by a registered protocol factory.
+  // If the registered factory returns null, then we want to fall-back to the
+  // built-in protocol factory.
+  FactoryMap::const_iterator i = factories_.find(scheme);
+  if (i != factories_.end()) {
+    URLRequestJob* job = i->second(request, scheme);
+    if (job)
+      return job;
+  }
+
+  // See if the request should be handled by a built-in protocol factory.
+  for (size_t i = 0; i < arraysize(kBuiltinFactories); ++i) {
+    if (scheme == kBuiltinFactories[i].scheme) {
+      URLRequestJob* job = (kBuiltinFactories[i].factory)(request, scheme);
+      DCHECK(job); // The built-in factories are not expected to fail!
+      return job;
+    }
+  }
+
+  // If we reached here, a registered protocol factory returned NULL even
+  // though SupportsScheme() said the scheme was handled.  That is fairly
+  // unexpected, and we don't have a specific error to report here :-(
+  return new URLRequestErrorJob(request, net::ERR_FAILED);
+}
+
+bool URLRequestJobManager::SupportsScheme(const std::string& scheme) const {
+  // Registered factories may be mutated on the owning thread while we are
+  // called from another, so consult the map only while holding the lock.
+  {
+    AutoLock locked(lock_);
+    if (factories_.count(scheme) > 0)
+      return true;
+  }
+
+  // The built-in factory table is immutable, so no locking is needed here.
+  size_t index = 0;
+  while (index < arraysize(kBuiltinFactories)) {
+    if (LowerCaseEqualsASCII(scheme, kBuiltinFactories[index].scheme))
+      return true;
+    ++index;
+  }
+
+  return false;
+}
+
+URLRequest::ProtocolFactory* URLRequestJobManager::RegisterProtocolFactory(
+    const std::string& scheme,
+    URLRequest::ProtocolFactory* factory) {
+#ifndef NDEBUG
+  DCHECK(IsAllowedThread());
+#endif
+
+  AutoLock locked(lock_);
+
+  // Remember whatever was registered before so we can hand it back.
+  FactoryMap::iterator existing = factories_.find(scheme);
+  URLRequest::ProtocolFactory* old_factory =
+      (existing == factories_.end()) ? NULL : existing->second;
+
+  if (factory) {
+    factories_[scheme] = factory;
+  } else if (existing != factories_.end()) {
+    // A NULL factory clears any prior registration for |scheme|.
+    factories_.erase(existing);
+  }
+  return old_factory;
+}
+
+void URLRequestJobManager::RegisterRequestInterceptor(
+    URLRequest::Interceptor* interceptor) {
+#ifndef NDEBUG
+  DCHECK(IsAllowedThread());
+#endif
+
+  AutoLock locked(lock_);
+
+  // Registering the same interceptor twice is a caller bug.
+  DCHECK(std::find(interceptors_.begin(), interceptors_.end(), interceptor) ==
+         interceptors_.end());
+  interceptors_.push_back(interceptor);
+}
+
+void URLRequestJobManager::UnregisterRequestInterceptor(
+    URLRequest::Interceptor* interceptor) {
+#ifndef NDEBUG
+  DCHECK(IsAllowedThread());
+#endif
+
+  AutoLock locked(lock_);
+
+  InterceptorList::iterator i =
+      std::find(interceptors_.begin(), interceptors_.end(), interceptor);
+  // Unregistering an interceptor that was never registered is a caller bug.
+  DCHECK(i != interceptors_.end());
+  interceptors_.erase(i);
+}
diff --git a/net/url_request/url_request_job_manager.h b/net/url_request/url_request_job_manager.h
new file mode 100644
index 0000000..f10d5ee
--- /dev/null
+++ b/net/url_request/url_request_job_manager.h
@@ -0,0 +1,100 @@
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef NET_URL_REQUEST_URL_REQUEST_JOB_MANAGER_H__
+#define NET_URL_REQUEST_URL_REQUEST_JOB_MANAGER_H__
+
+#include <map>
+
+#include "base/lock.h"
+#include "net/url_request/url_request.h"
+
+// This class is responsible for managing the set of protocol factories and
+// request interceptors that determine how an URLRequestJob gets created to
+// handle an URLRequest.
+//
+// MULTI-THREADING NOTICE:
+// URLRequest is designed to have all consumers on a single thread, and so no
+// attempt is made to support ProtocolFactory or Interceptor instances being
+// registered/unregistered or in any way poked on multiple threads. However,
+// we do support checking for supported schemes FROM ANY THREAD (i.e., it is
+// safe to call SupportsScheme on any thread).
+//
+class URLRequestJobManager {
+ public:
+  URLRequestJobManager();
+
+  // Instantiate an URLRequestJob implementation based on the registered
+  // interceptors and protocol factories. This will always succeed in
+  // returning a job unless we are--in the extreme case--out of memory.
+  URLRequestJob* CreateJob(URLRequest* request) const;
+
+  // Returns true if there is a protocol factory registered for the given
+  // scheme. Note: also returns true if there is a built-in handler for the
+  // given scheme.  Safe to call from any thread.
+  bool SupportsScheme(const std::string& scheme) const;
+
+  // Register a protocol factory associated with the given scheme. The factory
+  // parameter may be null to clear any existing association. Returns the
+  // previously registered protocol factory if any.
+  URLRequest::ProtocolFactory* RegisterProtocolFactory(
+      const std::string& scheme, URLRequest::ProtocolFactory* factory);
+
+  // Register/unregister a request interceptor.
+  void RegisterRequestInterceptor(URLRequest::Interceptor* interceptor);
+  void UnregisterRequestInterceptor(URLRequest::Interceptor* interceptor);
+
+ private:
+  typedef std::map<std::string,URLRequest::ProtocolFactory*> FactoryMap;
+  typedef std::vector<URLRequest::Interceptor*> InterceptorList;
+
+  mutable Lock lock_;
+  FactoryMap factories_;
+  InterceptorList interceptors_;
+
+#ifndef NDEBUG
+  // We use this to assert that CreateJob and the registration functions all
+  // run on the same thread.  This must be a thread id, not a HANDLE:
+  // GetCurrentThread() returns a pseudo-handle with the same constant value
+  // in every thread, so comparing HANDLEs would make the check vacuous.
+  mutable DWORD allowed_thread_;
+
+  // The first caller of this function sets the allowed thread (Windows
+  // thread ids are never 0, so 0 means "not yet claimed"). This way we
+  // avoid needing to define that thread externally. Since we expect all
+  // callers to be on the same thread, we don't worry about threads racing
+  // to set the allowed thread.
+  bool IsAllowedThread() const {
+    if (!allowed_thread_)
+      allowed_thread_ = GetCurrentThreadId();
+    return allowed_thread_ == GetCurrentThreadId();
+  }
+#endif
+
+  DISALLOW_EVIL_CONSTRUCTORS(URLRequestJobManager);
+};
+
+#endif // NET_URL_REQUEST_URL_REQUEST_JOB_MANAGER_H__
diff --git a/net/url_request/url_request_job_metrics.cc b/net/url_request/url_request_job_metrics.cc
new file mode 100644
index 0000000..3610459
--- /dev/null
+++ b/net/url_request/url_request_job_metrics.cc
@@ -0,0 +1,57 @@
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "net/url_request/url_request_job_metrics.h"
+
+#include "base/basictypes.h"
+#include "base/string_util.h"
+
+// Renders this job's metrics as a single human-readable line appended to
+// |text|.  A NULL |text| is tolerated and treated as a no-op.
+void URLRequestJobMetrics::AppendText(std::wstring* text) {
+  if (!text)
+    return;
+
+  text->append(L"job url = ");
+  text->append(UTF8ToWide(original_url_->spec()));
+
+  // |url_| is only populated when the job was redirected away from the
+  // original url.
+  if (url_.get()) {
+    text->append(L"; redirected url = ");
+    text->append(UTF8ToWide(url_->spec()));
+  }
+
+  TimeDelta elapsed = end_time_ - start_time_;
+  StringAppendF(text,
+                L"; total bytes read = %d; read calls = %d; time = %lld ms;",
+                total_bytes_read_, number_of_read_IO_,
+                elapsed.InMilliseconds());
+
+  text->append(success_ ? L" success." : L" fail.");
+}
diff --git a/net/url_request/url_request_job_metrics.h b/net/url_request/url_request_job_metrics.h
new file mode 100644
index 0000000..e6a9bc6
--- /dev/null
+++ b/net/url_request/url_request_job_metrics.h
@@ -0,0 +1,74 @@
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Records IO statistics associated with a URLRequestJob.
+// See description in navigation_profiler.h for an overview of perf profiling.
+
+#include <string>
+
+#include "base/basictypes.h"
+#include "base/scoped_ptr.h"
+#include "base/time.h"
+#include "googleurl/src/gurl.h"
+
+#ifndef BASE_URL_REQUEST_URL_REQUEST_JOB_METRICS_H__
+#define BASE_URL_REQUEST_URL_REQUEST_JOB_METRICS_H__
+
+class URLRequestJobMetrics {
+ public:
+  // success_ is initialized to false so a job whose final status is never
+  // recorded does not report an indeterminate (uninitialized) value.
+  URLRequestJobMetrics()
+      : total_bytes_read_(0),
+        number_of_read_IO_(0),
+        success_(false) {
+  }
+  ~URLRequestJobMetrics() { }
+
+  // The original url the job has been created for.
+  scoped_ptr<GURL> original_url_;
+
+  // The actual url the job connects to. If the actual url is same as the
+  // original url, url_ is empty.
+  scoped_ptr<GURL> url_;
+
+  // Time when the job starts.
+  TimeTicks start_time_;
+
+  // Time when the job is done.
+  TimeTicks end_time_;
+
+  // Total number of bytes the job reads from the underlying IO.
+  int total_bytes_read_;
+
+  // Number of IO read operations the job issues.
+  int number_of_read_IO_;
+
+  // Final status of the job.
+  bool success_;
+
+  // Append the text report of the frame loading to the input string.
+  void AppendText(std::wstring* text);
+};
+
+#endif // BASE_URL_REQUEST_URL_REQUEST_JOB_METRICS_H__
diff --git a/net/url_request/url_request_job_tracker.cc b/net/url_request/url_request_job_tracker.cc
new file mode 100644
index 0000000..4a0f0f3
--- /dev/null
+++ b/net/url_request/url_request_job_tracker.cc
@@ -0,0 +1,82 @@
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <algorithm>
+
+#include "net/url_request/url_request_job_tracker.h"
+
+#include "base/logging.h"
+#include "net/url_request/url_request_job.h"
+
+URLRequestJobTracker g_url_request_job_tracker;
+
+URLRequestJobTracker::URLRequestJobTracker() {
+  // Nothing to do; |active_jobs_| and |observers_| start out empty.
+}
+
+URLRequestJobTracker::~URLRequestJobTracker() {
+  // Jobs normally unregister themselves via RemoveJob before the tracker is
+  // destroyed; anything still present here either leaked or was in flight
+  // at shutdown.
+  DLOG_IF(WARNING, active_jobs_.size() != 0) <<
+    "Leaking " << active_jobs_.size() << " URLRequestJob object(s), this could be "
+    "because the URLRequest forgot to free it (bad), or if the program was "
+    "terminated while a request was active (normal).";
+}
+
+void URLRequestJobTracker::AddNewJob(URLRequestJob* job) {
+  // Record the job first so observers see a consistent active list.
+  active_jobs_.push_back(job);
+  FOR_EACH_OBSERVER(JobObserver, observers_, OnJobAdded(job));
+}
+
+void URLRequestJobTracker::RemoveJob(URLRequestJob* job) {
+  JobList::iterator iter = std::find(active_jobs_.begin(), active_jobs_.end(),
+                                     job);
+  if (iter == active_jobs_.end()) {
+    // Every job should have been registered via AddNewJob; hitting this
+    // indicates an unbalanced AddNewJob/RemoveJob pair.
+    NOTREACHED() << "Removing a non-active job";
+    return;
+  }
+  active_jobs_.erase(iter);
+
+  FOR_EACH_OBSERVER(JobObserver, observers_, OnJobRemoved(job));
+}
+
+// Fans the job-done notification out to all registered observers.
+void URLRequestJobTracker::OnJobDone(URLRequestJob* job,
+                                     const URLRequestStatus& status) {
+  FOR_EACH_OBSERVER(JobObserver, observers_, OnJobDone(job, status));
+}
+
+// Fans the redirect notification out to all registered observers.
+void URLRequestJobTracker::OnJobRedirect(URLRequestJob* job,
+                                         const GURL& location,
+                                         int status_code) {
+  FOR_EACH_OBSERVER(JobObserver, observers_,
+                    OnJobRedirect(job, location, status_code));
+}
+
+// Fans the bytes-read notification out to all registered observers.
+void URLRequestJobTracker::OnBytesRead(URLRequestJob* job,
+                                       int byte_count) {
+  FOR_EACH_OBSERVER(JobObserver, observers_,
+                    OnBytesRead(job, byte_count));
+}
diff --git a/net/url_request/url_request_job_tracker.h b/net/url_request/url_request_job_tracker.h
new file mode 100644
index 0000000..ca5a380
--- /dev/null
+++ b/net/url_request/url_request_job_tracker.h
@@ -0,0 +1,117 @@
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef BASE_URL_REQUEST_URL_REQUEST_JOB_TRACKER_H__
+#define BASE_URL_REQUEST_URL_REQUEST_JOB_TRACKER_H__
+
+#include <vector>
+
+#include "base/observer_list.h"
+#include "net/url_request/url_request_status.h"
+
+class URLRequestJob;
+class GURL;
+
+// This class maintains a list of active URLRequestJobs for debugging purposes.
+// This allows us to warn on leaked jobs and also allows an observer to track
+// what is happening, for example, for the network status monitor.
+//
+// NOTE: URLRequest is single-threaded, so this class should only be used on
+// the same thread where all of the application's URLRequest calls are made.
+//
+class URLRequestJobTracker {
+ public:
+  typedef std::vector<URLRequestJob*> JobList;
+  typedef JobList::const_iterator JobIterator;
+
+  // The observer's methods are called on the thread that called AddObserver.
+  class JobObserver {
+   public:
+    // The tracker does not own or delete observers, but an abstract
+    // interface should have a virtual destructor so that deleting a concrete
+    // observer through a JobObserver pointer is well-defined.
+    virtual ~JobObserver() {}
+
+    // Called after the given job has been added to the list.
+    virtual void OnJobAdded(URLRequestJob* job) = 0;
+
+    // Called after the given job has been removed from the list.
+    virtual void OnJobRemoved(URLRequestJob* job) = 0;
+
+    // Called when the given job has completed, before notifying the request.
+    virtual void OnJobDone(URLRequestJob* job,
+                           const URLRequestStatus& status) = 0;
+
+    // Called when the given job is about to follow a redirect to the given
+    // new URL.  The redirect type is given in |status_code|.
+    virtual void OnJobRedirect(URLRequestJob* job, const GURL& location,
+                               int status_code) = 0;
+
+    // Called when a new chunk of bytes has been read for the given job. The
+    // byte count is the number of bytes for that read event only.
+    virtual void OnBytesRead(URLRequestJob* job, int byte_count) = 0;
+  };
+
+  URLRequestJobTracker();
+  ~URLRequestJobTracker();
+
+  // Adds or removes an observer from the list.  Note: these methods should
+  // only be called on the same thread where URLRequest objects are used.
+  void AddObserver(JobObserver* observer) {
+    observers_.AddObserver(observer);
+  }
+  void RemoveObserver(JobObserver* observer) {
+    observers_.RemoveObserver(observer);
+  }
+
+  // Adds or removes the job from the active list; should be called by the
+  // job constructor and destructor.  Note: don't use "AddJob" since that
+  // is #defined by windows.h :(
+  void AddNewJob(URLRequestJob* job);
+  void RemoveJob(URLRequestJob* job);
+
+  // Job status change notifications.
+  void OnJobDone(URLRequestJob* job, const URLRequestStatus& status);
+  void OnJobRedirect(URLRequestJob* job, const GURL& location,
+                     int status_code);
+
+  // Bytes read notifications.
+  void OnBytesRead(URLRequestJob* job, int byte_count);
+
+  // Allows iteration over all active jobs.
+  JobIterator begin() const {
+    return active_jobs_.begin();
+  }
+  JobIterator end() const {
+    return active_jobs_.end();
+  }
+
+ private:
+  ObserverList<JobObserver> observers_;
+  JobList active_jobs_;
+};
+
+extern URLRequestJobTracker g_url_request_job_tracker;
+
+#endif // BASE_URL_REQUEST_URL_REQUEST_JOB_TRACKER_H__
diff --git a/net/url_request/url_request_simple_job.cc b/net/url_request/url_request_simple_job.cc
new file mode 100644
index 0000000..cffdf6c
--- /dev/null
+++ b/net/url_request/url_request_simple_job.cc
@@ -0,0 +1,81 @@
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "net/url_request/url_request_simple_job.h"
+
+#include "base/message_loop.h"
+#include "net/base/net_errors.h"
+
// Constructs a job with an empty response; mime_type_/charset_/data_ are
// filled in later by GetData() (see StartAsync), and data_offset_ tracks how
// much of data_ has already been handed to the reader.
URLRequestSimpleJob::URLRequestSimpleJob(URLRequest* request)
    : URLRequestJob(request),
      data_offset_(0) {
}
+
+void URLRequestSimpleJob::Start() {
+ // Start reading asynchronously so that all error reporting and data
+ // callbacks happen as they would for network requests.
+ MessageLoop::current()->PostTask(FROM_HERE, NewRunnableMethod(
+ this, &URLRequestSimpleJob::StartAsync));
+}
+
+bool URLRequestSimpleJob::GetMimeType(std::string* mime_type) {
+ *mime_type = mime_type_;
+ return true;
+}
+
+bool URLRequestSimpleJob::GetCharset(std::string* charset) {
+ *charset = charset_;
+ return true;
+}
+
+bool URLRequestSimpleJob::ReadRawData(char* buf, int buf_size,
+ int* bytes_read) {
+ DCHECK(bytes_read);
+ int remaining = static_cast<int>(data_.size()) - data_offset_;
+ if (buf_size > remaining)
+ buf_size = remaining;
+ memcpy(buf, data_.data() + data_offset_, buf_size);
+ data_offset_ += buf_size;
+ *bytes_read = buf_size;
+ return true;
+}
+
// Runs on the message loop after Start().  Asks the subclass for the whole
// response via GetData() and reports either headers-complete or a start error.
void URLRequestSimpleJob::StartAsync() {
  // request_ can be NULL by the time this task runs — presumably the request
  // was detached/cancelled while the task was queued; nothing to notify then.
  if (!request_)
    return;

  if (GetData(&mime_type_, &charset_, &data_)) {
    // Notify that the headers are complete
    NotifyHeadersComplete();
  } else {
    // what should the error code be?
    NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED,
                                      net::ERR_INVALID_URL));
  }
}
diff --git a/net/url_request/url_request_simple_job.h b/net/url_request/url_request_simple_job.h
new file mode 100644
index 0000000..65743b2
--- /dev/null
+++ b/net/url_request/url_request_simple_job.h
@@ -0,0 +1,60 @@
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef NET_URL_REQUEST_URL_REQUEST_SIMPLE_JOB_H__
+#define NET_URL_REQUEST_URL_REQUEST_SIMPLE_JOB_H__
+
+#include "net/url_request/url_request.h"
+#include "net/url_request/url_request_job.h"
+
// Base class for jobs that serve an in-memory response.  A subclass supplies
// the entire response body (plus MIME type and charset) up front via
// GetData(); this class then feeds the body to the reader incrementally
// through ReadRawData().
class URLRequestSimpleJob : public URLRequestJob {
 public:
  URLRequestSimpleJob(URLRequest* request);

  virtual void Start();
  virtual bool ReadRawData(char* buf, int buf_size, int *bytes_read);
  virtual bool GetMimeType(std::string* mime_type);
  virtual bool GetCharset(std::string* charset);

 protected:
  // subclasses must override the way response data is determined.
  // Returns false to indicate an error, in which case the request fails.
  virtual bool GetData(std::string* mime_type,
                       std::string* charset,
                       std::string* data) const = 0;

 private:
  // Posted from Start() so all notifications are delivered asynchronously.
  void StartAsync();

  // Response properties filled in by GetData() during StartAsync().
  std::string mime_type_;
  std::string charset_;
  std::string data_;
  // Number of bytes of data_ already returned by ReadRawData().
  int data_offset_;
};
+
+#endif  // NET_URL_REQUEST_URL_REQUEST_SIMPLE_JOB_H__
diff --git a/net/url_request/url_request_status.h b/net/url_request/url_request_status.h
new file mode 100644
index 0000000..d0fa16a
--- /dev/null
+++ b/net/url_request/url_request_status.h
@@ -0,0 +1,91 @@
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// This file's dependencies should be kept to a minimum so that it can be
+// included in WebKit code that doesn't rely on much of common.
+
+#ifndef BASE_URL_REQUEST_URL_REQUEST_STATUS_H__
+#define BASE_URL_REQUEST_URL_REQUEST_STATUS_H__
+
// Represents the result of a URL request. It encodes errors and various
// types of success.
class URLRequestStatus {
 public:
  enum Status {
    // Request succeeded, os_error() will be 0.
    SUCCESS = 0,

    // An IO request is pending; the caller will be informed when it is
    // completed.
    IO_PENDING,

    // Request was successful but was handled by an external program, so there
    // is no response data. This usually means the current page should not be
    // navigated, but no error should be displayed. os_error will be 0.
    HANDLED_EXTERNALLY,

    // Request was cancelled programmatically.
    CANCELED,

    // The request failed for some reason. os_error may have more information.
    FAILED,
  };

  URLRequestStatus() : status_(SUCCESS), os_error_(0) {}
  URLRequestStatus(Status s, int e) : status_(s), os_error_(e) {}

  Status status() const { return status_; }
  void set_status(Status s) { status_ = s; }

  int os_error() const { return os_error_; }
  void set_os_error(int e) { os_error_ = e; }

  // Convenience check for the most common test.  Note that IO_PENDING counts
  // as success here, while HANDLED_EXTERNALLY deliberately does NOT: for
  // everything except user notifications it should be treated like an error
  // (processing should stop).
  bool is_success() const {
    switch (status_) {
      case SUCCESS:
      case IO_PENDING:
        return true;
      default:
        return false;
    }
  }

  // Returns true if the request is waiting for IO.
  bool is_io_pending() const {
    return status_ == IO_PENDING;
  }

 private:
  // Application level status
  Status status_;

  // Error code from the operating system network layer if an error was
  // encountered
  int os_error_;
};
+
+#endif // BASE_URL_REQUEST_URL_REQUEST_STATUS_H__
diff --git a/net/url_request/url_request_test_job.cc b/net/url_request/url_request_test_job.cc
new file mode 100644
index 0000000..da9d393
--- /dev/null
+++ b/net/url_request/url_request_test_job.cc
@@ -0,0 +1,204 @@
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <vector>
+
+#include "net/url_request/url_request_test_job.h"
+
+#include "base/message_loop.h"
+#include "base/string_util.h"
+#include "net/base/net_errors.h"
+#include "net/url_request/url_request.h"
+
+// This emulates the global message loop for the test URL request class, since
+// this is only test code, it's probably not too dangerous to have this static
+// object.
+static std::vector< scoped_refptr<URLRequestTestJob> > pending_jobs;
+
// static getters for known URLs.  Requests for the first three get canned
// responses (see StartAsync); test_url_error() is in the "test" scheme but is
// deliberately not one of the known URLs, so it takes the error path.
GURL URLRequestTestJob::test_url_1() {
  return GURL("test:url1");
}
GURL URLRequestTestJob::test_url_2() {
  return GURL("test:url2");
}
GURL URLRequestTestJob::test_url_3() {
  return GURL("test:url3");
}
GURL URLRequestTestJob::test_url_error() {
  return GURL("test:error");
}
+
// static getters for known URL responses; test_data_N is the body served for
// test_url_N.
std::string URLRequestTestJob::test_data_1() {
  return std::string("<html><title>Test One</title></html>");
}
std::string URLRequestTestJob::test_data_2() {
  return std::string("<html><title>Test Two Two</title></html>");
}
std::string URLRequestTestJob::test_data_3() {
  return std::string("<html><title>Test Three Three Three</title></html>");
}
+
// static
// ProtocolFactory-compatible entry point.  The scheme argument is unused:
// every request routed to this factory gets a plain URLRequestTestJob.
URLRequestJob* URLRequestTestJob::Factory(URLRequest* request,
                                          const std::string& scheme) {
  return new URLRequestTestJob(request);
}
+
// Starts in the WAITING stage with no pending async read buffer; data_ and
// the stage are set up in StartAsync() once the URL is known.
URLRequestTestJob::URLRequestTestJob(URLRequest* request)
    : URLRequestJob(request),
      stage_(WAITING),
      async_buf_(NULL),
      async_buf_size_(0),
      offset_(0) {
}
+
+// Force the response to set a reasonable MIME type
+bool URLRequestTestJob::GetMimeType(std::string* mime_type) {
+ DCHECK(mime_type);
+ *mime_type = "text/html";
+ return true;
+}
+
+void URLRequestTestJob::Start() {
+ // Start reading asynchronously so that all error reporting and data
+ // callbacks happen as they would for network requests.
+ MessageLoop::current()->PostTask(FROM_HERE, NewRunnableMethod(
+ this, &URLRequestTestJob::StartAsync));
+}
+
// Runs on the message loop after Start().  Selects the canned response for
// the request's URL, queues this job for step-wise processing via
// ProcessOnePendingMessage, and reports headers-complete.  Unknown URLs
// (including test_url_error()) fail immediately without being queued.
void URLRequestTestJob::StartAsync() {
  if (request_->url().spec() == test_url_1().spec()) {
    data_ = test_data_1();
    stage_ = DATA_AVAILABLE;  // Simulate a synchronous response for this one.
  } else if (request_->url().spec() == test_url_2().spec()) {
    data_ = test_data_2();
  } else if (request_->url().spec() == test_url_3().spec()) {
    data_ = test_data_3();
  } else {
    // unexpected url, return error
    // FIXME(brettw) we may want to use WININET errors or have some more types
    // of errors
    NotifyDone(URLRequestStatus(URLRequestStatus::FAILED,
                                net::ERR_INVALID_URL));
    // FIXME(brettw): this should emulate a network error, and not just fail
    // initiating a connection
    return;
  }

  // The pending_jobs queue holds a reference, keeping the job alive until
  // ProcessNextOperation says it is done.
  pending_jobs.push_back(scoped_refptr<URLRequestTestJob>(this));

  this->NotifyHeadersComplete();
}
+
// Serves data_ starting at offset_.  While still in the WAITING stage the
// read is answered asynchronously: the buffer is stashed, IO_PENDING is set,
// and ProcessNextOperation later replays the read and fires
// NotifyReadComplete.
bool URLRequestTestJob::ReadRawData(char* buf, int buf_size, int *bytes_read) {
  if (stage_ == WAITING) {
    // Remember the caller's buffer; the read completes in
    // ProcessNextOperation once the stage advances.
    async_buf_ = buf;
    async_buf_size_ = buf_size;
    SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));
    return false;
  }

  DCHECK(bytes_read);
  *bytes_read = 0;

  if (offset_ >= static_cast<int>(data_.length())) {
    return true;  // done reading
  }

  // Clamp the read to the bytes remaining in data_.
  int to_read = buf_size;
  if (to_read + offset_ > static_cast<int>(data_.length()))
    to_read = static_cast<int>(data_.length()) - offset_;

  memcpy(buf, &data_.c_str()[offset_], to_read);
  offset_ += to_read;

  *bytes_read = to_read;
  return true;
}
+
+void URLRequestTestJob::GetResponseInfo(net::HttpResponseInfo* info) {
+ const std::string kResponseHeaders = StringPrintf(
+ "HTTP/1.1 200 OK%c"
+ "Content-type: text/html%c"
+ "%c", 0, 0, 0);
+ info->headers = new net::HttpResponseHeaders(kResponseHeaders);
+}
+
// Cancels the job by jumping straight to the ALL_DATA stage and requeueing
// it, so the normal processing path still delivers the required NotifyDone.
void URLRequestTestJob::Kill() {
  if (request_) {
    // Note that this state will still cause a NotifyDone to get called
    // in ProcessNextOperation, which is required for jobs.
    stage_ = ALL_DATA;
    pending_jobs.push_back(scoped_refptr<URLRequestTestJob>(this));
  }
}
+
// Advances the job's state machine by one step:
//   WAITING -> DATA_AVAILABLE (completing any stashed async read),
//   DATA_AVAILABLE -> ALL_DATA, ALL_DATA -> DONE.
// Returns true while more steps remain, false once the job is finished (the
// caller then drops it from the queue, which may destroy the object).
bool URLRequestTestJob::ProcessNextOperation() {
  switch (stage_) {
    case WAITING:
      stage_ = DATA_AVAILABLE;
      // OK if ReadRawData wasn't called yet.
      if (async_buf_) {
        int bytes_read;
        // Now that stage_ is DATA_AVAILABLE, ReadRawData must complete
        // synchronously.
        if (!ReadRawData(async_buf_, async_buf_size_, &bytes_read))
          NOTREACHED() << "This should not return false in DATA_AVAILABLE.";
        SetStatus(URLRequestStatus());  // clear the io pending flag
        NotifyReadComplete(bytes_read);
      }
      break;
    case DATA_AVAILABLE:
      stage_ = ALL_DATA;  // done sending data
      break;
    case ALL_DATA:
      stage_ = DONE;
      return false;
    case DONE:
      return false;
    default:
      NOTREACHED() << "Invalid stage";
      return false;
  }
  return true;
}
+
+// static
+bool URLRequestTestJob::ProcessOnePendingMessage() {
+ if (pending_jobs.empty())
+ return false;
+
+ scoped_refptr<URLRequestTestJob> next_job(pending_jobs[0]);
+ pending_jobs.erase(pending_jobs.begin());
+
+ if (next_job->ProcessNextOperation())
+ pending_jobs.push_back(next_job);
+
+ return true;
+}
diff --git a/net/url_request/url_request_test_job.h b/net/url_request/url_request_test_job.h
new file mode 100644
index 0000000..bc7a57b
--- /dev/null
+++ b/net/url_request/url_request_test_job.h
@@ -0,0 +1,110 @@
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef BASE_URL_REQUEST_URL_REQUEST_TEST_JOB_H__
+#define BASE_URL_REQUEST_URL_REQUEST_TEST_JOB_H__
+
+#include "net/url_request/url_request.h"
+#include "net/url_request/url_request_job.h"
+
+// This job type is designed to help with simple unit tests. To use, you
+// probably want to inherit from it to set up the state you want. Then install
+// it as the protocol handler for the "test" scheme.
+//
+// It will respond to three URLs, which you can retrieve using the test_url*
+// getters, which will in turn respond with the corresponding responses returned
+// by test_data*. Any other URLs that begin with "test:" will return an error,
+// which might also be useful; you can use test_url_error() to retrieve a
+// standard one.
+//
+// You can override the known URLs or the response data by overriding Start().
+//
+// When a job is created, it gets put on a queue of pending test jobs. To
+// process jobs on this queue, use ProcessOnePendingMessage, which will process
+// one step of the next job. If the job is incomplete, it will be added to the
+// end of the queue.
class URLRequestTestJob : public URLRequestJob {
 public:
  URLRequestTestJob(URLRequest* request);
  virtual ~URLRequestTestJob() {}

  // the three URLs this handler will respond to
  // FIXME(brettw): we should probably also have a redirect one
  static GURL test_url_1();
  static GURL test_url_2();
  static GURL test_url_3();
  static GURL test_url_error();

  // the data that corresponds to each of the URLs above
  static std::string test_data_1();
  static std::string test_data_2();
  static std::string test_data_3();

  // Processes one pending message from the stack, returning true if any
  // message was processed, or false if there are no more pending request
  // notifications to send.
  static bool ProcessOnePendingMessage();

  // Factory method for protocol factory registration if callers don't subclass
  static URLRequest::ProtocolFactory Factory;

  // Job functions
  virtual void Start();
  virtual bool ReadRawData(char* buf, int buf_size, int *bytes_read);
  virtual void Kill();
  virtual bool GetMimeType(std::string* mime_type);
  virtual void GetResponseInfo(net::HttpResponseInfo* info);

 protected:
  // This is what operation we are going to do next when this job is handled.
  // When the stage is DONE, this job will not be put on the queue.
  enum Stage { WAITING, DATA_AVAILABLE, ALL_DATA, DONE };

  // Call to process the next operation, usually sending a notification, and
  // advancing the stage if necessary. THIS MAY DELETE THE OBJECT, we will
  // return false if the operations are complete, true if there are more.
  bool ProcessNextOperation();

  // Called via InvokeLater to cause callbacks to occur after Start() returns.
  void StartAsync();

  // Current position in the state machine above.
  Stage stage_;

  // The data to send, will be set in Start()
  std::string data_;

  // current offset within data_
  int offset_;

  // Holds the buffer for an asynchronous ReadRawData call
  char* async_buf_;
  int async_buf_size_;
};
+
+#endif // BASE_URL_REQUEST_URL_REQUEST_TEST_JOB_H__
diff --git a/net/url_request/url_request_unittest.cc b/net/url_request/url_request_unittest.cc
new file mode 100644
index 0000000..6f842bb
--- /dev/null
+++ b/net/url_request/url_request_unittest.cc
@@ -0,0 +1,792 @@
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <windows.h>
+#include <shlobj.h>
+#include <algorithm>
+#include <string>
+
+#include "net/url_request/url_request_unittest.h"
+
+#include "base/message_loop.h"
+#include "base/path_service.h"
+#include "base/process_util.h"
+#include "base/string_util.h"
+#include "net/base/load_flags.h"
+#include "net/base/net_errors.h"
+#include "net/base/net_module.h"
+#include "net/base/net_util.h"
+#include "net/disk_cache/disk_cache.h"
+#include "net/http/http_cache.h"
+#include "net/http/http_network_layer.h"
+#include "net/url_request/url_request.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
namespace {

// Test fixture name for the TEST macros below; holds no state.
class URLRequestTest : public testing::Test {
};

// A request context whose transaction factory is an in-memory HTTP cache
// layered over the real network layer.  Owns (and deletes) the factory.
class URLRequestHttpCacheContext : public URLRequestContext {
 public:
  URLRequestHttpCacheContext() {
    http_transaction_factory_ =
        new net::HttpCache(net::HttpNetworkLayer::CreateFactory(NULL),
                           disk_cache::CreateInMemoryCacheBackend(0));
  }

  virtual ~URLRequestHttpCacheContext() {
    delete http_transaction_factory_;
  }
};

// A URLRequest that installs its own caching context so individual tests
// don't have to set one up.
class TestURLRequest : public URLRequest {
 public:
  TestURLRequest(const GURL& url, Delegate* delegate)
      : URLRequest(url, delegate) {
    set_context(new URLRequestHttpCacheContext());
  }
};

// Stub net-module resource provider; the key is ignored.
std::string TestNetResourceProvider(int key) {
  return "header";
}

}  // namespace
+
// NOTE(review): this test body is identical to GetTest below — nothing here
// actually bypasses the cache despite the name. Confirm intent or add a
// LOAD_BYPASS_CACHE-style flag to differentiate the two.
TEST(URLRequestTest, GetTest_NoCache) {
  TestServer server(L"");
  TestDelegate d;
  {
    TestURLRequest r(server.TestServerPage(""), &d);

    r.Start();
    EXPECT_TRUE(r.is_pending());

    MessageLoop::current()->Run();

    EXPECT_EQ(1, d.response_started_count());
    EXPECT_FALSE(d.received_data_before_response());
    EXPECT_NE(0, d.bytes_received());
  }
#ifndef NDEBUG
  // All URLRequest objects should have been destroyed by now.
  DCHECK_EQ(url_request_metrics.object_count,0);
#endif
}

// Basic GET against the local test server: expect a started response and a
// non-empty body.
TEST(URLRequestTest, GetTest) {
  TestServer server(L"");
  TestDelegate d;
  {
    TestURLRequest r(server.TestServerPage(""), &d);

    r.Start();
    EXPECT_TRUE(r.is_pending());

    MessageLoop::current()->Run();

    EXPECT_EQ(1, d.response_started_count());
    EXPECT_FALSE(d.received_data_before_response());
    EXPECT_NE(0, d.bytes_received());
  }
#ifndef NDEBUG
  DCHECK_EQ(url_request_metrics.object_count,0);
#endif
}
+
// Cancel immediately after Start(): OnResponseStarted is still delivered,
// but no body bytes should arrive.
TEST(URLRequestTest, CancelTest) {
  TestDelegate d;
  {
    TestURLRequest r(GURL("http://www.google.com/"), &d);

    r.Start();
    EXPECT_TRUE(r.is_pending());

    r.Cancel();

    MessageLoop::current()->Run();

    // We expect to receive OnResponseStarted even though the request has been
    // cancelled.
    EXPECT_EQ(1, d.response_started_count());
    EXPECT_EQ(0, d.bytes_received());
    EXPECT_FALSE(d.received_data_before_response());
  }
#ifndef NDEBUG
  DCHECK_EQ(url_request_metrics.object_count,0);
#endif
}
+
// Cancel from inside OnResponseStarted: the final status must be CANCELED
// and no body bytes delivered.
TEST(URLRequestTest, CancelTest2) {
  TestServer server(L"");
  TestDelegate d;
  {
    TestURLRequest r(server.TestServerPage(""), &d);

    d.set_cancel_in_response_started(true);

    r.Start();
    EXPECT_TRUE(r.is_pending());

    MessageLoop::current()->Run();

    EXPECT_EQ(1, d.response_started_count());
    EXPECT_EQ(0, d.bytes_received());
    EXPECT_FALSE(d.received_data_before_response());
    EXPECT_EQ(URLRequestStatus::CANCELED, r.status().status());
  }
#ifndef NDEBUG
  DCHECK_EQ(url_request_metrics.object_count,0);
#endif
}
+
// Cancel from inside OnReadCompleted: final status must be CANCELED; the
// amount of data received before the cancel is unspecified.
TEST(URLRequestTest, CancelTest3) {
  TestServer server(L"");
  TestDelegate d;
  {
    TestURLRequest r(server.TestServerPage(""), &d);

    d.set_cancel_in_received_data(true);

    r.Start();
    EXPECT_TRUE(r.is_pending());

    MessageLoop::current()->Run();

    EXPECT_EQ(1, d.response_started_count());
    // There is no guarantee about how much data was received
    // before the cancel was issued. It could have been 0 bytes,
    // or it could have been all the bytes.
    // EXPECT_EQ(0, d.bytes_received());
    EXPECT_FALSE(d.received_data_before_response());
    EXPECT_EQ(URLRequestStatus::CANCELED, r.status().status());
  }
#ifndef NDEBUG
  DCHECK_EQ(url_request_metrics.object_count,0);
#endif
}
+
// Destroying an in-flight request implicitly cancels it; everything must
// clean up without a message loop ever running.
TEST(URLRequestTest, CancelTest4) {
  TestServer server(L"");
  TestDelegate d;
  {
    TestURLRequest r(server.TestServerPage(""), &d);

    r.Start();
    EXPECT_TRUE(r.is_pending());

    // The request will be implicitly canceled when it is destroyed. The
    // test delegate must not post a quit message when this happens because
    // this test doesn't actually have a message loop. The quit message would
    // get put on this thread's message queue and the next test would exit
    // early, causing problems.
    d.set_quit_on_complete(false);
  }
  // expect things to just cleanup properly.

  // we won't actually get a received response here because we've never run the
  // message loop
  EXPECT_FALSE(d.received_data_before_response());
  EXPECT_EQ(0, d.bytes_received());
}
+
// Regression test for bug 990242: cancelling a request that is being served
// from the cache.  First prime the cache, then cancel a cached read.
TEST(URLRequestTest, CancelTest5) {
  TestServer server(L"");
  scoped_refptr<URLRequestContext> context = new URLRequestHttpCacheContext();

  // populate cache
  {
    TestDelegate d;
    URLRequest r(server.TestServerPage("cachetime"), &d);
    r.set_context(context);
    r.Start();
    MessageLoop::current()->Run();
    EXPECT_EQ(URLRequestStatus::SUCCESS, r.status().status());
  }

  // cancel read from cache (see bug 990242)
  {
    TestDelegate d;
    URLRequest r(server.TestServerPage("cachetime"), &d);
    r.set_context(context);
    r.Start();
    r.Cancel();
    MessageLoop::current()->Run();

    EXPECT_EQ(URLRequestStatus::CANCELED, r.status().status());
    EXPECT_EQ(1, d.response_started_count());
    EXPECT_EQ(0, d.bytes_received());
    EXPECT_FALSE(d.received_data_before_response());
  }

#ifndef NDEBUG
  DCHECK_EQ(url_request_metrics.object_count, 0);
#endif
}
+
// POSTs a 20000-byte payload to the echo endpoint repeatedly and checks the
// echoed body matches the upload.
// NOTE(review): the three EXPECTs comparing uploadBytes with d.data_received()
// are redundant (same comparison three ways); one would suffice.  The manual
// new[]/delete[] could also be a scoped_array.
TEST(URLRequestTest, PostTest) {
  TestServer server(L"net/data");

  const int kMsgSize = 20000;  // multiple of 10
  const int kIterations = 50;
  char *uploadBytes = new char[kMsgSize+1];
  char *ptr = uploadBytes;
  char marker = 'a';
  // Fill the buffer with dashes, dropping in an advancing marker letter every
  // 100th 10-byte chunk so corruption/truncation is detectable.
  for(int idx=0; idx<kMsgSize/10; idx++) {
    memcpy(ptr, "----------", 10);
    ptr += 10;
    if (idx % 100 == 0) {
      ptr--;
      *ptr++ = marker;
      if (++marker > 'z')
        marker = 'a';
    }

  }
  uploadBytes[kMsgSize] = '\0';

  scoped_refptr<URLRequestContext> context =
      new URLRequestHttpCacheContext();

  for (int i = 0; i < kIterations; ++i) {
    TestDelegate d;
    URLRequest r(server.TestServerPage("echo"), &d);
    r.set_context(context);
    r.set_method("POST");

    r.AppendBytesToUpload(uploadBytes, kMsgSize);

    r.Start();
    EXPECT_TRUE(r.is_pending());

    MessageLoop::current()->Run();

    ASSERT_EQ(1, d.response_started_count()) << "request failed: " <<
        (int) r.status().status() << ", os error: " << r.status().os_error();

    EXPECT_FALSE(d.received_data_before_response());
    EXPECT_EQ(uploadBytes, d.data_received());
    EXPECT_EQ(memcmp(uploadBytes, d.data_received().c_str(), kMsgSize),0);
    EXPECT_EQ(d.data_received().compare(uploadBytes), 0);
  }
  delete[] uploadBytes;
#ifndef NDEBUG
  DCHECK_EQ(url_request_metrics.object_count,0);
#endif
}
+
// POST with no upload body: the echo endpoint should return an empty body.
TEST(URLRequestTest, PostEmptyTest) {
  TestServer server(L"net/data");
  TestDelegate d;
  {
    TestURLRequest r(server.TestServerPage("echo"), &d);
    r.set_method("POST");

    r.Start();
    EXPECT_TRUE(r.is_pending());

    MessageLoop::current()->Run();

    ASSERT_EQ(1, d.response_started_count()) << "request failed: " <<
        (int) r.status().status() << ", os error: " << r.status().os_error();

    EXPECT_FALSE(d.received_data_before_response());
    EXPECT_TRUE(d.data_received().empty());
  }
#ifndef NDEBUG
  DCHECK_EQ(url_request_metrics.object_count,0);
#endif
}
+
// POSTs a file as the upload body and checks the echoed response matches the
// file's contents; a nonexistent file appended to the upload must be ignored.
// (The misspelled "existant" path below is a deliberate runtime string for a
// missing file — leave it as-is.)
TEST(URLRequestTest, PostFileTest) {
  TestServer server(L"net/data");
  TestDelegate d;
  {
    TestURLRequest r(server.TestServerPage("echo"), &d);
    r.set_method("POST");

    std::wstring dir;
    PathService::Get(base::DIR_EXE, &dir);
    _wchdir(dir.c_str());

    std::wstring path;
    PathService::Get(base::DIR_SOURCE_ROOT, &path);
    file_util::AppendToPath(&path, L"net");
    file_util::AppendToPath(&path, L"data");
    file_util::AppendToPath(&path, L"url_request_unittest");
    file_util::AppendToPath(&path, L"with-headers.html");
    r.AppendFileToUpload(path);

    // This file should just be ignored in the upload stream.
    r.AppendFileToUpload(L"c:\\path\\to\\non\\existant\\file.randomness.12345");

    r.Start();
    EXPECT_TRUE(r.is_pending());

    MessageLoop::current()->Run();

    // Read the file back directly to get the expected echo body.
    HANDLE file = CreateFile(path.c_str(), GENERIC_READ, FILE_SHARE_READ, NULL,
                             OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
    ASSERT_NE(INVALID_HANDLE_VALUE, file);

    DWORD size = GetFileSize(file, NULL);
    scoped_array<char> buf(new char[size]);

    DWORD size_read;
    EXPECT_TRUE(ReadFile(file, buf.get(), size, &size_read, NULL));

    CloseHandle(file);

    EXPECT_EQ(size, size_read);

    ASSERT_EQ(1, d.response_started_count()) << "request failed: " <<
        (int) r.status().status() << ", os error: " << r.status().os_error();

    EXPECT_FALSE(d.received_data_before_response());

    // NOTE(review): compares DWORD (unsigned) against int bytes_received —
    // fine for small files, but a signed/unsigned mismatch warning candidate.
    ASSERT_EQ(size, d.bytes_received());
    EXPECT_EQ(0, memcmp(d.data_received().c_str(), buf.get(), size));
  }
#ifndef NDEBUG
  DCHECK_EQ(url_request_metrics.object_count,0);
#endif
}
+
// about:blank must complete successfully with a zero-length body.
TEST(URLRequestTest, AboutBlankTest) {
  TestDelegate d;
  {
    TestURLRequest r(GURL("about:blank"), &d);

    r.Start();
    EXPECT_TRUE(r.is_pending());

    MessageLoop::current()->Run();

    EXPECT_TRUE(!r.is_pending());
    EXPECT_FALSE(d.received_data_before_response());
    EXPECT_EQ(d.bytes_received(), 0);
  }
#ifndef NDEBUG
  DCHECK_EQ(url_request_metrics.object_count,0);
#endif
}
+
+// Fetches the test executable itself over file:// and verifies the number of
+// bytes delivered matches the file size reported by the OS.
+TEST(URLRequestTest, FileTest) {
+  std::wstring app_path;
+  PathService::Get(base::FILE_EXE, &app_path);
+
+  // Convert the native path into a file: URL.  app_url is a narrow string,
+  // so replace with narrow character values; the original passed wide
+  // wchar_t literals (kPathSeparator, L'/') which only worked via implicit
+  // integral conversion.
+  std::string app_url = WideToUTF8(app_path);
+  std::replace(app_url.begin(), app_url.end(),
+               static_cast<char>(file_util::kPathSeparator), '/');
+  app_url.insert(0, "file:///");
+
+  TestDelegate d;
+  {
+    TestURLRequest r(GURL(app_url), &d);
+
+    r.Start();
+    EXPECT_TRUE(r.is_pending());
+
+    MessageLoop::current()->Run();
+
+    // Ask the OS for the file size to compare against; the original ignored
+    // this call's return value.
+    WIN32_FILE_ATTRIBUTE_DATA data;
+    EXPECT_TRUE(GetFileAttributesEx(app_path.c_str(),
+                                    GetFileExInfoStandard, &data));
+
+    EXPECT_TRUE(!r.is_pending());
+    EXPECT_EQ(1, d.response_started_count());
+    EXPECT_FALSE(d.received_data_before_response());
+    EXPECT_EQ(d.bytes_received(), data.nFileSizeLow);
+  }
+#ifndef NDEBUG
+  DCHECK_EQ(url_request_metrics.object_count,0);
+#endif
+}
+
+// A syntactically invalid URL must be reported as a failure to the delegate.
+TEST(URLRequestTest, InvalidUrlTest) {
+  TestDelegate delegate;
+  {
+    TestURLRequest request(GURL("invalid url"), &delegate);
+
+    request.Start();
+    EXPECT_TRUE(request.is_pending());
+
+    MessageLoop::current()->Run();
+    EXPECT_TRUE(delegate.request_failed());
+  }
+#ifndef NDEBUG
+  DCHECK_EQ(url_request_metrics.object_count,0);
+#endif
+}
+
+/* This test is disabled because it fails on some computers due to proxies
+ returning a page in response to this request rather than reporting failure.
+TEST(URLRequestTest, DnsFailureTest) {
+ TestDelegate d;
+ {
+ URLRequest r(GURL("http://thisisnotavalidurl0123456789foo.com/"), &d);
+
+ r.Start();
+ EXPECT_TRUE(r.is_pending());
+
+ MessageLoop::current()->Run();
+ EXPECT_TRUE(d.request_failed());
+ }
+#ifndef NDEBUG
+ DCHECK_EQ(url_request_metrics.object_count,0);
+#endif
+}
+*/
+
+// Verifies that response headers are parsed and normalized: lookups are
+// case-insensitive and repeated headers are concatenated.
+TEST(URLRequestTest, ResponseHeadersTest) {
+  TestServer server(L"net/data/url_request_unittest");
+  TestDelegate d;
+  TestURLRequest req(server.TestServerPage("files/with-headers.html"), &d);
+  req.Start();
+  MessageLoop::current()->Run();
+
+  const net::HttpResponseHeaders* headers = req.response_headers();
+  // Guard against dereferencing NULL if the request failed outright; the
+  // original crashed here instead of reporting a test failure.
+  ASSERT_TRUE(headers != NULL);
+
+  std::string header;
+  EXPECT_TRUE(headers->GetNormalizedHeader("cache-control", &header));
+  EXPECT_EQ("private", header);
+
+  header.clear();
+  EXPECT_TRUE(headers->GetNormalizedHeader("content-type", &header));
+  EXPECT_EQ("text/html; charset=ISO-8859-1", header);
+
+  // The response has two "X-Multiple-Entries" headers.
+  // This verifies our output has them concatenated together.
+  header.clear();
+  EXPECT_TRUE(headers->GetNormalizedHeader("x-multiple-entries", &header));
+  EXPECT_EQ("a, b", header);
+}
+
+// Fetches the same document plain and bzip2-encoded and checks the decoded
+// bodies are identical.
+TEST(URLRequestTest, BZip2ContentTest) {
+  TestServer server(L"net/data/filter_unittests");
+
+  // for localhost domain, we also should support bzip2 encoding
+  // first, get the original file
+  TestDelegate d1;
+  TestURLRequest req1(server.TestServerPage("realfiles/google.txt"), &d1);
+  req1.Start();
+  MessageLoop::current()->Run();
+
+  const std::string& got_content = d1.data_received();
+
+  // second, get bzip2 content
+  TestDelegate d2;
+  TestURLRequest req2(server.TestServerPage("realbz2files/google.txt"), &d2);
+  req2.Start();
+  MessageLoop::current()->Run();
+
+  const std::string& got_bz2_content = d2.data_received();
+
+  // EXPECT_EQ (rather than EXPECT_TRUE on ==) prints both values when the
+  // comparison fails, which makes diagnosing a decode bug far easier.
+  EXPECT_EQ(got_content, got_bz2_content);
+}
+
+// Same as BZip2ContentTest, but the server delivers the BZ2 header in two
+// delayed chunks to exercise incremental-header handling (bug 867161).
+TEST(URLRequestTest, BZip2ContentTest_IncrementalHeader) {
+  TestServer server(L"net/data/filter_unittests");
+
+  // for localhost domain, we also should support bzip2 encoding
+  // first, get the original file
+  TestDelegate d1;
+  TestURLRequest req1(server.TestServerPage("realfiles/google.txt"), &d1);
+  req1.Start();
+  MessageLoop::current()->Run();
+
+  const std::string& got_content = d1.data_received();
+
+  // second, get bzip2 content. ask the testserver to send the BZ2 header in
+  // two chunks with a delay between them. this tests our fix for bug 867161.
+  TestDelegate d2;
+  TestURLRequest req2(
+      server.TestServerPage("realbz2files/google.txt?incremental-header"),
+      &d2);
+  req2.Start();
+  MessageLoop::current()->Run();
+
+  const std::string& got_bz2_content = d2.data_received();
+
+  // EXPECT_EQ prints both bodies on failure, unlike EXPECT_TRUE(a == b).
+  EXPECT_EQ(got_content, got_bz2_content);
+}
+
+// Creates a Windows .lnk shortcut pointing at a test file, fetches the
+// shortcut over file://, and verifies the target file's contents are what
+// gets delivered (shortcut resolution is reported as a redirect).
+TEST(URLRequestTest, ResolveShortcutTest) {
+  std::wstring app_path;
+  PathService::Get(base::DIR_SOURCE_ROOT, &app_path);
+  file_util::AppendToPath(&app_path, L"net");
+  file_util::AppendToPath(&app_path, L"data");
+  file_util::AppendToPath(&app_path, L"url_request_unittest");
+  file_util::AppendToPath(&app_path, L"with-headers.html");
+
+  std::wstring lnk_path = app_path + L".lnk";
+
+  HRESULT result;
+  IShellLink *shell = NULL;
+  IPersistFile *persist = NULL;
+
+  CoInitialize(NULL);
+  // Temporarily create a shortcut for test
+  result = CoCreateInstance(CLSID_ShellLink, NULL,
+                            CLSCTX_INPROC_SERVER, IID_IShellLink,
+                            reinterpret_cast<LPVOID*>(&shell));
+  EXPECT_TRUE(SUCCEEDED(result));
+  result = shell->QueryInterface(IID_IPersistFile,
+                                 reinterpret_cast<LPVOID*>(&persist));
+  EXPECT_TRUE(SUCCEEDED(result));
+  result = shell->SetPath(app_path.c_str());
+  EXPECT_TRUE(SUCCEEDED(result));
+  result = shell->SetDescription(L"ResolveShortcutTest");
+  EXPECT_TRUE(SUCCEEDED(result));
+  result = persist->Save(lnk_path.c_str(), TRUE);
+  EXPECT_TRUE(SUCCEEDED(result));
+  if (persist)
+    persist->Release();
+  if (shell)
+    shell->Release();
+
+  TestDelegate d;
+  {
+    TestURLRequest r(net_util::FilePathToFileURL(lnk_path), &d);
+
+    r.Start();
+    EXPECT_TRUE(r.is_pending());
+
+    MessageLoop::current()->Run();
+
+    // Read the shortcut's target directly so the delivered body can be
+    // compared against it.  The original ignored this call's return value.
+    WIN32_FILE_ATTRIBUTE_DATA data;
+    EXPECT_TRUE(GetFileAttributesEx(app_path.c_str(),
+                                    GetFileExInfoStandard, &data));
+    HANDLE file = CreateFile(app_path.c_str(), GENERIC_READ,
+                             FILE_SHARE_READ, NULL, OPEN_EXISTING,
+                             FILE_ATTRIBUTE_NORMAL, NULL);
+    EXPECT_NE(INVALID_HANDLE_VALUE, file);
+    scoped_array<char> buffer(new char[data.nFileSizeLow]);
+    DWORD read_size;
+    // The original declared a second, shadowing 'result' (BOOL over the
+    // outer HRESULT) and never checked it; use a distinct name and verify
+    // that the read actually succeeded.
+    BOOL read_ok = ReadFile(file, buffer.get(), data.nFileSizeLow,
+                            &read_size, NULL);
+    EXPECT_TRUE(read_ok);
+    std::string content(buffer.get(), read_size);
+    CloseHandle(file);
+
+    EXPECT_TRUE(!r.is_pending());
+    EXPECT_EQ(1, d.received_redirect_count());
+    EXPECT_EQ(content, d.data_received());
+  }
+
+  // Clean the shortcut
+  DeleteFile(lnk_path.c_str());
+  CoUninitialize();
+
+#ifndef NDEBUG
+  DCHECK_EQ(url_request_metrics.object_count,0);
+#endif
+}
+
+// The test page is served with a non-canonical Content-Type header; the
+// network stack should hand back a normalized MIME type and charset.
+TEST(URLRequestTest, ContentTypeNormalizationTest) {
+  TestServer server(L"net/data/url_request_unittest");
+  TestDelegate delegate;
+  TestURLRequest request(
+      server.TestServerPage("files/content-type-normalization.html"),
+      &delegate);
+  request.Start();
+  MessageLoop::current()->Run();
+
+  std::string mime_type;
+  request.GetMimeType(&mime_type);
+  EXPECT_EQ("text/html", mime_type);
+
+  std::string charset;
+  request.GetCharset(&charset);
+  EXPECT_EQ("utf-8", charset);
+  request.Cancel();
+}
+
+// Starts a file:// directory listing and cancels it while a read is still
+// pending, checking that no request objects are leaked.
+TEST(URLRequestTest, FileDirCancelTest) {
+  // Put in mock resource provider.
+  NetModule::SetResourceProvider(TestNetResourceProvider);
+
+  TestDelegate delegate;
+  {
+    // Build the net/data directory path; the trailing empty component makes
+    // it end with a separator, i.e. a directory URL.
+    std::wstring dir_path;
+    PathService::Get(base::DIR_SOURCE_ROOT, &dir_path);
+    file_util::AppendToPath(&dir_path, L"net");
+    file_util::AppendToPath(&dir_path, L"data");
+    file_util::AppendToPath(&dir_path, L"");
+
+    TestURLRequest request(net_util::FilePathToFileURL(dir_path), &delegate);
+    request.Start();
+    EXPECT_TRUE(request.is_pending());
+
+    delegate.set_cancel_in_received_data_pending(true);
+
+    MessageLoop::current()->Run();
+  }
+#ifndef NDEBUG
+  DCHECK_EQ(url_request_metrics.object_count,0);
+#endif
+
+  // Take out mock resource provider.
+  NetModule::SetResourceProvider(NULL);
+}
+
+// An http -> file: redirect is unsafe and must fail with ERR_UNSAFE_REDIRECT.
+TEST(URLRequestTest, RestrictRedirects) {
+  TestServer server(L"net/data/url_request_unittest");
+  TestDelegate delegate;
+  TestURLRequest request(
+      server.TestServerPage("files/redirect-to-file.html"), &delegate);
+  request.Start();
+  MessageLoop::current()->Run();
+
+  EXPECT_EQ(URLRequestStatus::FAILED, request.status().status());
+  EXPECT_EQ(net::ERR_UNSAFE_REDIRECT, request.status().os_error());
+}
+
+// Credentials embedded in the referrer URL must be stripped before sending.
+TEST(URLRequestTest, NoUserPassInReferrer) {
+  TestServer server(L"net/data/url_request_unittest");
+  TestDelegate delegate;
+  TestURLRequest request(
+      server.TestServerPage("echoheader?Referer"), &delegate);
+  request.set_referrer("http://user:pass@foo.com/");
+  request.Start();
+  MessageLoop::current()->Run();
+
+  EXPECT_EQ(std::string("http://foo.com/"), delegate.data_received());
+}
+
+// Cancelling from within OnReceivedRedirect must deliver no body and leave
+// the request in the CANCELED state.
+TEST(URLRequestTest, CancelRedirect) {
+  TestServer server(L"net/data/url_request_unittest");
+  TestDelegate delegate;
+  {
+    delegate.set_cancel_in_received_redirect(true);
+    TestURLRequest request(
+        server.TestServerPage("files/redirect-test.html"), &delegate);
+    request.Start();
+    MessageLoop::current()->Run();
+
+    EXPECT_EQ(1, delegate.response_started_count());
+    EXPECT_EQ(0, delegate.bytes_received());
+    EXPECT_FALSE(delegate.received_data_before_response());
+    EXPECT_EQ(URLRequestStatus::CANCELED, request.status().status());
+  }
+}
+
+// Exercises HTTP cache "Vary" handling: a request repeating the same varied
+// header value should hit the cache, while a different value should miss.
+// Cache hit/miss is inferred from whether response_time() changes.
+TEST(URLRequestTest, VaryHeader) {
+  TestServer server(L"net/data/url_request_unittest");
+
+  scoped_refptr<URLRequestContext> context = new URLRequestHttpCacheContext();
+
+  Time response_time;
+
+  // populate the cache
+  {
+    TestDelegate d;
+    URLRequest req(server.TestServerPage("echoheader?foo"), &d);
+    req.set_context(context);
+    req.SetExtraRequestHeaders("foo:1");
+    req.Start();
+    MessageLoop::current()->Run();
+
+    response_time = req.response_time();
+  }
+
+  // Make sure that the response time of a future response will be in the
+  // future!
+  Sleep(10);
+
+  // expect a cache hit
+  {
+    TestDelegate d;
+    URLRequest req(server.TestServerPage("echoheader?foo"), &d);
+    req.set_context(context);
+    req.SetExtraRequestHeaders("foo:1");
+    req.Start();
+    MessageLoop::current()->Run();
+
+    // Same varied header value => served from the cache, so the response
+    // time is unchanged.
+    EXPECT_TRUE(req.response_time() == response_time);
+  }
+
+  // expect a cache miss
+  {
+    TestDelegate d;
+    URLRequest req(server.TestServerPage("echoheader?foo"), &d);
+    req.set_context(context);
+    req.SetExtraRequestHeaders("foo:2");
+    req.Start();
+    MessageLoop::current()->Run();
+
+    // Different varied header value => new network fetch => newer time.
+    EXPECT_FALSE(req.response_time() == response_time);
+  }
+}
+
+// Checks that basic-auth credentials supplied through the delegate are used,
+// and that a revalidation of the cacheable authed page is served from the
+// cache (detected by an unchanged response_time()).
+TEST(URLRequestTest, BasicAuth) {
+  scoped_refptr<URLRequestContext> context = new URLRequestHttpCacheContext();
+  TestServer server(L"");
+
+  Time response_time;
+
+  // populate the cache
+  {
+    TestDelegate d;
+    d.set_username(L"user");
+    d.set_password(L"secret");
+
+    URLRequest r(server.TestServerPage("auth-basic"), &d);
+    r.set_context(context);
+    r.Start();
+
+    MessageLoop::current()->Run();
+
+    // The server echoes the credentials it accepted.
+    EXPECT_TRUE(d.data_received().find("user/secret") != std::string::npos);
+
+    response_time = r.response_time();
+  }
+
+  // Let some time pass so we can ensure that a future response will have a
+  // response time value in the future.
+  Sleep(10 /* milliseconds */);
+
+  // repeat request with end-to-end validation. since auth-basic results in a
+  // cachable page, we expect this test to result in a 304. in which case, the
+  // response should be fetched from the cache.
+  {
+    TestDelegate d;
+    d.set_username(L"user");
+    d.set_password(L"secret");
+
+    URLRequest r(server.TestServerPage("auth-basic"), &d);
+    r.set_context(context);
+    r.set_load_flags(net::LOAD_VALIDATE_CACHE);
+    r.Start();
+
+    MessageLoop::current()->Run();
+
+    EXPECT_TRUE(d.data_received().find("user/secret") != std::string::npos);
+
+    // Should be the same cached document, which means that the response time
+    // should not have changed.
+    EXPECT_TRUE(response_time == r.response_time());
+  }
+}
diff --git a/net/url_request/url_request_unittest.h b/net/url_request/url_request_unittest.h
new file mode 100644
index 0000000..7e40710
--- /dev/null
+++ b/net/url_request/url_request_unittest.h
@@ -0,0 +1,380 @@
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef BASE_URL_REQUEST_URL_REQUEST_UNITTEST_H_
+#define BASE_URL_REQUEST_URL_REQUEST_UNITTEST_H_
+
+#include <sstream>
+#include <string>
+
+#include "base/file_util.h"
+#include "base/message_loop.h"
+#include "base/path_service.h"
+#include "base/process_util.h"
+#include "base/string_util.h"
+#include "net/base/net_errors.h"
+#include "net/http/http_network_layer.h"
+#include "net/url_request/url_request.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+// Host and port the external Python test server listens on by default.
+// NOTE(review): kDefaultHostName is a std::string defined in a header, so
+// every translation unit that includes this file gets its own copy plus a
+// static initializer — consider a const char[] instead.
+const int kDefaultPort = 1337;
+const std::string kDefaultHostName("localhost");
+
+// This URLRequestContext does not use a local cache.
+// This URLRequestContext does not use a local cache.
+class TestURLRequestContext : public URLRequestContext {
+ public:
+  TestURLRequestContext() {
+    // Plain network layer with no HttpCache in front of it.  The factory is
+    // owned by this context and freed in the destructor.  NOTE(review): the
+    // NULL argument presumably means "no proxy configuration" — confirm
+    // against HttpNetworkLayer::CreateFactory.
+    http_transaction_factory_ = net::HttpNetworkLayer::CreateFactory(NULL);
+  }
+
+  virtual ~TestURLRequestContext() {
+    delete http_transaction_factory_;
+  }
+};
+
+// Delegate used by the tests in this file.  It records everything the
+// request reports (redirects, response start, received data, failure) and
+// can be configured to cancel the request at various points in its
+// lifecycle.  By default it quits the current MessageLoop when the request
+// completes, so tests can simply call MessageLoop::current()->Run().
+class TestDelegate : public URLRequest::Delegate {
+ public:
+  TestDelegate()
+      : cancel_in_rr_(false),
+        cancel_in_rs_(false),
+        cancel_in_rd_(false),
+        cancel_in_rd_pending_(false),
+        quit_on_complete_(true),
+        response_started_count_(0),
+        received_bytes_count_(0),
+        received_redirect_count_(0),
+        received_data_before_response_(false),
+        request_failed_(false) {
+  }
+
+  // Counts redirects and optionally cancels from inside the callback.
+  virtual void OnReceivedRedirect(URLRequest* request, const GURL& new_url) {
+    received_redirect_count_++;
+    if (cancel_in_rr_)
+      request->Cancel();
+  }
+
+  // Records the response start; on success kicks off the first Read(),
+  // otherwise (failure/cancel) finishes the request immediately.
+  virtual void OnResponseStarted(URLRequest* request) {
+    // It doesn't make sense for the request to have IO pending at this point.
+    DCHECK(!request->status().is_io_pending());
+
+    response_started_count_++;
+    if (cancel_in_rs_) {
+      request->Cancel();
+      OnResponseCompleted(request);
+    } else if (!request->status().is_success()) {
+      DCHECK(request->status().status() == URLRequestStatus::FAILED ||
+             request->status().status() == URLRequestStatus::CANCELED);
+      request_failed_ = true;
+      OnResponseCompleted(request);
+    } else {
+      // Initiate the first read.
+      int bytes_read = 0;
+      if (request->Read(buf_, sizeof(buf_), &bytes_read))
+        OnReadCompleted(request, bytes_read);
+      else if (!request->status().is_io_pending())
+        OnResponseCompleted(request);
+    }
+  }
+
+  // Accumulates received bytes and keeps issuing synchronous reads until
+  // Read() returns false (either EOF/error or pending async IO).
+  virtual void OnReadCompleted(URLRequest* request, int bytes_read) {
+    // It doesn't make sense for the request to have IO pending at this point.
+    DCHECK(!request->status().is_io_pending());
+
+    if (response_started_count_ == 0)
+      received_data_before_response_ = true;
+
+    if (cancel_in_rd_)
+      request->Cancel();
+
+    if (bytes_read >= 0) {
+      // There is data to read.
+      received_bytes_count_ += bytes_read;
+
+      // consume the data
+      data_received_.append(buf_, bytes_read);
+    }
+
+    // If it was not end of stream, request to read more.
+    if (request->status().is_success() && bytes_read > 0) {
+      bytes_read = 0;
+      while (request->Read(buf_, sizeof(buf_), &bytes_read)) {
+        if (bytes_read > 0) {
+          data_received_.append(buf_, bytes_read);
+          received_bytes_count_ += bytes_read;
+        } else {
+          break;
+        }
+      }
+    }
+    if (!request->status().is_io_pending())
+      OnResponseCompleted(request);
+    else if (cancel_in_rd_pending_)
+      request->Cancel();
+  }
+
+  // Quits the message loop so the driving test can resume.
+  void OnResponseCompleted(URLRequest* request) {
+    if (quit_on_complete_)
+      MessageLoop::current()->Quit();
+  }
+
+  // Supplies the configured credentials, or cancels auth if none were set.
+  void OnAuthRequired(URLRequest* request, AuthChallengeInfo* auth_info) {
+    if (!username_.empty() || !password_.empty()) {
+      request->SetAuth(username_, password_);
+    } else {
+      request->CancelAuth();
+    }
+  }
+
+  virtual void OnSSLCertificateError(URLRequest* request,
+                                     int cert_error,
+                                     X509Certificate* cert) {
+    // Ignore SSL errors, we test the server is started and shut it down by
+    // performing GETs, no security restrictions should apply as we always want
+    // these GETs to go through.
+    request->ContinueDespiteLastError();
+  }
+
+  // options for controlling behavior: where (if anywhere) to cancel.
+  void set_cancel_in_received_redirect(bool val) { cancel_in_rr_ = val; }
+  void set_cancel_in_response_started(bool val) { cancel_in_rs_ = val; }
+  void set_cancel_in_received_data(bool val) { cancel_in_rd_ = val; }
+  void set_cancel_in_received_data_pending(bool val) {
+    cancel_in_rd_pending_ = val;
+  }
+  void set_quit_on_complete(bool val) { quit_on_complete_ = val; }
+  void set_username(const std::wstring& u) { username_ = u; }
+  void set_password(const std::wstring& p) { password_ = p; }
+
+  // query state
+  const std::string& data_received() const { return data_received_; }
+  int bytes_received() const { return static_cast<int>(data_received_.size()); }
+  int response_started_count() const { return response_started_count_; }
+  int received_redirect_count() const { return received_redirect_count_; }
+  bool received_data_before_response() const {
+    return received_data_before_response_;
+  }
+  bool request_failed() const { return request_failed_; }
+
+ private:
+  // options for controlling behavior
+  bool cancel_in_rr_;
+  bool cancel_in_rs_;
+  bool cancel_in_rd_;
+  bool cancel_in_rd_pending_;
+  bool quit_on_complete_;
+
+  std::wstring username_;
+  std::wstring password_;
+
+  // tracks status of callbacks
+  int response_started_count_;
+  int received_bytes_count_;
+  int received_redirect_count_;
+  bool received_data_before_response_;
+  bool request_failed_;
+  std::string data_received_;
+
+  // our read buffer
+  char buf_[4096];
+};
+
+// This object bounds the lifetime of an external python-based HTTP server
+// that can provide various responses useful for testing.
+class TestServer : public process_util::ProcessFilter {
+ public:
+ TestServer(const std::wstring& document_root)
+ : context_(new TestURLRequestContext),
+ process_handle_(NULL),
+ is_shutdown_(true) {
+ Init(kDefaultHostName, kDefaultPort, document_root, std::wstring());
+ }
+
+ virtual ~TestServer() {
+ Shutdown();
+ }
+
+ // Implementation of ProcessFilter
+ virtual bool Includes(uint32 pid, uint32 parent_pid) const {
+ // This function may be called after Shutdown(), in which process_handle_ is
+ // set to NULL. Since no process handle is set, it can't be included in the
+ // filter.
+ if (!process_handle_)
+ return false;
+ return pid == process_util::GetProcId(process_handle_);
+ }
+
+ GURL TestServerPage(const std::string& path) {
+ return GURL(base_address_ + path);
+ }
+
+ GURL TestServerPageW(const std::wstring& path) {
+ return GURL(UTF8ToWide(base_address_) + path);
+ }
+
+ // A subclass may wish to send the request in a different manner
+ virtual bool MakeGETRequest(const std::string& page_name) {
+ TestDelegate d;
+ URLRequest r(TestServerPage(page_name), &d);
+ r.set_context(context_);
+ r.set_method("GET");
+ r.Start();
+ EXPECT_TRUE(r.is_pending());
+
+ MessageLoop::current()->Run();
+
+ return r.status().is_success();
+ }
+
+ protected:
+ struct ManualInit {};
+
+ // Used by subclasses that need to defer initialization until they are fully
+ // constructed. The subclass should call Init once it is ready (usually in
+ // its constructor).
+ TestServer(ManualInit)
+ : context_(new TestURLRequestContext),
+ process_handle_(NULL),
+ is_shutdown_(true) {
+ }
+
+ virtual std::string scheme() { return std::string("http"); }
+
+ // This is in a separate function so that we can have assertions and so that
+ // subclasses can call this later.
+ void Init(const std::string& host_name, int port,
+ const std::wstring& document_root,
+ const std::wstring& cert_path) {
+ std::stringstream ss;
+ std::string port_str;
+ ss << port ? port : kDefaultPort;
+ ss >> port_str;
+ base_address_ = scheme() + "://" + host_name + ":" + port_str + "/";
+
+ std::wstring testserver_path;
+ ASSERT_TRUE(PathService::Get(base::DIR_SOURCE_ROOT, &testserver_path));
+ file_util::AppendToPath(&testserver_path, L"net");
+ file_util::AppendToPath(&testserver_path, L"tools");
+ file_util::AppendToPath(&testserver_path, L"testserver");
+ file_util::AppendToPath(&testserver_path, L"testserver.py");
+
+ ASSERT_TRUE(PathService::Get(base::DIR_SOURCE_ROOT, &python_runtime_));
+ file_util::AppendToPath(&python_runtime_, L"third_party");
+ file_util::AppendToPath(&python_runtime_, L"python_24");
+ file_util::AppendToPath(&python_runtime_, L"python.exe");
+
+ std::wstring test_data_directory;
+ PathService::Get(base::DIR_SOURCE_ROOT, &test_data_directory);
+ std::wstring normalized_document_root = document_root;
+ std::replace(normalized_document_root.begin(),
+ normalized_document_root.end(),
+ L'/', file_util::kPathSeparator);
+ file_util::AppendToPath(&test_data_directory, normalized_document_root);
+
+ std::wstring command_line =
+ L"\"" + python_runtime_ + L"\" " + L"\"" + testserver_path +
+ L"\" --port=" + UTF8ToWide(port_str) + L" --data-dir=\"" +
+ test_data_directory + L"\"";
+ if (!cert_path.empty()) {
+ command_line.append(L" --https=\"");
+ command_line.append(cert_path);
+ command_line.append(L"\"");
+ }
+
+ ASSERT_TRUE(
+ process_util::LaunchApp(command_line, false, true, &process_handle_)) <<
+ "Failed to launch " << command_line;
+
+ // Verify that the webserver is actually started.
+ // Otherwise tests can fail if they run faster than Python can start.
+ int retries = 10;
+ bool success;
+ while ((success = MakeGETRequest("hello.html")) == false && retries > 0) {
+ retries--;
+ ::Sleep(500);
+ }
+ ASSERT_TRUE(success) << "Webserver not starting properly.";
+
+ is_shutdown_ = false;
+ }
+
+ void Shutdown() {
+ if (is_shutdown_)
+ return;
+
+ // here we append the time to avoid problems where the kill page
+ // is being cached rather than being executed on the server
+ std::ostringstream page_name;
+ page_name << "kill?" << GetTickCount();
+ int retry_count = 5;
+ while (retry_count > 0) {
+ bool r = MakeGETRequest(page_name.str());
+ // BUG #1048625 causes the kill GET to fail. For now we just retry.
+ // Once the bug is fixed, we should remove the while loop and put back
+ // the following DCHECK.
+ // DCHECK(r);
+ if (r)
+ break;
+ retry_count--;
+ }
+ // Make sure we were successfull in stopping the testserver.
+ DCHECK(retry_count > 0);
+
+ if (process_handle_) {
+ CloseHandle(process_handle_);
+ process_handle_ = NULL;
+ }
+
+ // Make sure we don't leave any stray testserver processes laying around.
+ std::wstring testserver_name =
+ file_util::GetFilenameFromPath(python_runtime_);
+ process_util::CleanupProcesses(testserver_name, 10000, 1, this);
+ EXPECT_EQ(0, process_util::GetProcessCount(testserver_name, this));
+
+ is_shutdown_ = true;
+ }
+
+ private:
+ scoped_refptr<TestURLRequestContext> context_;
+ std::string base_address_;
+ std::wstring python_runtime_;
+ HANDLE process_handle_;
+ bool is_shutdown_;
+};
+
+// A TestServer variant that serves over https: the Python test server is
+// launched with --https pointing at |cert_path|.
+class HTTPSTestServer : public TestServer {
+ public:
+  HTTPSTestServer(const std::string& host_name, int port,
+                  const std::wstring& document_root,
+                  const std::wstring& cert_path)
+      : TestServer(ManualInit()) {
+    Init(host_name, port, document_root, cert_path);
+  }
+
+  virtual std::string scheme() { return std::string("https"); }
+};
+
+#endif // BASE_URL_REQUEST_URL_REQUEST_UNITTEST_H_
diff --git a/net/url_request/url_request_view_cache_job.cc b/net/url_request/url_request_view_cache_job.cc
new file mode 100644
index 0000000..46943a4
--- /dev/null
+++ b/net/url_request/url_request_view_cache_job.cc
@@ -0,0 +1,194 @@
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "net/url_request/url_request_view_cache_job.h"
+
+#include "base/string_util.h"
+#include "net/base/escape.h"
+#include "net/disk_cache/disk_cache.h"
+#include "net/http/http_cache.h"
+#include "net/http/http_response_info.h"
+
+#define VIEW_CACHE_HEAD \
+ "<html><body><table>"
+
+#define VIEW_CACHE_TAIL \
+ "</table></body></html>"
+
+// Classic hex+ASCII dump: 16 bytes per output row, each row prefixed with
+// the offset of its first byte.  ASCII output is HTML-escaped; bytes outside
+// the printable range are shown as '.'.
+static void HexDump(const char *buf, size_t buf_len, std::string* result) {
+  const size_t kBytesPerRow = 16;
+  int offset = 0;
+
+  while (buf_len) {
+    StringAppendF(result, "%08x: ", offset);
+    offset += kBytesPerRow;
+
+    const unsigned char* row = (const unsigned char *) buf;
+    const size_t row_len = std::min(kBytesPerRow, buf_len);
+
+    // print hex codes:
+    size_t i;
+    for (i = 0; i < row_len; ++i)
+      StringAppendF(result, "%02x  ", row[i]);
+    for (; i < kBytesPerRow; ++i)
+      result->append("    ");
+
+    // print ASCII glyphs if possible:
+    for (i = 0; i < row_len; ++i) {
+      if (row[i] < 0x7F && row[i] > 0x1F) {
+        AppendEscapedCharForHTML(row[i], result);
+      } else {
+        result->push_back('.');
+      }
+    }
+
+    result->push_back('\n');
+
+    buf += row_len;
+    buf_len -= row_len;
+  }
+}
+
+// Renders one cache entry as a table row whose key doubles as a
+// view-cache: link target.
+static std::string FormatEntryInfo(disk_cache::Entry* entry) {
+  const std::string key = EscapeForHTML(entry->GetKey());
+  std::string row;
+  row.append("<tr><td><a href=\"view-cache:");
+  row.append(key);
+  row.append("\">");
+  row.append(key);
+  row.append("</a></td></tr>");
+  return row;
+}
+
+// Renders one cache entry in full: its key, the stored response headers (if
+// any), and a hex dump of each of the entry's two data streams.
+static std::string FormatEntryDetails(disk_cache::Entry* entry) {
+  std::string result = EscapeForHTML(entry->GetKey());
+
+  net::HttpResponseInfo response;
+  net::HttpCache::ReadResponseInfo(entry, &response);
+
+  if (response.headers) {
+    result.append("<hr><pre>");
+    result.append(EscapeForHTML(response.headers->GetStatusLine()));
+    result.push_back('\n');
+
+    void* iter = NULL;
+    std::string name, value;
+    while (response.headers->EnumerateHeaderLines(&iter, &name, &value)) {
+      result.append(EscapeForHTML(name));
+      result.append(": ");
+      result.append(EscapeForHTML(value));
+      result.push_back('\n');
+    }
+    result.append("</pre>");
+  }
+
+  // Dump both of the entry's data streams as hex.
+  for (int i = 0; i < 2; ++i) {
+    result.append("<hr><pre>");
+
+    int data_size = entry->GetDataSize(i);
+
+    char* data = new char[data_size];
+    if (entry->ReadData(i, 0, data, data_size, NULL) == data_size)
+      HexDump(data, data_size, &result);
+    // BUG FIX: the buffer was previously leaked on every call.
+    delete[] data;
+
+    result.append("</pre>");
+  }
+
+  return result;
+}
+
+// Renders the backend's name/value statistics as "name: value<br/>" lines.
+static std::string FormatStatistics(disk_cache::Backend* disk_cache) {
+  typedef std::pair<std::string, std::string> StatPair;
+  std::vector<StatPair> stats;
+  disk_cache->GetStats(&stats);
+
+  std::string result;
+  for (std::vector<StatPair>::const_iterator it = stats.begin();
+       it != stats.end(); ++it) {
+    result.append(it->first);
+    result.append(": ");
+    result.append(it->second);
+    result.append("<br/>\n");
+  }
+  return result;
+}
+
+// static
+URLRequestJob* URLRequestViewCacheJob::Factory(URLRequest* request,
+                                               const std::string& scheme) {
+  // |scheme| is ignored; presumably this factory is only ever registered
+  // for "view-cache" — confirm at the registration site.
+  return new URLRequestViewCacheJob(request);
+}
+
+// Builds the entire response body synchronously.  Three URL forms are
+// handled:
+//   view-cache:       -> HTML table listing every entry in the cache
+//   view-cache:stats  -> backend statistics
+//   view-cache:<key>  -> headers plus hex dump of the entry for <key>
+// Always returns true; errors are reported as human-readable page text.
+bool URLRequestViewCacheJob::GetData(std::string* mime_type,
+                                     std::string* charset,
+                                     std::string* data) const {
+  mime_type->assign("text/html");
+  charset->assign("UTF-8");
+
+  disk_cache::Backend* disk_cache = GetDiskCache();
+  if (!disk_cache) {
+    // No cache attached to this request's context; still produce a page.
+    data->assign("no disk cache");
+    return true;
+  }
+
+  if (request_->url().spec() == "view-cache:") {
+    data->assign(VIEW_CACHE_HEAD);
+    void* iter = NULL;
+    disk_cache::Entry* entry;
+    while (disk_cache->OpenNextEntry(&iter, &entry)) {
+      data->append(FormatEntryInfo(entry));
+      entry->Close();
+    }
+    data->append(VIEW_CACHE_TAIL);
+  } else if (request_->url().spec() == "view-cache:stats") {
+    data->assign(FormatStatistics(disk_cache));
+  } else {
+    // Anything else: treat the URL path as a cache key.
+    disk_cache::Entry* entry;
+    if (disk_cache->OpenEntry(request_->url().path(), &entry)) {
+      data->assign(FormatEntryDetails(entry));
+      entry->Close();
+    } else {
+      data->assign("no matching cache entry");
+    }
+  }
+  return true;
+}
+
+// Walks request -> context -> transaction factory -> HTTP cache, returning
+// NULL as soon as any link in the chain is missing.
+disk_cache::Backend* URLRequestViewCacheJob::GetDiskCache() const {
+  if (!request_->context())
+    return NULL;
+  if (!request_->context()->http_transaction_factory())
+    return NULL;
+
+  net::HttpCache* http_cache =
+      request_->context()->http_transaction_factory()->GetCache();
+
+  return http_cache ? http_cache->disk_cache() : NULL;
+}
diff --git a/net/url_request/url_request_view_cache_job.h b/net/url_request/url_request_view_cache_job.h
new file mode 100644
index 0000000..db01664
--- /dev/null
+++ b/net/url_request/url_request_view_cache_job.h
@@ -0,0 +1,56 @@
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef NET_URL_REQUEST_URL_REQUEST_VIEW_CACHE_JOB_H__
+#define NET_URL_REQUEST_URL_REQUEST_VIEW_CACHE_JOB_H__
+
+#include "net/url_request/url_request_simple_job.h"
+
+namespace disk_cache {
+class Backend;
+}
+
+// A job subclass that implements the view-cache: protocol, which simply
+// provides a debug view of the cache or of a particular cache entry.
+class URLRequestViewCacheJob : public URLRequestSimpleJob {
+ public:
+  // explicit: prevents accidental implicit conversion from URLRequest*
+  // (per style rules for single-argument constructors).
+  explicit URLRequestViewCacheJob(URLRequest* request)
+      : URLRequestSimpleJob(request) {}
+
+  // ProtocolFactory to register with URLRequest for the view-cache scheme.
+  static URLRequest::ProtocolFactory Factory;
+
+  // override from URLRequestSimpleJob
+  virtual bool GetData(std::string* mime_type,
+                       std::string* charset,
+                       std::string* data) const;
+
+ private:
+  // Returns the disk cache backend reached via the request's context, or
+  // NULL if the context, transaction factory, or HTTP cache is absent.
+  disk_cache::Backend* GetDiskCache() const;
+};
+
+#endif // NET_URL_REQUEST_URL_REQUEST_VIEW_CACHE_JOB_H__