-rw-r--r--  android_webview/browser/net/android_stream_reader_url_request_job.cc | 66
-rw-r--r--  android_webview/browser/net/android_stream_reader_url_request_job.h | 4
-rw-r--r--  chrome/browser/chromeos/fileapi/external_file_url_request_job.cc | 64
-rw-r--r--  chrome/browser/chromeos/fileapi/external_file_url_request_job.h | 8
-rw-r--r--  content/browser/android/url_request_content_job.cc | 81
-rw-r--r--  content/browser/android/url_request_content_job.h | 6
-rw-r--r--  content/browser/appcache/appcache_url_request_job.cc | 14
-rw-r--r--  content/browser/appcache/appcache_url_request_job.h | 2
-rw-r--r--  content/browser/fileapi/file_writer_delegate_unittest.cc | 8
-rw-r--r--  content/browser/net/view_http_cache_job_factory.cc | 23
-rw-r--r--  content/browser/service_worker/service_worker_read_from_cache_job.cc | 71
-rw-r--r--  content/browser/service_worker/service_worker_read_from_cache_job.h | 3
-rw-r--r--  content/browser/service_worker/service_worker_url_request_job.cc | 76
-rw-r--r--  content/browser/service_worker/service_worker_url_request_job.h | 2
-rw-r--r--  content/browser/service_worker/service_worker_write_to_cache_job.cc | 205
-rw-r--r--  content/browser/service_worker/service_worker_write_to_cache_job.h | 32
-rw-r--r--  content/browser/streams/stream_url_request_job.cc | 63
-rw-r--r--  content/browser/streams/stream_url_request_job.h | 2
-rw-r--r--  content/browser/webui/url_data_manager_backend.cc | 37
-rw-r--r--  content/test/net/url_request_abort_on_end_job.cc | 17
-rw-r--r--  content/test/net/url_request_abort_on_end_job.h | 2
-rw-r--r--  ios/web/webui/url_data_manager_ios_backend.cc | 36
-rw-r--r--  mojo/services/network/url_loader_impl_apptest.cc | 16
-rw-r--r--  net/test/url_request/url_request_failed_job.cc | 51
-rw-r--r--  net/test/url_request/url_request_failed_job.h | 3
-rw-r--r--  net/test/url_request/url_request_mock_data_job.cc | 15
-rw-r--r--  net/test/url_request/url_request_mock_data_job.h | 2
-rw-r--r--  net/test/url_request/url_request_slow_download_job.cc | 27
-rw-r--r--  net/test/url_request/url_request_slow_download_job.h | 2
-rw-r--r--  net/url_request/url_request_file_dir_job.cc | 55
-rw-r--r--  net/url_request/url_request_file_dir_job.h | 6
-rw-r--r--  net/url_request/url_request_file_job.cc | 61
-rw-r--r--  net/url_request/url_request_file_job.h | 6
-rw-r--r--  net/url_request/url_request_ftp_job.cc | 31
-rw-r--r--  net/url_request/url_request_ftp_job.h | 2
-rw-r--r--  net/url_request/url_request_http_job.cc | 43
-rw-r--r--  net/url_request/url_request_http_job.h | 5
-rw-r--r--  net/url_request/url_request_job.cc | 231
-rw-r--r--  net/url_request/url_request_job.h | 93
-rw-r--r--  net/url_request/url_request_job_unittest.cc | 44
-rw-r--r--  net/url_request/url_request_simple_job.cc | 35
-rw-r--r--  net/url_request/url_request_simple_job.h | 3
-rw-r--r--  net/url_request/url_request_status.cc | 15
-rw-r--r--  net/url_request/url_request_status.h | 8
-rw-r--r--  net/url_request/url_request_test_job.cc | 30
-rw-r--r--  net/url_request/url_request_test_job.h | 2
-rw-r--r--  storage/browser/blob/blob_url_request_job.cc | 42
-rw-r--r--  storage/browser/blob/blob_url_request_job.h | 2
-rw-r--r--  storage/browser/fileapi/file_system_dir_url_request_job.cc | 21
-rw-r--r--  storage/browser/fileapi/file_system_dir_url_request_job.h | 2
-rw-r--r--  storage/browser/fileapi/file_system_url_request_job.cc | 70
-rw-r--r--  storage/browser/fileapi/file_system_url_request_job.h | 5
52 files changed, 925 insertions, 825 deletions
diff --git a/android_webview/browser/net/android_stream_reader_url_request_job.cc b/android_webview/browser/net/android_stream_reader_url_request_job.cc
index dc288fa..173bdbb 100644
--- a/android_webview/browser/net/android_stream_reader_url_request_job.cc
+++ b/android_webview/browser/net/android_stream_reader_url_request_job.cc
@@ -21,6 +21,7 @@
#include "content/public/browser/browser_thread.h"
#include "net/base/io_buffer.h"
#include "net/base/mime_util.h"
+#include "net/base/net_errors.h"
#include "net/base/net_util.h"
#include "net/http/http_response_headers.h"
#include "net/http/http_response_info.h"
@@ -89,7 +90,6 @@ AndroidStreamReaderURLRequestJob::AndroidStreamReaderURLRequestJob(
net::NetworkDelegate* network_delegate,
scoped_ptr<Delegate> delegate)
: URLRequestJob(request, network_delegate),
- range_parse_result_(net::OK),
delegate_(delegate.Pass()),
weak_factory_(this) {
DCHECK(delegate_);
@@ -101,7 +101,6 @@ AndroidStreamReaderURLRequestJob::AndroidStreamReaderURLRequestJob(
scoped_ptr<DelegateObtainer> delegate_obtainer,
bool)
: URLRequestJob(request, network_delegate),
- range_parse_result_(net::OK),
delegate_obtainer_(delegate_obtainer.Pass()),
weak_factory_(this) {
DCHECK(delegate_obtainer_);
@@ -142,10 +141,7 @@ void AndroidStreamReaderURLRequestJob::Start() {
base::Bind(&AndroidStreamReaderURLRequestJob::DelegateObtained,
weak_factory_.GetWeakPtr()));
} else {
- // Run DoStart asynchronously to avoid re-entering the delegate.
- base::ThreadTaskRunnerHandle::Get()->PostTask(
- FROM_HERE, base::Bind(&AndroidStreamReaderURLRequestJob::DoStart,
- weak_factory_.GetWeakPtr()));
+ DoStart();
}
}
@@ -206,29 +202,45 @@ void AndroidStreamReaderURLRequestJob::OnReaderSeekCompleted(int result) {
set_expected_content_size(result);
HeadersComplete(kHTTPOk, kHTTPOkText);
} else {
- NotifyStartError(
- net::URLRequestStatus(net::URLRequestStatus::FAILED, result));
+ NotifyDone(net::URLRequestStatus(net::URLRequestStatus::FAILED, result));
}
}
void AndroidStreamReaderURLRequestJob::OnReaderReadCompleted(int result) {
DCHECK(thread_checker_.CalledOnValidThread());
-
- ReadRawDataComplete(result);
+ // The URLRequest API contract requires that:
+  // * NotifyDone be called exactly once, to set the final status and to
+  //   indicate that the job is finished (there will be no further IO),
+ // * NotifyReadComplete be called if false is returned from ReadRawData to
+ // indicate that the IOBuffer will not be used by the job anymore.
+ // There might be multiple calls to ReadRawData (and thus multiple calls to
+ // NotifyReadComplete), which is why NotifyDone is called only on errors
+ // (result < 0) and end of data (result == 0).
+ if (result == 0) {
+ NotifyDone(net::URLRequestStatus());
+ } else if (result < 0) {
+ NotifyDone(net::URLRequestStatus(net::URLRequestStatus::FAILED, result));
+ } else {
+ // Clear the IO_PENDING status.
+ SetStatus(net::URLRequestStatus());
+ }
+ NotifyReadComplete(result);
}
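
To make the restored read-completion contract concrete, here is a minimal self-contained sketch (hypothetical MiniJob and Status types standing in for net::URLRequestJob and net::URLRequestStatus; an illustration, not the real API):

    #include <cassert>
    #include <cstdio>

    // Hypothetical stand-ins for the job status and notification surface.
    enum class Status { SUCCESS, IO_PENDING, FAILED };

    struct MiniJob {
      Status status = Status::SUCCESS;
      bool done = false;

      void SetStatus(Status s) { status = s; }
      void NotifyDone(Status s) { status = s; done = true; }  // terminal, once
      void NotifyReadComplete(int result) {                   // buffer released
        std::printf("read complete: %d (done=%d)\n", result, done ? 1 : 0);
      }

      // Mirrors OnReaderReadCompleted above: EOF and errors are terminal
      // (NotifyDone), a positive result clears IO_PENDING, and every path
      // ends with NotifyReadComplete so the IOBuffer can be reused.
      void OnReadCompleted(int result) {
        if (result == 0)
          NotifyDone(Status::SUCCESS);   // end of data
        else if (result < 0)
          NotifyDone(Status::FAILED);    // error
        else
          SetStatus(Status::SUCCESS);    // clear IO_PENDING; more may come
        NotifyReadComplete(result);
      }
    };

    int main() {
      MiniJob job;
      job.SetStatus(Status::IO_PENDING);  // a read was started asynchronously
      job.OnReadCompleted(1024);          // data arrived
      job.SetStatus(Status::IO_PENDING);
      job.OnReadCompleted(0);             // EOF
      assert(job.done);
    }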
base::TaskRunner* AndroidStreamReaderURLRequestJob::GetWorkerThreadRunner() {
return static_cast<base::TaskRunner*>(BrowserThread::GetBlockingPool());
}
-int AndroidStreamReaderURLRequestJob::ReadRawData(net::IOBuffer* dest,
- int dest_size) {
+bool AndroidStreamReaderURLRequestJob::ReadRawData(net::IOBuffer* dest,
+ int dest_size,
+ int* bytes_read) {
DCHECK(thread_checker_.CalledOnValidThread());
if (!input_stream_reader_wrapper_.get()) {
// This will happen if opening the InputStream fails in which case the
// error is communicated by setting the HTTP response status header rather
// than failing the request during the header fetch phase.
- return 0;
+ *bytes_read = 0;
+ return true;
}
PostTaskAndReplyWithResult(
@@ -239,7 +251,9 @@ int AndroidStreamReaderURLRequestJob::ReadRawData(net::IOBuffer* dest,
base::Bind(&AndroidStreamReaderURLRequestJob::OnReaderReadCompleted,
weak_factory_.GetWeakPtr()));
- return net::ERR_IO_PENDING;
+ SetStatus(net::URLRequestStatus(net::URLRequestStatus::IO_PENDING,
+ net::ERR_IO_PENDING));
+ return false;
}
bool AndroidStreamReaderURLRequestJob::GetMimeType(
@@ -289,11 +303,6 @@ void AndroidStreamReaderURLRequestJob::DelegateObtained(
void AndroidStreamReaderURLRequestJob::DoStart() {
DCHECK(thread_checker_.CalledOnValidThread());
- if (range_parse_result_ != net::OK) {
- NotifyStartError(net::URLRequestStatus(net::URLRequestStatus::FAILED,
- range_parse_result_));
- return;
- }
// Start reading asynchronously so that all error reporting and data
// callbacks happen as they would for network requests.
SetStatus(net::URLRequestStatus(net::URLRequestStatus::IO_PENDING,
@@ -374,18 +383,19 @@ void AndroidStreamReaderURLRequestJob::SetExtraRequestHeaders(
const net::HttpRequestHeaders& headers) {
std::string range_header;
if (headers.GetHeader(net::HttpRequestHeaders::kRange, &range_header)) {
- // This job only cares about the Range header so that we know how many bytes
- // in the stream to skip and how many to read after that. Note that
- // validation is deferred to DoStart(), because NotifyStartError() is not
- // legal to call since the job has not started.
+ // We only extract the "Range" header so that we know how many bytes in the
+ // stream to skip and how many to read after that.
std::vector<net::HttpByteRange> ranges;
if (net::HttpUtil::ParseRangeHeader(range_header, &ranges)) {
- if (ranges.size() == 1)
+ if (ranges.size() == 1) {
byte_range_ = ranges[0];
- } else {
- // We don't support multiple range requests in one single URL request,
- // because we need to do multipart encoding here.
- range_parse_result_ = net::ERR_REQUEST_RANGE_NOT_SATISFIABLE;
+ } else {
+ // We don't support multiple range requests in one single URL request,
+ // because we need to do multipart encoding here.
+ NotifyDone(
+ net::URLRequestStatus(net::URLRequestStatus::FAILED,
+ net::ERR_REQUEST_RANGE_NOT_SATISFIABLE));
+ }
}
}
}
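
The single-range restriction enforced above can be illustrated with a standalone sketch (hypothetical SplitRangeSpecs helper; the real code uses net::HttpUtil::ParseRangeHeader, which implements the full byte-range grammar):

    #include <cstdio>
    #include <sstream>
    #include <string>
    #include <vector>

    // Splits a Range header value such as "bytes=0-499" or "bytes=0-0,-1"
    // into its comma-separated range specs. More than one spec means the job
    // above fails with ERR_REQUEST_RANGE_NOT_SATISFIABLE, because serving it
    // would require multipart/byteranges encoding.
    std::vector<std::string> SplitRangeSpecs(const std::string& value) {
      std::vector<std::string> specs;
      const std::string prefix = "bytes=";
      if (value.compare(0, prefix.size(), prefix) != 0)
        return specs;  // unsupported unit; treat as unparsable
      std::stringstream ss(value.substr(prefix.size()));
      std::string spec;
      while (std::getline(ss, spec, ','))
        specs.push_back(spec);
      return specs;
    }

    int main() {
      std::printf("%zu\n", SplitRangeSpecs("bytes=100-200").size());  // 1: OK
      std::printf("%zu\n", SplitRangeSpecs("bytes=0-0,-1").size());   // 2: no
    }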
diff --git a/android_webview/browser/net/android_stream_reader_url_request_job.h b/android_webview/browser/net/android_stream_reader_url_request_job.h
index 645be13..e21d765 100644
--- a/android_webview/browser/net/android_stream_reader_url_request_job.h
+++ b/android_webview/browser/net/android_stream_reader_url_request_job.h
@@ -15,7 +15,6 @@
#include "base/memory/scoped_ptr.h"
#include "base/memory/weak_ptr.h"
#include "base/threading/thread_checker.h"
-#include "net/base/net_errors.h"
#include "net/http/http_byte_range.h"
#include "net/url_request/url_request_job.h"
@@ -97,7 +96,7 @@ class AndroidStreamReaderURLRequestJob : public net::URLRequestJob {
// URLRequestJob:
void Start() override;
void Kill() override;
- int ReadRawData(net::IOBuffer* buf, int buf_size) override;
+ bool ReadRawData(net::IOBuffer* buf, int buf_size, int* bytes_read) override;
void SetExtraRequestHeaders(const net::HttpRequestHeaders& headers) override;
bool GetMimeType(std::string* mime_type) const override;
bool GetCharset(std::string* charset) override;
@@ -132,7 +131,6 @@ class AndroidStreamReaderURLRequestJob : public net::URLRequestJob {
void OnReaderReadCompleted(int bytes_read);
net::HttpByteRange byte_range_;
- net::Error range_parse_result_;
scoped_ptr<net::HttpResponseInfo> response_info_;
scoped_ptr<Delegate> delegate_;
scoped_ptr<DelegateObtainer> delegate_obtainer_;
diff --git a/chrome/browser/chromeos/fileapi/external_file_url_request_job.cc b/chrome/browser/chromeos/fileapi/external_file_url_request_job.cc
index 917cbcb..676e356 100644
--- a/chrome/browser/chromeos/fileapi/external_file_url_request_job.cc
+++ b/chrome/browser/chromeos/fileapi/external_file_url_request_job.cc
@@ -10,7 +10,6 @@
#include "base/bind.h"
#include "base/logging.h"
#include "base/memory/ref_counted.h"
-#include "base/thread_task_runner_handle.h"
#include "chrome/browser/browser_process.h"
#include "chrome/browser/chromeos/file_manager/fileapi_util.h"
#include "chrome/browser/chromeos/fileapi/external_file_url_util.h"
@@ -20,6 +19,7 @@
#include "content/public/browser/browser_thread.h"
#include "content/public/browser/storage_partition.h"
#include "content/public/common/url_constants.h"
+#include "net/base/net_errors.h"
#include "net/http/http_byte_range.h"
#include "net/http/http_request_headers.h"
#include "net/http/http_response_info.h"
@@ -163,7 +163,6 @@ ExternalFileURLRequestJob::ExternalFileURLRequestJob(
net::NetworkDelegate* network_delegate)
: net::URLRequestJob(request, network_delegate),
profile_id_(profile_id),
- range_parse_result_(net::OK),
remaining_bytes_(0),
weak_ptr_factory_(this) {}
@@ -171,40 +170,24 @@ void ExternalFileURLRequestJob::SetExtraRequestHeaders(
const net::HttpRequestHeaders& headers) {
std::string range_header;
if (headers.GetHeader(net::HttpRequestHeaders::kRange, &range_header)) {
- // Currently this job only cares about the Range header, and only supports
- // single range requests. Note that validation is deferred to Start,
- // because NotifyStartError is not legal to call since the job has not
- // started.
+ // Note: We only support single range requests.
std::vector<net::HttpByteRange> ranges;
if (net::HttpUtil::ParseRangeHeader(range_header, &ranges) &&
ranges.size() == 1) {
byte_range_ = ranges[0];
} else {
- range_parse_result_ = net::ERR_REQUEST_RANGE_NOT_SATISFIABLE;
+      // Failed to parse the Range header, so report the error.
+ NotifyDone(net::URLRequestStatus(net::URLRequestStatus::FAILED,
+ net::ERR_REQUEST_RANGE_NOT_SATISFIABLE));
}
}
}
void ExternalFileURLRequestJob::Start() {
- // Post a task to invoke StartAsync asynchronously to avoid re-entering the
- // delegate, because NotifyStartError is not legal to call synchronously in
- // Start().
- base::ThreadTaskRunnerHandle::Get()->PostTask(
- FROM_HERE, base::Bind(&ExternalFileURLRequestJob::StartAsync,
- weak_ptr_factory_.GetWeakPtr()));
-}
-
-void ExternalFileURLRequestJob::StartAsync() {
DVLOG(1) << "Starting request";
DCHECK_CURRENTLY_ON(BrowserThread::IO);
DCHECK(!stream_reader_);
- if (range_parse_result_ != net::OK) {
- NotifyStartError(net::URLRequestStatus(net::URLRequestStatus::FAILED,
- range_parse_result_));
- return;
- }
-
// We only support GET request.
if (request()->method() != "GET") {
LOG(WARNING) << "Failed to start request: " << request()->method()
@@ -344,23 +327,37 @@ bool ExternalFileURLRequestJob::IsRedirectResponse(GURL* location,
return true;
}
-int ExternalFileURLRequestJob::ReadRawData(net::IOBuffer* buf, int buf_size) {
+bool ExternalFileURLRequestJob::ReadRawData(net::IOBuffer* buf,
+ int buf_size,
+ int* bytes_read) {
DCHECK_CURRENTLY_ON(BrowserThread::IO);
DCHECK(stream_reader_);
- if (remaining_bytes_ == 0)
- return 0;
+ if (remaining_bytes_ == 0) {
+ *bytes_read = 0;
+ return true;
+ }
const int result = stream_reader_->Read(
buf, std::min<int64>(buf_size, remaining_bytes_),
base::Bind(&ExternalFileURLRequestJob::OnReadCompleted,
weak_ptr_factory_.GetWeakPtr()));
- if (result < 0)
- return result;
+ if (result == net::ERR_IO_PENDING) {
+ // The data is not yet available.
+ SetStatus(net::URLRequestStatus(net::URLRequestStatus::IO_PENDING, 0));
+ return false;
+ }
+ if (result < 0) {
+    // An error occurred.
+ NotifyDone(net::URLRequestStatus(net::URLRequestStatus::FAILED, result));
+ return false;
+ }
+  // The read completed synchronously.
+ *bytes_read = result;
remaining_bytes_ -= result;
- return result;
+ return true;
}
ExternalFileURLRequestJob::~ExternalFileURLRequestJob() {
@@ -369,10 +366,15 @@ ExternalFileURLRequestJob::~ExternalFileURLRequestJob() {
void ExternalFileURLRequestJob::OnReadCompleted(int read_result) {
DCHECK_CURRENTLY_ON(BrowserThread::IO);
- if (read_result > 0)
- remaining_bytes_ -= read_result;
+ if (read_result < 0) {
+ DCHECK_NE(read_result, net::ERR_IO_PENDING);
+ NotifyDone(
+ net::URLRequestStatus(net::URLRequestStatus::FAILED, read_result));
+ }
- ReadRawDataComplete(read_result);
+ remaining_bytes_ -= read_result;
+ SetStatus(net::URLRequestStatus()); // Clear the IO_PENDING status.
+ NotifyReadComplete(read_result);
}
} // namespace chromeos
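
The three-way branch in ReadRawData above (synchronous data, ERR_IO_PENDING, error) is the core of the restored contract. A rough standalone sketch, with a hypothetical ToyReader in place of the real stream reader and a plain negative constant in place of the net error code:

    #include <cstdio>
    #include <functional>

    constexpr int kErrIoPending = -1;  // placeholder for net::ERR_IO_PENDING

    // A toy reader: completes synchronously, or returns kErrIoPending and
    // invokes the callback later, the way net::FileStreamReader::Read does.
    struct ToyReader {
      bool async = false;
      std::function<void(int)> pending_cb;
      int Read(int want, std::function<void(int)> cb) {
        if (!async)
          return want;               // synchronous completion
        pending_cb = std::move(cb);  // completion arrives later
        return kErrIoPending;
      }
    };

    // Synchronous results are consumed in place and return true; pending and
    // failed reads both return false, distinguished by the job status that
    // the real code sets via SetStatus(IO_PENDING) or NotifyDone(FAILED).
    bool ReadRawDataSketch(ToyReader& r, int want, int* bytes_read,
                           long long* remaining) {
      int result = r.Read(want, [](int) { /* OnReadCompleted(result) */ });
      if (result == kErrIoPending)
        return false;  // SetStatus(IO_PENDING)
      if (result < 0)
        return false;  // NotifyDone(FAILED, result)
      *bytes_read = result;
      *remaining -= result;
      return true;     // synchronous success
    }

    int main() {
      ToyReader reader;
      long long remaining = 4096;
      int n = 0;
      bool sync = ReadRawDataSketch(reader, 512, &n, &remaining);
      std::printf("sync=%d n=%d remaining=%lld\n", sync ? 1 : 0, n, remaining);
    }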
diff --git a/chrome/browser/chromeos/fileapi/external_file_url_request_job.h b/chrome/browser/chromeos/fileapi/external_file_url_request_job.h
index dff3379..a026e43 100644
--- a/chrome/browser/chromeos/fileapi/external_file_url_request_job.h
+++ b/chrome/browser/chromeos/fileapi/external_file_url_request_job.h
@@ -63,17 +63,12 @@ class ExternalFileURLRequestJob : public net::URLRequestJob {
void Kill() override;
bool GetMimeType(std::string* mime_type) const override;
bool IsRedirectResponse(GURL* location, int* http_status_code) override;
- int ReadRawData(net::IOBuffer* buf, int buf_size) override;
+ bool ReadRawData(net::IOBuffer* buf, int buf_size, int* bytes_read) override;
protected:
~ExternalFileURLRequestJob() override;
private:
- // Helper method to start the job. Should be called asynchronously because
- // NotifyStartError() is not legal to call synchronously in
- // URLRequestJob::Start().
- void StartAsync();
-
// Called from an internal helper class defined in drive_url_request_job.cc,
// which is running on the UI thread.
void OnHelperResultObtained(
@@ -96,7 +91,6 @@ class ExternalFileURLRequestJob : public net::URLRequestJob {
void* const profile_id_;
// The range of the file to be returned.
- net::Error range_parse_result_;
net::HttpByteRange byte_range_;
int64 remaining_bytes_;
diff --git a/content/browser/android/url_request_content_job.cc b/content/browser/android/url_request_content_job.cc
index 1bcbf210..6e661a2 100644
--- a/content/browser/android/url_request_content_job.cc
+++ b/content/browser/android/url_request_content_job.cc
@@ -11,6 +11,7 @@
#include "base/task_runner.h"
#include "net/base/file_stream.h"
#include "net/base/io_buffer.h"
+#include "net/base/net_errors.h"
#include "net/http/http_util.h"
#include "net/url_request/url_request_error_job.h"
#include "url/gurl.h"
@@ -33,7 +34,6 @@ URLRequestContentJob::URLRequestContentJob(
content_path_(content_path),
stream_(new net::FileStream(content_task_runner)),
content_task_runner_(content_task_runner),
- range_parse_result_(net::OK),
remaining_bytes_(0),
io_pending_(false),
weak_ptr_factory_(this) {}
@@ -56,28 +56,43 @@ void URLRequestContentJob::Kill() {
net::URLRequestJob::Kill();
}
-int URLRequestContentJob::ReadRawData(net::IOBuffer* dest, int dest_size) {
+bool URLRequestContentJob::ReadRawData(net::IOBuffer* dest,
+ int dest_size,
+ int* bytes_read) {
DCHECK_GT(dest_size, 0);
+ DCHECK(bytes_read);
DCHECK_GE(remaining_bytes_, 0);
if (remaining_bytes_ < dest_size)
- dest_size = remaining_bytes_;
+ dest_size = static_cast<int>(remaining_bytes_);
// If we should copy zero bytes because |remaining_bytes_| is zero, short
// circuit here.
- if (!dest_size)
- return 0;
+ if (!dest_size) {
+ *bytes_read = 0;
+ return true;
+ }
- int rv = stream_->Read(dest, dest_size,
- base::Bind(&URLRequestContentJob::DidRead,
- weak_ptr_factory_.GetWeakPtr()));
+ int rv =
+ stream_->Read(dest, dest_size, base::Bind(&URLRequestContentJob::DidRead,
+ weak_ptr_factory_.GetWeakPtr(),
+ make_scoped_refptr(dest)));
+ if (rv >= 0) {
+ // Data is immediately available.
+ *bytes_read = rv;
+ remaining_bytes_ -= rv;
+ DCHECK_GE(remaining_bytes_, 0);
+ return true;
+ }
+
+  // Otherwise, the read is either still pending or has failed outright.
if (rv == net::ERR_IO_PENDING) {
io_pending_ = true;
- } else if (rv > 0) {
- remaining_bytes_ -= rv;
+ SetStatus(net::URLRequestStatus(net::URLRequestStatus::IO_PENDING, 0));
+ } else {
+ NotifyDone(net::URLRequestStatus(net::URLRequestStatus::FAILED, rv));
}
- DCHECK_GE(remaining_bytes_, 0);
- return rv;
+ return false;
}
bool URLRequestContentJob::IsRedirectResponse(GURL* location,
@@ -100,16 +115,15 @@ void URLRequestContentJob::SetExtraRequestHeaders(
if (!headers.GetHeader(net::HttpRequestHeaders::kRange, &range_header))
return;
- // Currently this job only cares about the Range header. Note that validation
- // is deferred to DidOpen(), because NotifyStartError is not legal to call
- // since the job has not started.
+  // We only care about the "Range" header here.
std::vector<net::HttpByteRange> ranges;
if (net::HttpUtil::ParseRangeHeader(range_header, &ranges)) {
if (ranges.size() == 1) {
byte_range_ = ranges[0];
} else {
// We don't support multiple range requests.
- range_parse_result_ = net::ERR_REQUEST_RANGE_NOT_SATISFIABLE;
+ NotifyDone(net::URLRequestStatus(net::URLRequestStatus::FAILED,
+ net::ERR_REQUEST_RANGE_NOT_SATISFIABLE));
}
}
}
@@ -146,20 +160,13 @@ void URLRequestContentJob::DidFetchMetaInfo(const ContentMetaInfo* meta_info) {
void URLRequestContentJob::DidOpen(int result) {
if (result != net::OK) {
- NotifyStartError(
- net::URLRequestStatus(net::URLRequestStatus::FAILED, result));
- return;
- }
-
- if (range_parse_result_ != net::OK) {
- NotifyStartError(net::URLRequestStatus(net::URLRequestStatus::FAILED,
- range_parse_result_));
+ NotifyDone(net::URLRequestStatus(net::URLRequestStatus::FAILED, result));
return;
}
if (!byte_range_.ComputeBounds(meta_info_.content_size)) {
- NotifyStartError(net::URLRequestStatus(
- net::URLRequestStatus::FAILED, net::ERR_REQUEST_RANGE_NOT_SATISFIABLE));
+ NotifyDone(net::URLRequestStatus(net::URLRequestStatus::FAILED,
+ net::ERR_REQUEST_RANGE_NOT_SATISFIABLE));
return;
}
@@ -186,8 +193,8 @@ void URLRequestContentJob::DidOpen(int result) {
void URLRequestContentJob::DidSeek(int64 result) {
if (result != byte_range_.first_byte_position()) {
- NotifyStartError(net::URLRequestStatus(
- net::URLRequestStatus::FAILED, net::ERR_REQUEST_RANGE_NOT_SATISFIABLE));
+ NotifyDone(net::URLRequestStatus(net::URLRequestStatus::FAILED,
+ net::ERR_REQUEST_RANGE_NOT_SATISFIABLE));
return;
}
@@ -195,16 +202,24 @@ void URLRequestContentJob::DidSeek(int64 result) {
NotifyHeadersComplete();
}
-void URLRequestContentJob::DidRead(int result) {
- DCHECK(io_pending_);
- io_pending_ = false;
-
+void URLRequestContentJob::DidRead(scoped_refptr<net::IOBuffer> buf,
+ int result) {
if (result > 0) {
+ SetStatus(net::URLRequestStatus()); // Clear the IO_PENDING status
remaining_bytes_ -= result;
DCHECK_GE(remaining_bytes_, 0);
}
- ReadRawDataComplete(result);
+ DCHECK(io_pending_);
+ io_pending_ = false;
+
+ if (result == 0) {
+ NotifyDone(net::URLRequestStatus());
+ } else if (result < 0) {
+ NotifyDone(net::URLRequestStatus(net::URLRequestStatus::FAILED, result));
+ }
+
+ NotifyReadComplete(result);
}
} // namespace content
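
The new DidRead signature exists to keep the destination IOBuffer alive while the asynchronous read is in flight: binding make_scoped_refptr(dest) into the callback pins the buffer. A rough standalone analogue using std::shared_ptr (hypothetical MakeDidRead helper):

    #include <cstdio>
    #include <functional>
    #include <memory>
    #include <vector>

    using Buffer = std::vector<char>;

    // Capturing the shared_ptr in the completion callback guarantees the
    // buffer outlives the asynchronous operation even if the original owner
    // drops its reference, mirroring what the scoped_refptr bind does above.
    std::function<void(int)> MakeDidRead(std::shared_ptr<Buffer> buf) {
      return [buf](int result) {
        std::printf("read %d bytes into a %zu-byte buffer\n", result,
                    buf->size());
      };
    }

    int main() {
      auto did_read = MakeDidRead(std::make_shared<Buffer>(4096));
      // The caller's reference can go away here; the callback still holds one.
      did_read(1024);
    }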
diff --git a/content/browser/android/url_request_content_job.h b/content/browser/android/url_request_content_job.h
index dad9fbf..5d3d933 100644
--- a/content/browser/android/url_request_content_job.h
+++ b/content/browser/android/url_request_content_job.h
@@ -12,7 +12,6 @@
#include "base/memory/ref_counted.h"
#include "base/memory/weak_ptr.h"
#include "content/common/content_export.h"
-#include "net/base/net_errors.h"
#include "net/http/http_byte_range.h"
#include "net/url_request/url_request.h"
#include "net/url_request/url_request_job.h"
@@ -43,7 +42,7 @@ class CONTENT_EXPORT URLRequestContentJob : public net::URLRequestJob {
// net::URLRequestJob:
void Start() override;
void Kill() override;
- int ReadRawData(net::IOBuffer* buf, int buf_size) override;
+ bool ReadRawData(net::IOBuffer* buf, int buf_size, int* bytes_read) override;
bool IsRedirectResponse(GURL* location, int* http_status_code) override;
bool GetMimeType(std::string* mime_type) const override;
void SetExtraRequestHeaders(const net::HttpRequestHeaders& headers) override;
@@ -80,7 +79,7 @@ class CONTENT_EXPORT URLRequestContentJob : public net::URLRequestJob {
void DidSeek(int64 result);
// Callback after data is asynchronously read from the content URI into |buf|.
- void DidRead(int result);
+ void DidRead(scoped_refptr<net::IOBuffer> buf, int result);
// The full path of the content URI.
base::FilePath content_path_;
@@ -90,7 +89,6 @@ class CONTENT_EXPORT URLRequestContentJob : public net::URLRequestJob {
const scoped_refptr<base::TaskRunner> content_task_runner_;
net::HttpByteRange byte_range_;
- net::Error range_parse_result_;
int64 remaining_bytes_;
bool io_pending_;
diff --git a/content/browser/appcache/appcache_url_request_job.cc b/content/browser/appcache/appcache_url_request_job.cc
index de924a1..82997f7 100644
--- a/content/browser/appcache/appcache_url_request_job.cc
+++ b/content/browser/appcache/appcache_url_request_job.cc
@@ -341,6 +341,7 @@ void AppCacheURLRequestJob::SetupRangeResponse() {
void AppCacheURLRequestJob::OnReadComplete(int result) {
DCHECK(is_delivering_appcache_response());
if (result == 0) {
+ NotifyDone(net::URLRequestStatus());
AppCacheHistograms::CountResponseRetrieval(
true, is_main_resource_, manifest_url_.GetOrigin());
} else if (result < 0) {
@@ -348,10 +349,13 @@ void AppCacheURLRequestJob::OnReadComplete(int result) {
storage_->service()->CheckAppCacheResponse(manifest_url_, cache_id_,
entry_.response_id());
}
+ NotifyDone(net::URLRequestStatus(net::URLRequestStatus::FAILED, result));
AppCacheHistograms::CountResponseRetrieval(
false, is_main_resource_, manifest_url_.GetOrigin());
+ } else {
+ SetStatus(net::URLRequestStatus()); // Clear the IO_PENDING status
}
- ReadRawDataComplete(result);
+ NotifyReadComplete(result);
}
// net::URLRequestJob overrides ------------------------------------------------
@@ -420,14 +424,18 @@ int AppCacheURLRequestJob::GetResponseCode() const {
return http_info()->headers->response_code();
}
-int AppCacheURLRequestJob::ReadRawData(net::IOBuffer* buf, int buf_size) {
+bool AppCacheURLRequestJob::ReadRawData(net::IOBuffer* buf,
+ int buf_size,
+ int* bytes_read) {
DCHECK(is_delivering_appcache_response());
DCHECK_NE(buf_size, 0);
+ DCHECK(bytes_read);
DCHECK(!reader_->IsReadPending());
reader_->ReadData(buf, buf_size,
base::Bind(&AppCacheURLRequestJob::OnReadComplete,
base::Unretained(this)));
- return net::ERR_IO_PENDING;
+ SetStatus(net::URLRequestStatus(net::URLRequestStatus::IO_PENDING, 0));
+ return false;
}
void AppCacheURLRequestJob::SetExtraRequestHeaders(
diff --git a/content/browser/appcache/appcache_url_request_job.h b/content/browser/appcache/appcache_url_request_job.h
index 8f51e7b..95a3e18 100644
--- a/content/browser/appcache/appcache_url_request_job.h
+++ b/content/browser/appcache/appcache_url_request_job.h
@@ -148,7 +148,7 @@ class CONTENT_EXPORT AppCacheURLRequestJob
net::LoadState GetLoadState() const override;
bool GetCharset(std::string* charset) override;
void GetResponseInfo(net::HttpResponseInfo* info) override;
- int ReadRawData(net::IOBuffer* buf, int buf_size) override;
+ bool ReadRawData(net::IOBuffer* buf, int buf_size, int* bytes_read) override;
// Sets extra request headers for Job types that support request headers.
// This is how we get informed of range-requests.
diff --git a/content/browser/fileapi/file_writer_delegate_unittest.cc b/content/browser/fileapi/file_writer_delegate_unittest.cc
index 9eb73b1..c9bcfbb 100644
--- a/content/browser/fileapi/file_writer_delegate_unittest.cc
+++ b/content/browser/fileapi/file_writer_delegate_unittest.cc
@@ -184,15 +184,17 @@ class FileWriterDelegateTestJob : public net::URLRequestJob {
base::Bind(&FileWriterDelegateTestJob::NotifyHeadersComplete, this));
}
- int ReadRawData(net::IOBuffer* buf, int buf_size) override {
+ bool ReadRawData(net::IOBuffer* buf, int buf_size, int* bytes_read) override {
if (remaining_bytes_ < buf_size)
- buf_size = remaining_bytes_;
+ buf_size = static_cast<int>(remaining_bytes_);
for (int i = 0; i < buf_size; ++i)
buf->data()[i] = content_[cursor_++];
remaining_bytes_ -= buf_size;
- return buf_size;
+ SetStatus(net::URLRequestStatus());
+ *bytes_read = buf_size;
+ return true;
}
int GetResponseCode() const override { return 200; }
diff --git a/content/browser/net/view_http_cache_job_factory.cc b/content/browser/net/view_http_cache_job_factory.cc
index 22b408e..823ced9 100644
--- a/content/browser/net/view_http_cache_job_factory.cc
+++ b/content/browser/net/view_http_cache_job_factory.cc
@@ -45,8 +45,15 @@ class ViewHttpCacheJob : public net::URLRequestJob {
bool GetCharset(std::string* charset) override {
return core_->GetCharset(charset);
}
- int ReadRawData(net::IOBuffer* buf, int buf_size) override {
- return core_->ReadRawData(buf, buf_size);
+ bool ReadRawData(net::IOBuffer* buf,
+ int buf_size,
+ int* out_bytes_read) override {
+ size_t bytes_read;
+ if (!core_->ReadRawData(buf, base::checked_cast<size_t>(buf_size),
+ &bytes_read))
+ return false;
+ *out_bytes_read = base::checked_cast<int>(bytes_read);
+ return true;
}
private:
@@ -66,7 +73,7 @@ class ViewHttpCacheJob : public net::URLRequestJob {
bool GetMimeType(std::string* mime_type) const;
bool GetCharset(std::string* charset);
- int ReadRawData(net::IOBuffer* buf, int buf_size);
+ bool ReadRawData(net::IOBuffer* buf, size_t buf_size, size_t* bytes_read);
private:
friend class base::RefCounted<Core>;
@@ -165,14 +172,18 @@ bool ViewHttpCacheJob::Core::GetCharset(std::string* charset) {
return true;
}
-int ViewHttpCacheJob::Core::ReadRawData(net::IOBuffer* buf, int buf_size) {
+bool ViewHttpCacheJob::Core::ReadRawData(net::IOBuffer* buf,
+ size_t buf_size,
+ size_t* bytes_read) {
+ DCHECK(bytes_read);
DCHECK_LE(data_offset_, data_.size());
- int remaining = base::checked_cast<int>(data_.size() - data_offset_);
+ size_t remaining = data_.size() - data_offset_;
if (buf_size > remaining)
buf_size = remaining;
memcpy(buf->data(), data_.data() + data_offset_, buf_size);
data_offset_ += buf_size;
- return buf_size;
+ *bytes_read = buf_size;
+ return true;
}
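
The size_t-based Core interface relies on base::checked_cast to move between int and size_t without silent truncation. A minimal sketch of the idea (hypothetical CheckedCast, not the real base/numerics implementation):

    #include <cassert>
    #include <cstddef>
    #include <cstdio>
    #include <limits>

    // Aborts (in this sketch, via assert) if the value does not fit in the
    // destination type, instead of silently truncating like static_cast.
    template <typename Dst, typename Src>
    Dst CheckedCast(Src value) {
      assert(static_cast<unsigned long long>(value) <=
             static_cast<unsigned long long>(std::numeric_limits<Dst>::max()));
      return static_cast<Dst>(value);
    }

    int main() {
      size_t remaining = 1500;
      int buf_size = CheckedCast<int>(remaining);  // fits, so this is safe
      std::printf("%d\n", buf_size);
    }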
void ViewHttpCacheJob::Core::OnIOComplete(int result) {
diff --git a/content/browser/service_worker/service_worker_read_from_cache_job.cc b/content/browser/service_worker/service_worker_read_from_cache_job.cc
index 6aa92af8..9357991 100644
--- a/content/browser/service_worker/service_worker_read_from_cache_job.cc
+++ b/content/browser/service_worker/service_worker_read_from_cache_job.cc
@@ -41,9 +41,26 @@ ServiceWorkerReadFromCacheJob::~ServiceWorkerReadFromCacheJob() {
}
void ServiceWorkerReadFromCacheJob::Start() {
- base::ThreadTaskRunnerHandle::Get()->PostTask(
- FROM_HERE, base::Bind(&ServiceWorkerReadFromCacheJob::StartAsync,
- weak_factory_.GetWeakPtr()));
+ TRACE_EVENT_ASYNC_BEGIN1("ServiceWorker",
+ "ServiceWorkerReadFromCacheJob::ReadInfo", this,
+ "URL", request_->url().spec());
+ if (!context_) {
+ NotifyStartError(
+ net::URLRequestStatus(net::URLRequestStatus::FAILED, net::ERR_FAILED));
+ return;
+ }
+
+  // Create a response reader and start reading the headers;
+  // we'll continue when that's done.
+ if (is_main_script())
+ version_->embedded_worker()->OnScriptReadStarted();
+ reader_ = context_->storage()->CreateResponseReader(resource_id_);
+ http_info_io_buffer_ = new HttpResponseInfoIOBuffer;
+ reader_->ReadInfo(
+ http_info_io_buffer_.get(),
+ base::Bind(&ServiceWorkerReadFromCacheJob::OnReadInfoComplete,
+ weak_factory_.GetWeakPtr()));
+ SetStatus(net::URLRequestStatus(net::URLRequestStatus::IO_PENDING, 0));
}
void ServiceWorkerReadFromCacheJob::Kill() {
@@ -105,9 +122,11 @@ void ServiceWorkerReadFromCacheJob::SetExtraRequestHeaders(
range_requested_ = ranges[0];
}
-int ServiceWorkerReadFromCacheJob::ReadRawData(net::IOBuffer* buf,
- int buf_size) {
+bool ServiceWorkerReadFromCacheJob::ReadRawData(net::IOBuffer* buf,
+ int buf_size,
+ int* bytes_read) {
DCHECK_NE(buf_size, 0);
+ DCHECK(bytes_read);
DCHECK(!reader_->IsReadPending());
TRACE_EVENT_ASYNC_BEGIN1("ServiceWorker",
"ServiceWorkerReadFromCacheJob::ReadRawData",
@@ -116,31 +135,8 @@ int ServiceWorkerReadFromCacheJob::ReadRawData(net::IOBuffer* buf,
reader_->ReadData(buf, buf_size,
base::Bind(&ServiceWorkerReadFromCacheJob::OnReadComplete,
weak_factory_.GetWeakPtr()));
- return net::ERR_IO_PENDING;
-}
-
-void ServiceWorkerReadFromCacheJob::StartAsync() {
- TRACE_EVENT_ASYNC_BEGIN1("ServiceWorker",
- "ServiceWorkerReadFromCacheJob::ReadInfo", this,
- "URL", request_->url().spec());
- if (!context_) {
- // NotifyStartError is not safe to call synchronously in Start.
- NotifyStartError(
- net::URLRequestStatus(net::URLRequestStatus::FAILED, net::ERR_FAILED));
- return;
- }
-
- // Create a response reader and start reading the headers,
- // we'll continue when thats done.
- if (is_main_script())
- version_->embedded_worker()->OnScriptReadStarted();
- reader_ = context_->storage()->CreateResponseReader(resource_id_);
- http_info_io_buffer_ = new HttpResponseInfoIOBuffer;
- reader_->ReadInfo(
- http_info_io_buffer_.get(),
- base::Bind(&ServiceWorkerReadFromCacheJob::OnReadInfoComplete,
- weak_factory_.GetWeakPtr()));
SetStatus(net::URLRequestStatus(net::URLRequestStatus::IO_PENDING, 0));
+ return false;
}
const net::HttpResponseInfo* ServiceWorkerReadFromCacheJob::http_info() const {
@@ -158,8 +154,6 @@ void ServiceWorkerReadFromCacheJob::OnReadInfoComplete(int result) {
ServiceWorkerMetrics::CountReadResponseResult(
ServiceWorkerMetrics::READ_HEADERS_ERROR);
Done(net::URLRequestStatus(net::URLRequestStatus::FAILED, result));
- NotifyStartError(
- net::URLRequestStatus(net::URLRequestStatus::FAILED, result));
return;
}
DCHECK_GE(result, 0);
@@ -213,22 +207,23 @@ void ServiceWorkerReadFromCacheJob::Done(const net::URLRequestStatus& status) {
}
if (is_main_script())
version_->embedded_worker()->OnScriptReadFinished();
+ NotifyDone(status);
}
void ServiceWorkerReadFromCacheJob::OnReadComplete(int result) {
ServiceWorkerMetrics::ReadResponseResult check_result;
-
- if (result >= 0) {
+ if (result == 0) {
check_result = ServiceWorkerMetrics::READ_OK;
- if (result == 0)
- Done(net::URLRequestStatus());
- } else {
+ Done(net::URLRequestStatus());
+ } else if (result < 0) {
check_result = ServiceWorkerMetrics::READ_DATA_ERROR;
Done(net::URLRequestStatus(net::URLRequestStatus::FAILED, result));
+ } else {
+ check_result = ServiceWorkerMetrics::READ_OK;
+ SetStatus(net::URLRequestStatus()); // Clear the IO_PENDING status
}
-
ServiceWorkerMetrics::CountReadResponseResult(check_result);
- ReadRawDataComplete(result);
+ NotifyReadComplete(result);
TRACE_EVENT_ASYNC_END1("ServiceWorker",
"ServiceWorkerReadFromCacheJob::ReadRawData",
this,
diff --git a/content/browser/service_worker/service_worker_read_from_cache_job.h b/content/browser/service_worker/service_worker_read_from_cache_job.h
index a62893d..fccde7a 100644
--- a/content/browser/service_worker/service_worker_read_from_cache_job.h
+++ b/content/browser/service_worker/service_worker_read_from_cache_job.h
@@ -51,14 +51,13 @@ class CONTENT_EXPORT ServiceWorkerReadFromCacheJob
void GetResponseInfo(net::HttpResponseInfo* info) override;
int GetResponseCode() const override;
void SetExtraRequestHeaders(const net::HttpRequestHeaders& headers) override;
- int ReadRawData(net::IOBuffer* buf, int buf_size) override;
+ bool ReadRawData(net::IOBuffer* buf, int buf_size, int* bytes_read) override;
// Reader completion callbacks.
void OnReadInfoComplete(int result);
void OnReadComplete(int result);
// Helpers
- void StartAsync();
const net::HttpResponseInfo* http_info() const;
bool is_range_request() const { return range_requested_.IsValid(); }
void SetupRangeResponse(int response_data_size);
diff --git a/content/browser/service_worker/service_worker_url_request_job.cc b/content/browser/service_worker/service_worker_url_request_job.cc
index 143e354..15c85a1 100644
--- a/content/browser/service_worker/service_worker_url_request_job.cc
+++ b/content/browser/service_worker/service_worker_url_request_job.cc
@@ -203,44 +203,50 @@ void ServiceWorkerURLRequestJob::SetExtraRequestHeaders(
byte_range_ = ranges[0];
}
-int ServiceWorkerURLRequestJob::ReadRawData(net::IOBuffer* buf, int buf_size) {
+bool ServiceWorkerURLRequestJob::ReadRawData(net::IOBuffer* buf,
+ int buf_size,
+ int* bytes_read) {
DCHECK(buf);
DCHECK_GE(buf_size, 0);
+ DCHECK(bytes_read);
DCHECK(waiting_stream_url_.is_empty());
-
- int bytes_read = 0;
-
if (stream_.get()) {
- switch (stream_->ReadRawData(buf, buf_size, &bytes_read)) {
+ switch (stream_->ReadRawData(buf, buf_size, bytes_read)) {
case Stream::STREAM_HAS_DATA:
- DCHECK_GT(bytes_read, 0);
- return bytes_read;
+ DCHECK_GT(*bytes_read, 0);
+ return true;
case Stream::STREAM_COMPLETE:
- DCHECK_EQ(0, bytes_read);
+ DCHECK(!*bytes_read);
RecordResult(ServiceWorkerMetrics::REQUEST_JOB_STREAM_RESPONSE);
- return 0;
+ return true;
case Stream::STREAM_EMPTY:
stream_pending_buffer_ = buf;
stream_pending_buffer_size_ = buf_size;
- return net::ERR_IO_PENDING;
+ SetStatus(net::URLRequestStatus(net::URLRequestStatus::IO_PENDING, 0));
+ return false;
case Stream::STREAM_ABORTED:
// Handle this as connection reset.
RecordResult(ServiceWorkerMetrics::REQUEST_JOB_ERROR_STREAM_ABORTED);
- return net::ERR_CONNECTION_RESET;
+ NotifyDone(net::URLRequestStatus(net::URLRequestStatus::FAILED,
+ net::ERR_CONNECTION_RESET));
+ return false;
}
NOTREACHED();
- return net::ERR_FAILED;
+ return false;
}
- if (!blob_request_)
- return 0;
- blob_request_->Read(buf, buf_size, &bytes_read);
+ if (!blob_request_) {
+ *bytes_read = 0;
+ return true;
+ }
+ blob_request_->Read(buf, buf_size, bytes_read);
net::URLRequestStatus status = blob_request_->status();
- if (status.status() != net::URLRequestStatus::SUCCESS)
- return status.error();
- if (bytes_read == 0)
+ SetStatus(status);
+ if (status.is_io_pending())
+ return false;
+ if (status.is_success() && *bytes_read == 0)
RecordResult(ServiceWorkerMetrics::REQUEST_JOB_BLOB_RESPONSE);
- return bytes_read;
+ return status.is_success();
}
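
The stream branch above maps content::Stream's four read states onto the bool contract. A condensed standalone sketch (hypothetical StreamState and MapStreamRead):

    #include <cstdio>

    enum class StreamState { HAS_DATA, COMPLETE, EMPTY, ABORTED };

    // HAS_DATA and COMPLETE finish synchronously (return true, with COMPLETE
    // signalling EOF via zero bytes); EMPTY parks the buffer and waits for
    // OnDataAvailable (false + IO_PENDING status); ABORTED fails the job
    // (false + NotifyDone with ERR_CONNECTION_RESET).
    bool MapStreamRead(StreamState state, int stream_bytes, int* bytes_read) {
      switch (state) {
        case StreamState::HAS_DATA:
          *bytes_read = stream_bytes;  // > 0
          return true;
        case StreamState::COMPLETE:
          *bytes_read = 0;             // zero signals completion
          return true;
        case StreamState::EMPTY:
          return false;                // pending: buffer parked for later
        case StreamState::ABORTED:
          return false;                // failed: treated as connection reset
      }
      return false;  // unreachable; keeps compilers satisfied
    }

    int main() {
      int n = 0;
      std::printf("%d %d\n",
                  MapStreamRead(StreamState::HAS_DATA, 100, &n) ? 1 : 0, n);
      std::printf("%d %d\n",
                  MapStreamRead(StreamState::COMPLETE, 0, &n) ? 1 : 0, n);
    }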
// TODO(falken): Refactor Blob and Stream specific handling to separate classes.
@@ -286,18 +292,29 @@ void ServiceWorkerURLRequestJob::OnResponseStarted(net::URLRequest* request) {
void ServiceWorkerURLRequestJob::OnReadCompleted(net::URLRequest* request,
int bytes_read) {
+ SetStatus(request->status());
if (!request->status().is_success()) {
RecordResult(ServiceWorkerMetrics::REQUEST_JOB_ERROR_BLOB_READ);
- } else if (bytes_read == 0) {
+ NotifyDone(request->status());
+ return;
+ }
+
+ if (bytes_read == 0) {
+ // Protect because NotifyReadComplete() can destroy |this|.
+ scoped_refptr<ServiceWorkerURLRequestJob> protect(this);
RecordResult(ServiceWorkerMetrics::REQUEST_JOB_BLOB_RESPONSE);
+ NotifyReadComplete(bytes_read);
+ NotifyDone(request->status());
+ return;
}
- net::URLRequestStatus status = request->status();
- ReadRawDataComplete(status.is_success() ? bytes_read : status.error());
+ NotifyReadComplete(bytes_read);
}
// Overrides for Stream reading -----------------------------------------------
void ServiceWorkerURLRequestJob::OnDataAvailable(Stream* stream) {
+ // Clear the IO_PENDING status.
+ SetStatus(net::URLRequestStatus());
// Do nothing if stream_pending_buffer_ is empty, i.e. there's no ReadRawData
// operation waiting for IO completion.
if (!stream_pending_buffer_.get())
@@ -306,15 +323,15 @@ void ServiceWorkerURLRequestJob::OnDataAvailable(Stream* stream) {
// stream_pending_buffer_ is set to the IOBuffer instance provided to
// ReadRawData() by URLRequestJob.
- int result = 0;
+ int bytes_read = 0;
switch (stream_->ReadRawData(stream_pending_buffer_.get(),
- stream_pending_buffer_size_, &result)) {
+ stream_pending_buffer_size_, &bytes_read)) {
case Stream::STREAM_HAS_DATA:
- DCHECK_GT(result, 0);
+ DCHECK_GT(bytes_read, 0);
break;
case Stream::STREAM_COMPLETE:
// Calling NotifyReadComplete with 0 signals completion.
- DCHECK(!result);
+ DCHECK(!bytes_read);
RecordResult(ServiceWorkerMetrics::REQUEST_JOB_STREAM_RESPONSE);
break;
case Stream::STREAM_EMPTY:
@@ -322,8 +339,9 @@ void ServiceWorkerURLRequestJob::OnDataAvailable(Stream* stream) {
break;
case Stream::STREAM_ABORTED:
// Handle this as connection reset.
- result = net::ERR_CONNECTION_RESET;
RecordResult(ServiceWorkerMetrics::REQUEST_JOB_ERROR_STREAM_ABORTED);
+ NotifyDone(net::URLRequestStatus(net::URLRequestStatus::FAILED,
+ net::ERR_CONNECTION_RESET));
break;
}
@@ -331,7 +349,7 @@ void ServiceWorkerURLRequestJob::OnDataAvailable(Stream* stream) {
// safe for the observer to read.
stream_pending_buffer_ = nullptr;
stream_pending_buffer_size_ = 0;
- ReadRawDataComplete(result);
+ NotifyReadComplete(bytes_read);
}
void ServiceWorkerURLRequestJob::OnStreamRegistered(Stream* stream) {
@@ -627,7 +645,7 @@ void ServiceWorkerURLRequestJob::DidDispatchFetchEvent(
// error.
if (response.status_code == 0) {
RecordStatusZeroResponseError(response.error);
- NotifyStartError(
+ NotifyDone(
net::URLRequestStatus(net::URLRequestStatus::FAILED, net::ERR_FAILED));
return;
}
diff --git a/content/browser/service_worker/service_worker_url_request_job.h b/content/browser/service_worker/service_worker_url_request_job.h
index 2a50c94..4815c0b 100644
--- a/content/browser/service_worker/service_worker_url_request_job.h
+++ b/content/browser/service_worker/service_worker_url_request_job.h
@@ -87,7 +87,7 @@ class CONTENT_EXPORT ServiceWorkerURLRequestJob
void GetLoadTimingInfo(net::LoadTimingInfo* load_timing_info) const override;
int GetResponseCode() const override;
void SetExtraRequestHeaders(const net::HttpRequestHeaders& headers) override;
- int ReadRawData(net::IOBuffer* buf, int buf_size) override;
+ bool ReadRawData(net::IOBuffer* buf, int buf_size, int* bytes_read) override;
// net::URLRequest::Delegate overrides that read the blob from the
// ServiceWorkerFetchResponse.
diff --git a/content/browser/service_worker/service_worker_write_to_cache_job.cc b/content/browser/service_worker/service_worker_write_to_cache_job.cc
index ba605c0..24bcd0f 100644
--- a/content/browser/service_worker/service_worker_write_to_cache_job.cc
+++ b/content/browser/service_worker/service_worker_write_to_cache_job.cc
@@ -7,7 +7,6 @@
#include "base/bind.h"
#include "base/callback.h"
#include "base/strings/stringprintf.h"
-#include "base/thread_task_runner_handle.h"
#include "base/trace_event/trace_event.h"
#include "content/browser/service_worker/service_worker_cache_writer.h"
#include "content/browser/service_worker/service_worker_context_core.h"
@@ -48,7 +47,7 @@ const char kServiceWorkerAllowed[] = "Service-Worker-Allowed";
// developers.
// TODO(falken): Redesign this class so we don't have to fail at the network
// stack layer just to cancel the update.
-const net::Error kIdenticalScriptError = net::ERR_FILE_EXISTS;
+const int kIdenticalScriptError = net::ERR_FILE_EXISTS;
} // namespace
@@ -80,18 +79,11 @@ ServiceWorkerWriteToCacheJob::~ServiceWorkerWriteToCacheJob() {
}
void ServiceWorkerWriteToCacheJob::Start() {
- base::ThreadTaskRunnerHandle::Get()->PostTask(
- FROM_HERE, base::Bind(&ServiceWorkerWriteToCacheJob::StartAsync,
- weak_factory_.GetWeakPtr()));
-}
-
-void ServiceWorkerWriteToCacheJob::StartAsync() {
TRACE_EVENT_ASYNC_BEGIN1("ServiceWorker",
"ServiceWorkerWriteToCacheJob::ExecutingJob",
this,
"URL", request_->url().spec());
if (!context_) {
- // NotifyStartError is not safe to call synchronously in Start().
NotifyStartError(
net::URLRequestStatus(net::URLRequestStatus::FAILED, net::ERR_FAILED));
return;
@@ -116,9 +108,8 @@ void ServiceWorkerWriteToCacheJob::Kill() {
has_been_killed_ = true;
net_request_.reset();
if (did_notify_started_) {
- net::Error error = NotifyFinishedCaching(
- net::URLRequestStatus::FromError(net::ERR_ABORTED), kKilledError);
- DCHECK_EQ(net::ERR_ABORTED, error);
+ NotifyFinishedCaching(net::URLRequestStatus::FromError(net::ERR_ABORTED),
+ kKilledError);
}
writer_.reset();
context_.reset();
@@ -165,20 +156,40 @@ void ServiceWorkerWriteToCacheJob::SetExtraRequestHeaders(
net_request_->SetExtraRequestHeaders(headers);
}
-int ServiceWorkerWriteToCacheJob::ReadRawData(net::IOBuffer* buf,
- int buf_size) {
- int bytes_read = 0;
- net::URLRequestStatus status = ReadNetData(buf, buf_size, &bytes_read);
+bool ServiceWorkerWriteToCacheJob::ReadRawData(net::IOBuffer* buf,
+ int buf_size,
+ int* bytes_read) {
+ net::URLRequestStatus status = ReadNetData(buf, buf_size, bytes_read);
+ SetStatus(status);
if (status.is_io_pending())
- return net::ERR_IO_PENDING;
+ return false;
+
+ if (!status.is_success()) {
+ NotifyDoneHelper(status, kFetchScriptError);
+ return false;
+ }
+
+ HandleNetData(*bytes_read);
+ status = GetStatus();
+
+ // Synchronous EOFs that do not replace the incumbent entry are considered
+ // failures. Since normally the URLRequestJob's status would be set by
+ // ReadNetData or HandleNetData, this code has to manually fix up the status
+ // to match the failure this function is about to return.
+ if (status.status() == net::URLRequestStatus::SUCCESS && *bytes_read == 0 &&
+ !cache_writer_->did_replace()) {
+ status = net::URLRequestStatus::FromError(kIdenticalScriptError);
+ }
if (!status.is_success()) {
- net::Error error = NotifyFinishedCaching(status, kFetchScriptError);
- DCHECK_EQ(status.error(), error);
- return error;
+ NotifyDoneHelper(status, "");
+ return false;
}
- return HandleNetData(bytes_read);
+  // URLRequestStatus::is_success() means "SUCCESS or IO_PENDING", but the
+  // contract of this function is "return true for synchronous successes
+  // only", so it is important to test against SUCCESS explicitly here.
+ return status.status() == net::URLRequestStatus::SUCCESS;
}
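
That final comparison hinges on a subtlety of URLRequestStatus: is_success() is true for both SUCCESS and IO_PENDING. A tiny sketch (hypothetical St enum) of why returning is_success() here would be wrong:

    #include <cstdio>
    #include <initializer_list>

    enum class St { SUCCESS, IO_PENDING, FAILED };  // URLRequestStatus sketch

    bool is_success(St s) {  // mirrors URLRequestStatus::is_success()
      return s == St::SUCCESS || s == St::IO_PENDING;
    }

    int main() {
      // ReadRawData must return true only for synchronous completion, so the
      // explicit SUCCESS comparison matters: for IO_PENDING, is_success() is
      // true but the correct return value is false.
      for (St s : {St::SUCCESS, St::IO_PENDING, St::FAILED})
        std::printf("is_success=%d  correct_return=%d\n",
                    is_success(s) ? 1 : 0, s == St::SUCCESS ? 1 : 0);
    }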
const net::HttpResponseInfo* ServiceWorkerWriteToCacheJob::http_info() const {
@@ -233,9 +244,9 @@ void ServiceWorkerWriteToCacheJob::OnReceivedRedirect(
TRACE_EVENT0("ServiceWorker",
"ServiceWorkerWriteToCacheJob::OnReceivedRedirect");
// Script resources can't redirect.
- NotifyStartErrorHelper(net::URLRequestStatus(net::URLRequestStatus::FAILED,
- net::ERR_UNSAFE_REDIRECT),
- kRedirectError);
+ NotifyDoneHelper(net::URLRequestStatus(net::URLRequestStatus::FAILED,
+ net::ERR_UNSAFE_REDIRECT),
+ kRedirectError);
}
void ServiceWorkerWriteToCacheJob::OnAuthRequired(
@@ -245,7 +256,7 @@ void ServiceWorkerWriteToCacheJob::OnAuthRequired(
TRACE_EVENT0("ServiceWorker",
"ServiceWorkerWriteToCacheJob::OnAuthRequired");
// TODO(michaeln): Pass this thru to our jobs client.
- NotifyStartErrorHelper(
+ NotifyDoneHelper(
net::URLRequestStatus(net::URLRequestStatus::FAILED, net::ERR_FAILED),
kClientAuthenticationError);
}
@@ -258,7 +269,7 @@ void ServiceWorkerWriteToCacheJob::OnCertificateRequested(
"ServiceWorkerWriteToCacheJob::OnCertificateRequested");
// TODO(michaeln): Pass this thru to our jobs client.
// see NotifyCertificateRequested.
- NotifyStartErrorHelper(
+ NotifyDoneHelper(
net::URLRequestStatus(net::URLRequestStatus::FAILED, net::ERR_FAILED),
kClientAuthenticationError);
}
@@ -272,9 +283,9 @@ void ServiceWorkerWriteToCacheJob::OnSSLCertificateError(
"ServiceWorkerWriteToCacheJob::OnSSLCertificateError");
// TODO(michaeln): Pass this thru to our jobs client,
// see NotifySSLCertificateError.
- NotifyStartErrorHelper(net::URLRequestStatus(net::URLRequestStatus::FAILED,
- net::ERR_INSECURE_RESPONSE),
- kSSLError);
+ NotifyDoneHelper(net::URLRequestStatus(net::URLRequestStatus::FAILED,
+ net::ERR_INSECURE_RESPONSE),
+ kSSLError);
}
void ServiceWorkerWriteToCacheJob::OnBeforeNetworkStart(
@@ -290,15 +301,15 @@ void ServiceWorkerWriteToCacheJob::OnResponseStarted(
net::URLRequest* request) {
DCHECK_EQ(net_request_.get(), request);
if (!request->status().is_success()) {
- NotifyStartErrorHelper(request->status(), kFetchScriptError);
+ NotifyDoneHelper(request->status(), kFetchScriptError);
return;
}
if (request->GetResponseCode() / 100 != 2) {
std::string error_message =
base::StringPrintf(kBadHTTPResponseError, request->GetResponseCode());
- NotifyStartErrorHelper(net::URLRequestStatus(net::URLRequestStatus::FAILED,
- net::ERR_INVALID_RESPONSE),
- error_message);
+ NotifyDoneHelper(net::URLRequestStatus(net::URLRequestStatus::FAILED,
+ net::ERR_INVALID_RESPONSE),
+ error_message);
// TODO(michaeln): Instead of error'ing immediately, send the net
// response to our consumer, just don't cache it?
return;
@@ -309,10 +320,9 @@ void ServiceWorkerWriteToCacheJob::OnResponseStarted(
const net::HttpNetworkSession::Params* session_params =
request->context()->GetNetworkSessionParams();
if (!session_params || !session_params->ignore_certificate_errors) {
- NotifyStartErrorHelper(
- net::URLRequestStatus(net::URLRequestStatus::FAILED,
- net::ERR_INSECURE_RESPONSE),
- kSSLError);
+ NotifyDoneHelper(net::URLRequestStatus(net::URLRequestStatus::FAILED,
+ net::ERR_INSECURE_RESPONSE),
+ kSSLError);
return;
}
}
@@ -327,10 +337,9 @@ void ServiceWorkerWriteToCacheJob::OnResponseStarted(
mime_type.empty()
? kNoMIMEError
: base::StringPrintf(kBadMIMEError, mime_type.c_str());
- NotifyStartErrorHelper(
- net::URLRequestStatus(net::URLRequestStatus::FAILED,
- net::ERR_INSECURE_RESPONSE),
- error_message);
+ NotifyDoneHelper(net::URLRequestStatus(net::URLRequestStatus::FAILED,
+ net::ERR_INSECURE_RESPONSE),
+ error_message);
return;
}
@@ -366,31 +375,52 @@ void ServiceWorkerWriteToCacheJob::OnWriteHeadersComplete(net::Error error) {
NotifyHeadersComplete();
}
+void ServiceWorkerWriteToCacheJob::HandleNetData(int bytes_read) {
+ io_buffer_bytes_ = bytes_read;
+ net::Error error = cache_writer_->MaybeWriteData(
+ io_buffer_.get(), bytes_read,
+ base::Bind(&ServiceWorkerWriteToCacheJob::OnWriteDataComplete,
+ weak_factory_.GetWeakPtr()));
+ SetStatus(net::URLRequestStatus::FromError(error));
+
+ // In case of ERR_IO_PENDING, this logic is done in OnWriteDataComplete.
+ if (error != net::ERR_IO_PENDING && bytes_read == 0) {
+ NotifyFinishedCaching(net::URLRequestStatus::FromError(error),
+ std::string());
+ }
+}
+
void ServiceWorkerWriteToCacheJob::OnWriteDataComplete(net::Error error) {
SetStatus(net::URLRequestStatus::FromError(error));
DCHECK_NE(net::ERR_IO_PENDING, error);
- if (io_buffer_bytes_ == 0)
- error = NotifyFinishedCaching(net::URLRequestStatus::FromError(error), "");
- ReadRawDataComplete(error == net::OK ? io_buffer_bytes_ : error);
+ if (io_buffer_bytes_ == 0) {
+ NotifyDoneHelper(net::URLRequestStatus::FromError(error), std::string());
+ }
+ NotifyReadComplete(error == net::OK ? io_buffer_bytes_ : error);
}
void ServiceWorkerWriteToCacheJob::OnReadCompleted(net::URLRequest* request,
int bytes_read) {
DCHECK_EQ(net_request_.get(), request);
-
- int result;
if (bytes_read < 0) {
DCHECK(!request->status().is_success());
- result = NotifyFinishedCaching(request->status(), kFetchScriptError);
- } else {
- result = HandleNetData(bytes_read);
- }
-
- // ReadRawDataComplete will be called in OnWriteDataComplete, so return early.
- if (result == net::ERR_IO_PENDING)
+ NotifyDoneHelper(request->status(), kFetchScriptError);
return;
-
- ReadRawDataComplete(result);
+ }
+ HandleNetData(bytes_read);
+  // HandleNetData can cause the status of this job to change. If the status
+  // changes to IO_PENDING, HandleNetData still has IO in flight, and
+  // NotifyReadComplete will be called later by the appropriate callback.
+ if (!GetStatus().is_io_pending()) {
+ int result = GetStatus().status() == net::URLRequestStatus::SUCCESS
+ ? bytes_read
+ : GetStatus().error();
+    // If bytes_read is 0, HandleNetData completed synchronously and this job
+    // is at EOF.
+ if (bytes_read == 0)
+ NotifyDoneHelper(GetStatus(), std::string());
+ NotifyReadComplete(result);
+ }
}
bool ServiceWorkerWriteToCacheJob::CheckPathRestriction(
@@ -404,57 +434,43 @@ bool ServiceWorkerWriteToCacheJob::CheckPathRestriction(
if (!ServiceWorkerUtils::IsPathRestrictionSatisfied(
version_->scope(), url_,
has_header ? &service_worker_allowed : nullptr, &error_message)) {
- NotifyStartErrorHelper(net::URLRequestStatus(net::URLRequestStatus::FAILED,
- net::ERR_INSECURE_RESPONSE),
- error_message);
+ NotifyDoneHelper(net::URLRequestStatus(net::URLRequestStatus::FAILED,
+ net::ERR_INSECURE_RESPONSE),
+ error_message);
return false;
}
return true;
}
-int ServiceWorkerWriteToCacheJob::HandleNetData(int bytes_read) {
- io_buffer_bytes_ = bytes_read;
- net::Error error = cache_writer_->MaybeWriteData(
- io_buffer_.get(), bytes_read,
- base::Bind(&ServiceWorkerWriteToCacheJob::OnWriteDataComplete,
- weak_factory_.GetWeakPtr()));
-
- // In case of ERR_IO_PENDING, this logic is done in OnWriteDataComplete.
- if (error != net::ERR_IO_PENDING && bytes_read == 0) {
- error = NotifyFinishedCaching(net::URLRequestStatus::FromError(error),
- std::string());
- }
- return error == net::OK ? bytes_read : error;
-}
-
-void ServiceWorkerWriteToCacheJob::NotifyStartErrorHelper(
+void ServiceWorkerWriteToCacheJob::NotifyDoneHelper(
const net::URLRequestStatus& status,
const std::string& status_message) {
DCHECK(!status.is_io_pending());
- net::Error error = NotifyFinishedCaching(status, status_message);
- // The special case mentioned in NotifyFinishedCaching about script being
- // identical does not apply here, since the entire body needs to be read
- // before this is relevant.
- DCHECK_EQ(status.error(), error);
+ // Note that NotifyFinishedCaching has logic in it to detect the special case
+ // mentioned below as well.
+ NotifyFinishedCaching(status, status_message);
net::URLRequestStatus reported_status = status;
std::string reported_status_message = status_message;
+ // A strange special case: requests that successfully fetch the entire
+ // ServiceWorker and write it back, but which did not replace the incumbent
+ // script because the new script was identical, are considered to have failed.
+ if (status.is_success() && !cache_writer_->did_replace()) {
+ reported_status = net::URLRequestStatus::FromError(kIdenticalScriptError);
+ reported_status_message = "";
+ }
+
SetStatus(reported_status);
- NotifyStartError(reported_status);
+ NotifyDone(reported_status);
}
-net::Error ServiceWorkerWriteToCacheJob::NotifyFinishedCaching(
+void ServiceWorkerWriteToCacheJob::NotifyFinishedCaching(
net::URLRequestStatus status,
const std::string& status_message) {
- net::Error result = static_cast<net::Error>(status.error());
if (did_notify_finished_)
- return result;
-
- int size = -1;
- if (status.is_success())
- size = cache_writer_->bytes_written();
+ return;
// If all the calls to MaybeWriteHeaders/MaybeWriteData succeeded, but the
// incumbent entry wasn't actually replaced because the new entry was
@@ -462,18 +478,17 @@ net::Error ServiceWorkerWriteToCacheJob::NotifyFinishedCaching(
// exists.
if (status.status() == net::URLRequestStatus::SUCCESS &&
!cache_writer_->did_replace()) {
- result = kIdenticalScriptError;
- status = net::URLRequestStatus::FromError(result);
+ status = net::URLRequestStatus::FromError(kIdenticalScriptError);
version_->SetStartWorkerStatusCode(SERVICE_WORKER_ERROR_EXISTS);
- version_->script_cache_map()->NotifyFinishedCaching(url_, size, status,
- std::string());
- } else {
- version_->script_cache_map()->NotifyFinishedCaching(url_, size, status,
- status_message);
}
+ int size = -1;
+ if (status.is_success())
+ size = cache_writer_->bytes_written();
+
+ version_->script_cache_map()->NotifyFinishedCaching(url_, size, status,
+ status_message);
did_notify_finished_ = true;
- return result;
}
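
The identical-script special case handled here can be summarized in a few lines (hypothetical CacheOutcome and FinalError; the numeric error values are placeholders, not the real net error codes):

    #include <cstdio>

    // A fully successful fetch that did not replace the incumbent entry is
    // reported as an error (kIdenticalScriptError, i.e. net::ERR_FILE_EXISTS
    // in the patch above) so the update is cancelled rather than installed.
    struct CacheOutcome {
      bool fetch_succeeded;
      bool did_replace;  // cache_writer_->did_replace() in the code above
    };

    int FinalError(const CacheOutcome& o) {
      constexpr int kOk = 0;
      constexpr int kFetchError = -2;            // placeholder
      constexpr int kIdenticalScriptError = -3;  // placeholder
      if (!o.fetch_succeeded)
        return kFetchError;
      return o.did_replace ? kOk : kIdenticalScriptError;
    }

    int main() {
      std::printf("%d\n", FinalError({true, true}));   // 0: script installed
      std::printf("%d\n", FinalError({true, false}));  // identical: "error"
    }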
scoped_ptr<ServiceWorkerResponseReader>
diff --git a/content/browser/service_worker/service_worker_write_to_cache_job.h b/content/browser/service_worker/service_worker_write_to_cache_job.h
index e614912..9227239 100644
--- a/content/browser/service_worker/service_worker_write_to_cache_job.h
+++ b/content/browser/service_worker/service_worker_write_to_cache_job.h
@@ -64,7 +64,6 @@ class CONTENT_EXPORT ServiceWorkerWriteToCacheJob
// net::URLRequestJob overrides
void Start() override;
- void StartAsync();
void Kill() override;
net::LoadState GetLoadState() const override;
bool GetCharset(std::string* charset) override;
@@ -72,7 +71,7 @@ class CONTENT_EXPORT ServiceWorkerWriteToCacheJob
void GetResponseInfo(net::HttpResponseInfo* info) override;
int GetResponseCode() const override;
void SetExtraRequestHeaders(const net::HttpRequestHeaders& headers) override;
- int ReadRawData(net::IOBuffer* buf, int buf_size) override;
+ bool ReadRawData(net::IOBuffer* buf, int buf_size, int* bytes_read) override;
const net::HttpResponseInfo* http_info() const;
@@ -110,22 +109,18 @@ class CONTENT_EXPORT ServiceWorkerWriteToCacheJob
bool CheckPathRestriction(net::URLRequest* request);
// Writes network data back to the script cache if needed, and notifies the
- // script cache of fetch completion at EOF. This function returns
- // net::IO_PENDING if the IO is to be completed asynchronously, returns a
- // negative number that represents a corresponding net error code (other than
- // net::IO_PENDING) if an error occurred, or returns a non-negative number
- // that represents the number of network bytes read. If the return value is
- // non-negative, all of the data in |io_buffer_| has been written back to the
- // script cache if necessary.
- int HandleNetData(int bytes_read);
-
- void NotifyStartErrorHelper(const net::URLRequestStatus& status,
- const std::string& status_message);
-
- // Returns an error code that is passed in through |status| or a new one if an
- // additional error is found.
- net::Error NotifyFinishedCaching(net::URLRequestStatus status,
- const std::string& status_message);
+ // script cache of fetch completion at EOF. This function might need to do
+ // asynchronous IO; if so, it signals this through setting the URLRequestJob's
+ // status to IO_PENDING. After this function returns, if the URLRequestJob
+ // isn't IO_PENDING, all of the data in |io_buffer_| has been written back to
+ // the script cache if necessary.
+ void HandleNetData(int bytes_read);
+
+ void NotifyDoneHelper(const net::URLRequestStatus& status,
+ const std::string& status_message);
+
+ void NotifyFinishedCaching(net::URLRequestStatus status,
+ const std::string& status_message);
scoped_ptr<ServiceWorkerResponseReader> CreateCacheResponseReader();
scoped_ptr<ServiceWorkerResponseWriter> CreateCacheResponseWriter();
@@ -145,7 +140,6 @@ class CONTENT_EXPORT ServiceWorkerWriteToCacheJob
bool has_been_killed_;
bool did_notify_started_;
bool did_notify_finished_;
-
base::WeakPtrFactory<ServiceWorkerWriteToCacheJob> weak_factory_;
DISALLOW_COPY_AND_ASSIGN(ServiceWorkerWriteToCacheJob);
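
The hunks above restore NotifyFinishedCaching to a void function that derives
the reported size from the request status instead of threading an error code
through the return value. A minimal standalone model of that logic is sketched
below; Status and CacheWriter are hypothetical stand-ins for the Chromium
types (net::URLRequestStatus and ServiceWorkerResponseWriter), not the real
API.

    #include <iostream>

    // Hypothetical stand-ins for net::URLRequestStatus and the cache writer.
    struct Status {
      bool success;
      bool is_success() const { return success; }
    };

    struct CacheWriter {
      int bytes_written() const { return 1024; }  // pretend 1 KiB was cached
    };

    // Mirrors the reverted logic: report the byte count only on success,
    // -1 otherwise, and always notify exactly once.
    void NotifyFinishedCaching(const Status& status, const CacheWriter& writer) {
      int size = -1;
      if (status.is_success())
        size = writer.bytes_written();
      std::cout << "NotifyFinishedCaching(size=" << size << ")\n";
    }

    int main() {
      NotifyFinishedCaching(Status{true}, CacheWriter{});   // size=1024
      NotifyFinishedCaching(Status{false}, CacheWriter{});  // size=-1
    }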
diff --git a/content/browser/streams/stream_url_request_job.cc b/content/browser/streams/stream_url_request_job.cc
index e26fe35..0392d10 100644
--- a/content/browser/streams/stream_url_request_job.cc
+++ b/content/browser/streams/stream_url_request_job.cc
@@ -40,6 +40,8 @@ StreamURLRequestJob::~StreamURLRequestJob() {
}
void StreamURLRequestJob::OnDataAvailable(Stream* stream) {
+ // Clear the IO_PENDING status.
+ SetStatus(net::URLRequestStatus());
// Do nothing if pending_buffer_ is empty, i.e. there's no ReadRawData()
// operation waiting for IO completion.
if (!pending_buffer_.get())
@@ -48,22 +50,24 @@ void StreamURLRequestJob::OnDataAvailable(Stream* stream) {
// pending_buffer_ is set to the IOBuffer instance provided to ReadRawData()
// by URLRequestJob.
- int result = 0;
+ int bytes_read;
switch (stream_->ReadRawData(pending_buffer_.get(), pending_buffer_size_,
- &result)) {
+ &bytes_read)) {
case Stream::STREAM_HAS_DATA:
- DCHECK_GT(result, 0);
+ DCHECK_GT(bytes_read, 0);
break;
case Stream::STREAM_COMPLETE:
- // Ensure ReadRawData gives net::OK.
- DCHECK_EQ(net::OK, result);
+ // Ensure this: calling NotifyReadComplete with 0 signals completion.
+ bytes_read = 0;
break;
case Stream::STREAM_EMPTY:
NOTREACHED();
break;
case Stream::STREAM_ABORTED:
// Handle this as connection reset.
- result = net::ERR_CONNECTION_RESET;
+ NotifyDone(net::URLRequestStatus(net::URLRequestStatus::FAILED,
+ net::ERR_CONNECTION_RESET));
break;
}
@@ -72,9 +76,8 @@ void StreamURLRequestJob::OnDataAvailable(Stream* stream) {
pending_buffer_ = NULL;
pending_buffer_size_ = 0;
- if (result > 0)
- total_bytes_read_ += result;
- ReadRawDataComplete(result);
+ total_bytes_read_ += bytes_read;
+ NotifyReadComplete(bytes_read);
}
// net::URLRequestJob methods.
@@ -91,40 +94,43 @@ void StreamURLRequestJob::Kill() {
ClearStream();
}
-int StreamURLRequestJob::ReadRawData(net::IOBuffer* buf, int buf_size) {
- // TODO(ellyjones): This is not right. The old code returned true here, but
- // ReadRawData's old contract was to return true only for synchronous
- // successes, which had the effect of treating all errors as synchronous EOFs.
- // See https://crbug.com/508957
+bool StreamURLRequestJob::ReadRawData(net::IOBuffer* buf,
+ int buf_size,
+ int* bytes_read) {
if (request_failed_)
- return 0;
+ return true;
DCHECK(buf);
+ DCHECK(bytes_read);
int to_read = buf_size;
if (max_range_ && to_read) {
if (to_read + total_bytes_read_ > max_range_)
to_read = max_range_ - total_bytes_read_;
- if (to_read == 0)
- return 0;
+ if (to_read <= 0) {
+ *bytes_read = 0;
+ return true;
+ }
}
- int bytes_read = 0;
- switch (stream_->ReadRawData(buf, to_read, &bytes_read)) {
+ switch (stream_->ReadRawData(buf, to_read, bytes_read)) {
case Stream::STREAM_HAS_DATA:
case Stream::STREAM_COMPLETE:
- total_bytes_read_ += bytes_read;
- return bytes_read;
+ total_bytes_read_ += *bytes_read;
+ return true;
case Stream::STREAM_EMPTY:
pending_buffer_ = buf;
pending_buffer_size_ = to_read;
- return net::ERR_IO_PENDING;
+ SetStatus(net::URLRequestStatus(net::URLRequestStatus::IO_PENDING, 0));
+ return false;
case Stream::STREAM_ABORTED:
// Handle this as connection reset.
- return net::ERR_CONNECTION_RESET;
+ NotifyDone(net::URLRequestStatus(net::URLRequestStatus::FAILED,
+ net::ERR_CONNECTION_RESET));
+ return false;
}
NOTREACHED();
- return net::ERR_FAILED;
+ return false;
}
bool StreamURLRequestJob::GetMimeType(std::string* mime_type) const {
@@ -183,8 +189,13 @@ void StreamURLRequestJob::DidStart() {
void StreamURLRequestJob::NotifyFailure(int error_code) {
request_failed_ = true;
- // This method can only be called before headers are set.
- DCHECK(!headers_set_);
+ // If we have already returned the headers on success, we can't change them
+ // now. Instead, we just error out.
+ if (headers_set_) {
+ NotifyDone(
+ net::URLRequestStatus(net::URLRequestStatus::FAILED, error_code));
+ return;
+ }
// TODO(zork): Share these with BlobURLRequestJob.
net::HttpStatusCode status_code = net::HTTP_INTERNAL_SERVER_ERROR;
diff --git a/content/browser/streams/stream_url_request_job.h b/content/browser/streams/stream_url_request_job.h
index f22311d..05c9551 100644
--- a/content/browser/streams/stream_url_request_job.h
+++ b/content/browser/streams/stream_url_request_job.h
@@ -29,7 +29,7 @@ class CONTENT_EXPORT StreamURLRequestJob
// net::URLRequestJob methods.
void Start() override;
void Kill() override;
- int ReadRawData(net::IOBuffer* buf, int buf_size) override;
+ bool ReadRawData(net::IOBuffer* buf, int buf_size, int* bytes_read) override;
bool GetMimeType(std::string* mime_type) const override;
void GetResponseInfo(net::HttpResponseInfo* info) override;
int GetResponseCode() const override;
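
The StreamURLRequestJob changes above reinstate the pending-buffer pattern: on
STREAM_EMPTY the job stashes the caller's buffer, marks the request
IO_PENDING, and finishes the read later from OnDataAvailable. A self-contained
sketch of that handshake follows; FakeStream and MiniStreamJob are illustrative
stand-ins, not the real content:: classes.

    #include <algorithm>
    #include <cstring>
    #include <iostream>
    #include <string>

    enum class StreamState { kHasData, kEmpty };  // simplified from the real Stream

    // Illustrative stand-in for content::Stream: bytes may arrive after a read.
    struct FakeStream {
      std::string pending;
      StreamState Read(char* buf, int size, int* read) {
        if (pending.empty())
          return StreamState::kEmpty;
        *read = std::min<int>(size, pending.size());
        std::memcpy(buf, pending.data(), *read);
        pending.erase(0, *read);
        return StreamState::kHasData;
      }
    };

    struct MiniStreamJob {
      FakeStream* stream;
      char* pending_buf = nullptr;
      int pending_size = 0;
      bool io_pending = false;

      bool ReadRawData(char* buf, int size, int* read) {
        if (stream->Read(buf, size, read) == StreamState::kHasData)
          return true;            // synchronous data
        pending_buf = buf;        // stash the caller's buffer...
        pending_size = size;
        io_pending = true;        // ...and SetStatus(IO_PENDING) in the real job
        return false;
      }

      void OnDataAvailable() {    // called later, when the stream has bytes
        if (!pending_buf)
          return;
        io_pending = false;       // clear IO_PENDING before completing
        int read = 0;
        stream->Read(pending_buf, pending_size, &read);
        pending_buf = nullptr;
        std::cout << "NotifyReadComplete(" << read << ")\n";
      }
    };

    int main() {
      FakeStream stream;
      MiniStreamJob job{&stream};
      char buf[16];
      int n = 0;
      std::cout << "sync read ok: " << job.ReadRawData(buf, 16, &n) << "\n";  // 0
      stream.pending = "hello";
      job.OnDataAvailable();  // prints NotifyReadComplete(5)
    }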
diff --git a/content/browser/webui/url_data_manager_backend.cc b/content/browser/webui/url_data_manager_backend.cc
index 485aff6..44302df 100644
--- a/content/browser/webui/url_data_manager_backend.cc
+++ b/content/browser/webui/url_data_manager_backend.cc
@@ -119,7 +119,7 @@ class URLRequestChromeJob : public net::URLRequestJob {
// net::URLRequestJob implementation.
void Start() override;
void Kill() override;
- int ReadRawData(net::IOBuffer* buf, int buf_size) override;
+ bool ReadRawData(net::IOBuffer* buf, int buf_size, int* bytes_read) override;
bool GetMimeType(std::string* mime_type) const override;
int GetResponseCode() const override;
void GetResponseInfo(net::HttpResponseInfo* info) override;
@@ -190,9 +190,8 @@ class URLRequestChromeJob : public net::URLRequestJob {
bool RequiresUnsafeEval() const;
// Do the actual copy from data_ (the data we're serving) into |buf|.
- // Separate from ReadRawData so we can handle async I/O. Returns the number of
- // bytes read.
- int CompleteRead(net::IOBuffer* buf, int buf_size);
+ // Separate from ReadRawData so we can handle async I/O.
+ void CompleteRead(net::IOBuffer* buf, int buf_size, int* bytes_read);
// The actual data we're serving. NULL until it's been fetched.
scoped_refptr<base::RefCountedMemory> data_;
@@ -337,16 +336,22 @@ void URLRequestChromeJob::MimeTypeAvailable(const std::string& mime_type) {
void URLRequestChromeJob::DataAvailable(base::RefCountedMemory* bytes) {
TRACE_EVENT_ASYNC_END0("browser", "DataManager:Request", this);
if (bytes) {
+ // The request completed, and we have all the data.
+ // Clear any IO pending status.
+ SetStatus(net::URLRequestStatus());
+
data_ = bytes;
+ int bytes_read;
if (pending_buf_.get()) {
CHECK(pending_buf_->data());
- int result = CompleteRead(pending_buf_.get(), pending_buf_size_);
+ CompleteRead(pending_buf_.get(), pending_buf_size_, &bytes_read);
pending_buf_ = NULL;
- ReadRawDataComplete(result);
+ NotifyReadComplete(bytes_read);
}
} else {
// The request failed.
- ReadRawDataComplete(net::ERR_FAILED);
+ NotifyDone(
+ net::URLRequestStatus(net::URLRequestStatus::FAILED, net::ERR_FAILED));
}
}
@@ -354,21 +359,27 @@ base::WeakPtr<URLRequestChromeJob> URLRequestChromeJob::AsWeakPtr() {
return weak_factory_.GetWeakPtr();
}
-int URLRequestChromeJob::ReadRawData(net::IOBuffer* buf, int buf_size) {
+bool URLRequestChromeJob::ReadRawData(net::IOBuffer* buf,
+ int buf_size,
+ int* bytes_read) {
if (!data_.get()) {
+ SetStatus(net::URLRequestStatus(net::URLRequestStatus::IO_PENDING, 0));
DCHECK(!pending_buf_.get());
CHECK(buf->data());
pending_buf_ = buf;
pending_buf_size_ = buf_size;
- return net::ERR_IO_PENDING;
+ return false; // Tell the caller we're still waiting for data.
}
// Otherwise, the data is available.
- return CompleteRead(buf, buf_size);
+ CompleteRead(buf, buf_size, bytes_read);
+ return true;
}
-int URLRequestChromeJob::CompleteRead(net::IOBuffer* buf, int buf_size) {
- int remaining = data_->size() - data_offset_;
+void URLRequestChromeJob::CompleteRead(net::IOBuffer* buf,
+ int buf_size,
+ int* bytes_read) {
+ int remaining = static_cast<int>(data_->size()) - data_offset_;
if (buf_size > remaining)
buf_size = remaining;
if (buf_size > 0) {
@@ -380,7 +391,7 @@ int URLRequestChromeJob::CompleteRead(net::IOBuffer* buf, int buf_size) {
memcpy(buf->data(), data_->front() + data_offset_, buf_size);
data_offset_ += buf_size;
}
- return buf_size;
+ *bytes_read = buf_size;
}
void URLRequestChromeJob::CheckStoragePartitionMatches(
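
CompleteRead above goes back to reporting bytes through an out-parameter, with
a zero count signalling EOF. The loop below shows how a caller drains data
under that convention; |data| and |data_offset| are simplified stand-ins for
the job's data_ and data_offset_ members.

    #include <cstring>
    #include <iostream>
    #include <string>

    // Minimal model of URLRequestChromeJob::CompleteRead after this revert:
    // the byte count moves from the return value to an out-parameter.
    void CompleteRead(const std::string& data, int* data_offset,
                      char* buf, int buf_size, int* bytes_read) {
      int remaining = static_cast<int>(data.size()) - *data_offset;
      if (buf_size > remaining)
        buf_size = remaining;
      if (buf_size > 0) {
        std::memcpy(buf, data.data() + *data_offset, buf_size);
        *data_offset += buf_size;
      }
      *bytes_read = buf_size;
    }

    int main() {
      std::string data = "chrome://resource";
      int offset = 0, n = 0;
      char buf[8];
      while (true) {
        CompleteRead(data, &offset, buf, sizeof(buf), &n);
        if (n == 0) break;  // n == 0 signals EOF under this contract
        std::cout.write(buf, n);
      }
      std::cout << "\n";
    }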
diff --git a/content/test/net/url_request_abort_on_end_job.cc b/content/test/net/url_request_abort_on_end_job.cc
index 6d237a8..f7ff5a9 100644
--- a/content/test/net/url_request_abort_on_end_job.cc
+++ b/content/test/net/url_request_abort_on_end_job.cc
@@ -8,7 +8,6 @@
#include "base/compiler_specific.h"
#include "base/location.h"
-#include "base/numerics/safe_conversions.h"
#include "base/single_thread_task_runner.h"
#include "base/strings/string_util.h"
#include "base/thread_task_runner_handle.h"
@@ -112,16 +111,20 @@ void URLRequestAbortOnEndJob::Start() {
weak_factory_.GetWeakPtr()));
}
-int URLRequestAbortOnEndJob::ReadRawData(net::IOBuffer* buf, int max_bytes) {
+bool URLRequestAbortOnEndJob::ReadRawData(net::IOBuffer* buf,
+ const int max_bytes,
+ int* bytes_read) {
if (!sent_data_) {
- max_bytes =
- std::min(max_bytes, base::checked_cast<int>(sizeof(kPageContent)));
- std::memcpy(buf->data(), kPageContent, max_bytes);
+ *bytes_read = std::min(size_t(max_bytes), sizeof(kPageContent));
+ std::memcpy(buf->data(), kPageContent, *bytes_read);
sent_data_ = true;
- return max_bytes;
+ return true;
}
- return net::ERR_CONNECTION_ABORTED;
+ SetStatus(net::URLRequestStatus(net::URLRequestStatus::FAILED,
+ net::ERR_CONNECTION_ABORTED));
+ *bytes_read = -1;
+ return false;
}
} // namespace content
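
URLRequestAbortOnEndJob now signals its terminal error by setting a FAILED
status, writing -1 into |bytes_read|, and returning false, rather than
returning a net error code. A toy version of that two-phase behavior follows;
JobStatus and MiniAbortJob are hypothetical names for this sketch.

    #include <iostream>

    enum class JobStatus { kSuccess, kFailed };

    // Toy model: the first read "succeeds" with data; the second aborts by
    // setting a FAILED status and returning false with *bytes_read == -1.
    struct MiniAbortJob {
      bool sent_data = false;
      JobStatus status = JobStatus::kSuccess;

      bool ReadRawData(int* bytes_read) {
        if (!sent_data) {
          *bytes_read = 5;  // pretend the page body was copied into the buffer
          sent_data = true;
          return true;
        }
        status = JobStatus::kFailed;  // SetStatus(FAILED, ERR_CONNECTION_ABORTED)
        *bytes_read = -1;
        return false;
      }
    };

    int main() {
      MiniAbortJob job;
      int n = 0;
      std::cout << job.ReadRawData(&n) << " bytes=" << n << "\n";  // 1 bytes=5
      std::cout << job.ReadRawData(&n) << " bytes=" << n << "\n";  // 0 bytes=-1
    }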
diff --git a/content/test/net/url_request_abort_on_end_job.h b/content/test/net/url_request_abort_on_end_job.h
index 8a95533..88e348c 100644
--- a/content/test/net/url_request_abort_on_end_job.h
+++ b/content/test/net/url_request_abort_on_end_job.h
@@ -29,7 +29,7 @@ class URLRequestAbortOnEndJob : public net::URLRequestJob {
void Start() override;
bool GetMimeType(std::string* mime_type) const override;
void GetResponseInfo(net::HttpResponseInfo* info) override;
- int ReadRawData(net::IOBuffer* buf, int buf_size) override;
+ bool ReadRawData(net::IOBuffer* buf, int buf_size, int* bytes_read) override;
static void AddUrlHandler();
diff --git a/ios/web/webui/url_data_manager_ios_backend.cc b/ios/web/webui/url_data_manager_ios_backend.cc
index d5117c2..cc3e3e9 100644
--- a/ios/web/webui/url_data_manager_ios_backend.cc
+++ b/ios/web/webui/url_data_manager_ios_backend.cc
@@ -96,7 +96,7 @@ class URLRequestChromeJob : public net::URLRequestJob {
// net::URLRequestJob implementation.
void Start() override;
void Kill() override;
- int ReadRawData(net::IOBuffer* buf, int buf_size) override;
+ bool ReadRawData(net::IOBuffer* buf, int buf_size, int* bytes_read) override;
bool GetMimeType(std::string* mime_type) const override;
int GetResponseCode() const override;
void GetResponseInfo(net::HttpResponseInfo* info) override;
@@ -142,7 +142,7 @@ class URLRequestChromeJob : public net::URLRequestJob {
// Do the actual copy from data_ (the data we're serving) into |buf|.
// Separate from ReadRawData so we can handle async I/O.
- int CompleteRead(net::IOBuffer* buf, int buf_size);
+ void CompleteRead(net::IOBuffer* buf, int buf_size, int* bytes_read);
// The actual data we're serving. NULL until it's been fetched.
scoped_refptr<base::RefCountedMemory> data_;
@@ -291,46 +291,58 @@ void URLRequestChromeJob::MimeTypeAvailable(const std::string& mime_type) {
void URLRequestChromeJob::DataAvailable(base::RefCountedMemory* bytes) {
TRACE_EVENT_ASYNC_END0("browser", "DataManager:Request", this);
if (bytes) {
+ // The request completed, and we have all the data.
+ // Clear any IO pending status.
+ SetStatus(net::URLRequestStatus());
+
data_ = bytes;
+ int bytes_read;
if (pending_buf_.get()) {
CHECK(pending_buf_->data());
- int rv = CompleteRead(pending_buf_.get(), pending_buf_size_);
+ CompleteRead(pending_buf_.get(), pending_buf_size_, &bytes_read);
pending_buf_ = NULL;
- ReadRawDataComplete(rv);
+ NotifyReadComplete(bytes_read);
}
} else {
- ReadRawDataComplete(net::ERR_FAILED);
+ // The request failed.
+ NotifyDone(
+ net::URLRequestStatus(net::URLRequestStatus::FAILED, net::ERR_FAILED));
}
}
-int URLRequestChromeJob::ReadRawData(net::IOBuffer* buf, int buf_size) {
+bool URLRequestChromeJob::ReadRawData(net::IOBuffer* buf,
+ int buf_size,
+ int* bytes_read) {
if (!data_.get()) {
+ SetStatus(net::URLRequestStatus(net::URLRequestStatus::IO_PENDING, 0));
DCHECK(!pending_buf_.get());
CHECK(buf->data());
pending_buf_ = buf;
pending_buf_size_ = buf_size;
- return net::ERR_IO_PENDING; // Tell the caller we're still waiting for
- // data.
+ return false; // Tell the caller we're still waiting for data.
}
// Otherwise, the data is available.
- return CompleteRead(buf, buf_size);
+ CompleteRead(buf, buf_size, bytes_read);
+ return true;
}
-int URLRequestChromeJob::CompleteRead(net::IOBuffer* buf, int buf_size) {
+void URLRequestChromeJob::CompleteRead(net::IOBuffer* buf,
+ int buf_size,
+ int* bytes_read) {
// http://crbug.com/373841
char url_buf[128];
base::strlcpy(url_buf, request_->url().spec().c_str(), arraysize(url_buf));
base::debug::Alias(url_buf);
- int remaining = data_->size() - data_offset_;
+ int remaining = static_cast<int>(data_->size()) - data_offset_;
if (buf_size > remaining)
buf_size = remaining;
if (buf_size > 0) {
memcpy(buf->data(), data_->front() + data_offset_, buf_size);
data_offset_ += buf_size;
}
- return buf_size;
+ *bytes_read = buf_size;
}
namespace {
diff --git a/mojo/services/network/url_loader_impl_apptest.cc b/mojo/services/network/url_loader_impl_apptest.cc
index 0b9de67..0fe1749 100644
--- a/mojo/services/network/url_loader_impl_apptest.cc
+++ b/mojo/services/network/url_loader_impl_apptest.cc
@@ -49,28 +49,32 @@ class TestURLRequestJob : public net::URLRequestJob {
void Start() override { status_ = STARTED; }
- int ReadRawData(net::IOBuffer* buf, int buf_size) override {
+ bool ReadRawData(net::IOBuffer* buf, int buf_size, int* bytes_read) override {
status_ = READING;
buf_size_ = buf_size;
- return net::ERR_IO_PENDING;
+ SetStatus(net::URLRequestStatus(net::URLRequestStatus::IO_PENDING, 0));
+ return false;
}
void NotifyHeadersComplete() { net::URLRequestJob::NotifyHeadersComplete(); }
void NotifyReadComplete(int bytes_read) {
if (bytes_read < 0) {
- // Map errors to net::ERR_FAILED.
- ReadRawDataComplete(net::ERR_FAILED);
+ NotifyDone(net::URLRequestStatus(
+ net::URLRequestStatus::FromError(net::ERR_FAILED)));
+ net::URLRequestJob::NotifyReadComplete(0);
// Set this after calling NotifyReadComplete since that ends up calling
// ReadRawData.
status_ = COMPLETED;
} else if (bytes_read == 0) {
- ReadRawDataComplete(bytes_read);
+ NotifyDone(net::URLRequestStatus());
+ net::URLRequestJob::NotifyReadComplete(bytes_read);
// Set this after calling NotifyReadComplete since that ends up calling
// ReadRawData.
status_ = COMPLETED;
} else {
- ReadRawDataComplete(bytes_read);
+ SetStatus(net::URLRequestStatus());
+ net::URLRequestJob::NotifyReadComplete(bytes_read);
// Set this after calling NotifyReadComplete since that ends up calling
// ReadRawData.
status_ = STARTED;
diff --git a/net/test/url_request/url_request_failed_job.cc b/net/test/url_request/url_request_failed_job.cc
index d7c479b..e4ac6a6 100644
--- a/net/test/url_request/url_request_failed_job.cc
+++ b/net/test/url_request/url_request_failed_job.cc
@@ -97,20 +97,40 @@ URLRequestFailedJob::URLRequestFailedJob(URLRequest* request,
}
void URLRequestFailedJob::Start() {
- base::ThreadTaskRunnerHandle::Get()->PostTask(
- FROM_HERE,
- base::Bind(&URLRequestFailedJob::StartAsync, weak_factory_.GetWeakPtr()));
+ if (phase_ == START) {
+ if (net_error_ != ERR_IO_PENDING) {
+ NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED, net_error_));
+ return;
+ }
+ SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));
+ return;
+ }
+ response_info_.headers = new net::HttpResponseHeaders("HTTP/1.1 200 OK");
+ NotifyHeadersComplete();
}
-int URLRequestFailedJob::ReadRawData(IOBuffer* buf, int buf_size) {
+bool URLRequestFailedJob::ReadRawData(IOBuffer* buf,
+ int buf_size,
+ int* bytes_read) {
CHECK(phase_ == READ_SYNC || phase_ == READ_ASYNC);
- if (net_error_ == ERR_IO_PENDING || phase_ == READ_SYNC)
- return net_error_;
+ if (net_error_ != ERR_IO_PENDING && phase_ == READ_SYNC) {
+ NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, net_error_));
+ return false;
+ }
+
+ SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));
+
+ if (net_error_ == ERR_IO_PENDING)
+ return false;
+
+ DCHECK_EQ(READ_ASYNC, phase_);
+ DCHECK_NE(ERR_IO_PENDING, net_error_);
base::ThreadTaskRunnerHandle::Get()->PostTask(
- FROM_HERE, base::Bind(&URLRequestFailedJob::ReadRawDataComplete,
- weak_factory_.GetWeakPtr(), net_error_));
- return ERR_IO_PENDING;
+ FROM_HERE,
+ base::Bind(&URLRequestFailedJob::NotifyDone, weak_factory_.GetWeakPtr(),
+ URLRequestStatus(URLRequestStatus::FAILED, net_error_)));
+ return false;
}
int URLRequestFailedJob::GetResponseCode() const {
@@ -175,17 +195,4 @@ GURL URLRequestFailedJob::GetMockHttpsUrlForHostname(
URLRequestFailedJob::~URLRequestFailedJob() {
}
-void URLRequestFailedJob::StartAsync() {
- if (phase_ == START) {
- if (net_error_ != ERR_IO_PENDING) {
- NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED, net_error_));
- return;
- }
- SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));
- return;
- }
- response_info_.headers = new net::HttpResponseHeaders("HTTP/1.1 200 OK");
- NotifyHeadersComplete();
-}
-
} // namespace net
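
URLRequestFailedJob's Start and ReadRawData now branch on the phase directly:
synchronous-read errors call NotifyDone inline, while asynchronous ones post
the NotifyDone as a task. A sketch with a plain task queue standing in for
base::ThreadTaskRunnerHandle; all names here are hypothetical.

    #include <functional>
    #include <iostream>
    #include <queue>

    enum class Phase { kReadSync, kReadAsync };

    struct MiniFailedJob {
      Phase phase;
      int net_error;  // e.g. -101, standing in for a real net error code
      std::queue<std::function<void()>>* tasks;

      bool ReadRawData(int* /*bytes_read*/) {
        if (phase == Phase::kReadSync) {
          std::cout << "NotifyDone(FAILED, " << net_error << ") [sync]\n";
          return false;
        }
        // kReadAsync: mark IO pending, then fail from a posted task.
        tasks->push([e = net_error] {
          std::cout << "NotifyDone(FAILED, " << e << ") [async]\n";
        });
        return false;
      }
    };

    int main() {
      std::queue<std::function<void()>> tasks;
      int n = 0;
      MiniFailedJob{Phase::kReadSync, -101, &tasks}.ReadRawData(&n);
      MiniFailedJob{Phase::kReadAsync, -101, &tasks}.ReadRawData(&n);
      while (!tasks.empty()) {  // drain the "message loop"
        tasks.front()();
        tasks.pop();
      }
    }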
diff --git a/net/test/url_request/url_request_failed_job.h b/net/test/url_request/url_request_failed_job.h
index 45b1911..0413111 100644
--- a/net/test/url_request/url_request_failed_job.h
+++ b/net/test/url_request/url_request_failed_job.h
@@ -39,7 +39,7 @@ class URLRequestFailedJob : public URLRequestJob {
// URLRequestJob implementation:
void Start() override;
- int ReadRawData(IOBuffer* buf, int buf_size) override;
+ bool ReadRawData(IOBuffer* buf, int buf_size, int* bytes_read) override;
int GetResponseCode() const override;
void GetResponseInfo(HttpResponseInfo* info) override;
@@ -71,7 +71,6 @@ class URLRequestFailedJob : public URLRequestJob {
protected:
~URLRequestFailedJob() override;
- void StartAsync();
private:
HttpResponseInfo response_info_;
diff --git a/net/test/url_request/url_request_mock_data_job.cc b/net/test/url_request/url_request_mock_data_job.cc
index b9ef385..9549242 100644
--- a/net/test/url_request/url_request_mock_data_job.cc
+++ b/net/test/url_request/url_request_mock_data_job.cc
@@ -104,12 +104,15 @@ void URLRequestMockDataJob::Start() {
URLRequestMockDataJob::~URLRequestMockDataJob() {
}
-int URLRequestMockDataJob::ReadRawData(IOBuffer* buf, int buf_size) {
- int bytes_read =
- std::min(static_cast<size_t>(buf_size), data_.length() - data_offset_);
- memcpy(buf->data(), data_.c_str() + data_offset_, bytes_read);
- data_offset_ += bytes_read;
- return bytes_read;
+bool URLRequestMockDataJob::ReadRawData(IOBuffer* buf,
+ int buf_size,
+ int* bytes_read) {
+ DCHECK(bytes_read);
+ *bytes_read = static_cast<int>(
+ std::min(static_cast<size_t>(buf_size), data_.length() - data_offset_));
+ memcpy(buf->data(), data_.c_str() + data_offset_, *bytes_read);
+ data_offset_ += *bytes_read;
+ return true;
}
int URLRequestMockDataJob::GetResponseCode() const {
diff --git a/net/test/url_request/url_request_mock_data_job.h b/net/test/url_request/url_request_mock_data_job.h
index 14ad665..3d84e37 100644
--- a/net/test/url_request/url_request_mock_data_job.h
+++ b/net/test/url_request/url_request_mock_data_job.h
@@ -24,7 +24,7 @@ class URLRequestMockDataJob : public URLRequestJob {
int data_repeat_count);
void Start() override;
- int ReadRawData(IOBuffer* buf, int buf_size) override;
+ bool ReadRawData(IOBuffer* buf, int buf_size, int* bytes_read) override;
int GetResponseCode() const override;
void GetResponseInfo(HttpResponseInfo* info) override;
diff --git a/net/test/url_request/url_request_slow_download_job.cc b/net/test/url_request/url_request_slow_download_job.cc
index 52fd71c..f344feb 100644
--- a/net/test/url_request/url_request_slow_download_job.cc
+++ b/net/test/url_request/url_request_slow_download_job.cc
@@ -179,34 +179,39 @@ URLRequestSlowDownloadJob::FillBufferHelper(IOBuffer* buf,
return REQUEST_COMPLETE;
}
-int URLRequestSlowDownloadJob::ReadRawData(IOBuffer* buf, int buf_size) {
+bool URLRequestSlowDownloadJob::ReadRawData(IOBuffer* buf,
+ int buf_size,
+ int* bytes_read) {
if (base::LowerCaseEqualsASCII(kFinishDownloadUrl,
request_->url().spec().c_str()) ||
base::LowerCaseEqualsASCII(kErrorDownloadUrl,
request_->url().spec().c_str())) {
VLOG(10) << __FUNCTION__ << " called w/ kFinish/ErrorDownloadUrl.";
- return 0;
+ *bytes_read = 0;
+ return true;
}
VLOG(10) << __FUNCTION__ << " called at position " << bytes_already_sent_
<< " in the stream.";
- int bytes_read = 0;
- ReadStatus status = FillBufferHelper(buf, buf_size, &bytes_read);
+ ReadStatus status = FillBufferHelper(buf, buf_size, bytes_read);
switch (status) {
case BUFFER_FILLED:
- case REQUEST_COMPLETE:
- return bytes_read;
+ return true;
case REQUEST_BLOCKED:
buffer_ = buf;
buffer_size_ = buf_size;
+ SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));
base::ThreadTaskRunnerHandle::Get()->PostDelayedTask(
FROM_HERE, base::Bind(&URLRequestSlowDownloadJob::CheckDoneStatus,
weak_factory_.GetWeakPtr()),
base::TimeDelta::FromMilliseconds(100));
- return ERR_IO_PENDING;
+ return false;
+ case REQUEST_COMPLETE:
+ *bytes_read = 0;
+ return true;
}
NOTREACHED();
- return OK;
+ return true;
}
void URLRequestSlowDownloadJob::CheckDoneStatus() {
@@ -218,10 +223,12 @@ void URLRequestSlowDownloadJob::CheckDoneStatus() {
FillBufferHelper(buffer_.get(), buffer_size_, &bytes_written);
DCHECK_EQ(BUFFER_FILLED, status);
buffer_ = NULL; // Release the reference.
- ReadRawDataComplete(bytes_written);
+ SetStatus(URLRequestStatus());
+ NotifyReadComplete(bytes_written);
} else if (should_error_download_) {
VLOG(10) << __FUNCTION__ << " called w/ should_finish_ownload_ set.";
- ReadRawDataComplete(ERR_CONNECTION_RESET);
+ NotifyDone(
+ URLRequestStatus(URLRequestStatus::FAILED, ERR_CONNECTION_RESET));
} else {
base::ThreadTaskRunnerHandle::Get()->PostDelayedTask(
FROM_HERE, base::Bind(&URLRequestSlowDownloadJob::CheckDoneStatus,
diff --git a/net/test/url_request/url_request_slow_download_job.h b/net/test/url_request/url_request_slow_download_job.h
index fcd7f661..115a6ac 100644
--- a/net/test/url_request/url_request_slow_download_job.h
+++ b/net/test/url_request/url_request_slow_download_job.h
@@ -37,7 +37,7 @@ class URLRequestSlowDownloadJob : public URLRequestJob {
void Start() override;
bool GetMimeType(std::string* mime_type) const override;
void GetResponseInfo(HttpResponseInfo* info) override;
- int ReadRawData(IOBuffer* buf, int buf_size) override;
+ bool ReadRawData(IOBuffer* buf, int buf_size, int* bytes_read) override;
// Returns the current number of URLRequestSlowDownloadJobs that have
// not yet completed.
diff --git a/net/url_request/url_request_file_dir_job.cc b/net/url_request/url_request_file_dir_job.cc
index 1dafbf2..114672e 100644
--- a/net/url_request/url_request_file_dir_job.cc
+++ b/net/url_request/url_request_file_dir_job.cc
@@ -13,6 +13,7 @@
#include "base/thread_task_runner_handle.h"
#include "base/time/time.h"
#include "net/base/io_buffer.h"
+#include "net/base/net_errors.h"
#include "net/base/net_util.h"
#include "net/url_request/url_request_status.h"
#include "url/gurl.h"
@@ -65,19 +66,24 @@ void URLRequestFileDirJob::Kill() {
weak_factory_.InvalidateWeakPtrs();
}
-int URLRequestFileDirJob::ReadRawData(IOBuffer* buf, int buf_size) {
+bool URLRequestFileDirJob::ReadRawData(IOBuffer* buf,
+ int buf_size,
+ int* bytes_read) {
+ DCHECK(bytes_read);
+ *bytes_read = 0;
+
if (is_done())
- return 0;
+ return true;
- int bytes_read = 0;
- if (FillReadBuffer(buf->data(), buf_size, &bytes_read))
- return bytes_read;
+ if (FillReadBuffer(buf->data(), buf_size, bytes_read))
+ return true;
// We are waiting for more data
read_pending_ = true;
read_buffer_ = buf;
read_buffer_length_ = buf_size;
- return ERR_IO_PENDING;
+ SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));
+ return false;
}
bool URLRequestFileDirJob::GetMimeType(std::string* mime_type) const {
@@ -126,45 +132,40 @@ void URLRequestFileDirJob::OnListFile(
data.info.GetLastModifiedTime()));
// TODO(darin): coalesce more?
- CompleteRead(OK);
+ CompleteRead();
}
void URLRequestFileDirJob::OnListDone(int error) {
DCHECK(!canceled_);
- DCHECK_LE(error, OK);
- if (error == OK)
+ if (error != OK) {
+ read_pending_ = false;
+ NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, error));
+ } else {
list_complete_ = true;
- CompleteRead(static_cast<Error>(error));
+ CompleteRead();
+ }
}
URLRequestFileDirJob::~URLRequestFileDirJob() {}
-void URLRequestFileDirJob::CompleteRead(Error status) {
- DCHECK_LE(status, OK);
- DCHECK_NE(status, ERR_IO_PENDING);
-
- // Do nothing if there is no read pending.
- if (!read_pending_)
- return;
-
- int result = status;
- if (status == OK) {
- int filled_bytes = 0;
+void URLRequestFileDirJob::CompleteRead() {
+ if (read_pending_) {
+ int bytes_read;
if (FillReadBuffer(read_buffer_->data(), read_buffer_length_,
- &filled_bytes)) {
- result = filled_bytes;
+ &bytes_read)) {
// We completed the read, so reset the read buffer.
+ read_pending_ = false;
read_buffer_ = NULL;
read_buffer_length_ = 0;
+
+ SetStatus(URLRequestStatus());
+ NotifyReadComplete(bytes_read);
} else {
NOTREACHED();
// TODO: Better error code.
- result = ERR_FAILED;
+ NotifyDone(URLRequestStatus::FromError(ERR_FAILED));
}
}
-
- read_pending_ = false;
- ReadRawDataComplete(result);
}
bool URLRequestFileDirJob::FillReadBuffer(char* buf, int buf_size,
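
FillReadBuffer's contract is central to the URLRequestFileDirJob hunks above:
it returns true when it produced bytes or the listing is finished (EOF is true
with zero bytes), and false when the caller must park the read until more
entries arrive. A self-contained approximation; MiniDirJob is illustrative
only.

    #include <algorithm>
    #include <iostream>
    #include <string>

    // Stand-in for the job's buffering: directory entries accumulate in
    // |data_| as the lister reports files; FillReadBuffer drains it.
    struct MiniDirJob {
      std::string data_;
      bool list_complete_ = false;

      bool FillReadBuffer(char* buf, int buf_size, int* bytes_read) {
        *bytes_read = 0;
        if (!data_.empty()) {
          int n = std::min<int>(buf_size, data_.size());
          data_.copy(buf, n);
          data_.erase(0, n);
          *bytes_read = n;
          return true;
        }
        return list_complete_;  // true with 0 bytes == EOF; false == wait
      }
    };

    int main() {
      MiniDirJob job;
      char buf[32];
      int n = 0;
      std::cout << job.FillReadBuffer(buf, 32, &n) << "\n";            // 0: still listing
      job.data_ = "file_a\nfile_b\n";
      std::cout << job.FillReadBuffer(buf, 32, &n) << " n=" << n << "\n";  // 1 n=14
      job.list_complete_ = true;
      std::cout << job.FillReadBuffer(buf, 32, &n) << " n=" << n << "\n";  // 1 n=0: EOF
    }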
diff --git a/net/url_request/url_request_file_dir_job.h b/net/url_request/url_request_file_dir_job.h
index f7a7b45..067db81 100644
--- a/net/url_request/url_request_file_dir_job.h
+++ b/net/url_request/url_request_file_dir_job.h
@@ -10,7 +10,6 @@
#include "base/files/file_path.h"
#include "base/memory/weak_ptr.h"
#include "net/base/directory_lister.h"
-#include "net/base/net_errors.h"
#include "net/url_request/url_request_job.h"
namespace net {
@@ -30,7 +29,7 @@ class URLRequestFileDirJob
// Overridden from URLRequestJob:
void Start() override;
void Kill() override;
- int ReadRawData(IOBuffer* buf, int buf_size) override;
+ bool ReadRawData(IOBuffer* buf, int buf_size, int* bytes_read) override;
bool GetMimeType(std::string* mime_type) const override;
bool GetCharset(std::string* charset) override;
@@ -46,7 +45,7 @@ class URLRequestFileDirJob
// When we have data and a read has been pending, this function
// will fill the response buffer and notify the request
// appropriately.
- void CompleteRead(Error error);
+ void CompleteRead();
// Fills a buffer with the output.
bool FillReadBuffer(char* buf, int buf_size, int* bytes_read);
@@ -68,7 +67,6 @@ class URLRequestFileDirJob
bool read_pending_;
scoped_refptr<IOBuffer> read_buffer_;
int read_buffer_length_;
-
base::WeakPtrFactory<URLRequestFileDirJob> weak_factory_;
DISALLOW_COPY_AND_ASSIGN(URLRequestFileDirJob);
diff --git a/net/url_request/url_request_file_job.cc b/net/url_request/url_request_file_job.cc
index 2c5cf6a..06e1ba6 100644
--- a/net/url_request/url_request_file_job.cc
+++ b/net/url_request/url_request_file_job.cc
@@ -33,6 +33,7 @@
#include "net/base/io_buffer.h"
#include "net/base/load_flags.h"
#include "net/base/mime_util.h"
+#include "net/base/net_errors.h"
#include "net/filter/filter.h"
#include "net/http/http_util.h"
#include "net/url_request/url_request_error_job.h"
@@ -62,7 +63,6 @@ URLRequestFileJob::URLRequestFileJob(
stream_(new FileStream(file_task_runner)),
file_task_runner_(file_task_runner),
remaining_bytes_(0),
- range_parse_result_(OK),
weak_ptr_factory_(this) {}
void URLRequestFileJob::Start() {
@@ -83,17 +83,22 @@ void URLRequestFileJob::Kill() {
URLRequestJob::Kill();
}
-int URLRequestFileJob::ReadRawData(IOBuffer* dest, int dest_size) {
+bool URLRequestFileJob::ReadRawData(IOBuffer* dest,
+ int dest_size,
+ int* bytes_read) {
DCHECK_NE(dest_size, 0);
+ DCHECK(bytes_read);
DCHECK_GE(remaining_bytes_, 0);
if (remaining_bytes_ < dest_size)
- dest_size = remaining_bytes_;
+ dest_size = static_cast<int>(remaining_bytes_);
// If we should copy zero bytes because |remaining_bytes_| is zero, short
// circuit here.
- if (!dest_size)
- return 0;
+ if (!dest_size) {
+ *bytes_read = 0;
+ return true;
+ }
int rv = stream_->Read(dest,
dest_size,
@@ -101,11 +106,20 @@ int URLRequestFileJob::ReadRawData(IOBuffer* dest, int dest_size) {
weak_ptr_factory_.GetWeakPtr(),
make_scoped_refptr(dest)));
if (rv >= 0) {
+ // Data is immediately available.
+ *bytes_read = rv;
remaining_bytes_ -= rv;
DCHECK_GE(remaining_bytes_, 0);
+ return true;
}
- return rv;
+ // Otherwise, a read error occurred. We may just need to wait...
+ if (rv == ERR_IO_PENDING) {
+ SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));
+ } else {
+ NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, rv));
+ }
+ return false;
}
bool URLRequestFileJob::IsRedirectResponse(GURL* location,
@@ -165,10 +179,7 @@ void URLRequestFileJob::SetExtraRequestHeaders(
const HttpRequestHeaders& headers) {
std::string range_header;
if (headers.GetHeader(HttpRequestHeaders::kRange, &range_header)) {
- // This job only cares about the Range header. This method stashes the value
- // for later use in DidOpen(), which is responsible for some of the range
- // validation as well. NotifyStartError is not legal to call here since
- // the job has not started.
+ // We only care about "Range" header here.
std::vector<HttpByteRange> ranges;
if (HttpUtil::ParseRangeHeader(range_header, &ranges)) {
if (ranges.size() == 1) {
@@ -178,7 +189,8 @@ void URLRequestFileJob::SetExtraRequestHeaders(
// because we need to do multipart encoding here.
// TODO(hclam): decide whether we want to support multiple range
// requests.
- range_parse_result_ = net::ERR_REQUEST_RANGE_NOT_SATISFIABLE;
+ NotifyDone(URLRequestStatus(URLRequestStatus::FAILED,
+ ERR_REQUEST_RANGE_NOT_SATISFIABLE));
}
}
}
@@ -239,19 +251,13 @@ void URLRequestFileJob::DidFetchMetaInfo(const FileMetaInfo* meta_info) {
void URLRequestFileJob::DidOpen(int result) {
if (result != OK) {
- NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED, result));
- return;
- }
-
- if (range_parse_result_ != net::OK) {
- NotifyStartError(
- URLRequestStatus(URLRequestStatus::FAILED, range_parse_result_));
+ NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, result));
return;
}
if (!byte_range_.ComputeBounds(meta_info_.file_size)) {
- NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED,
- net::ERR_REQUEST_RANGE_NOT_SATISFIABLE));
+ NotifyDone(URLRequestStatus(URLRequestStatus::FAILED,
+ ERR_REQUEST_RANGE_NOT_SATISFIABLE));
return;
}
@@ -279,8 +285,8 @@ void URLRequestFileJob::DidOpen(int result) {
void URLRequestFileJob::DidSeek(int64 result) {
OnSeekComplete(result);
if (result != byte_range_.first_byte_position()) {
- NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED,
- ERR_REQUEST_RANGE_NOT_SATISFIABLE));
+ NotifyDone(URLRequestStatus(URLRequestStatus::FAILED,
+ ERR_REQUEST_RANGE_NOT_SATISFIABLE));
return;
}
@@ -289,7 +295,8 @@ void URLRequestFileJob::DidSeek(int64 result) {
}
void URLRequestFileJob::DidRead(scoped_refptr<IOBuffer> buf, int result) {
- if (result >= 0) {
+ if (result > 0) {
+ SetStatus(URLRequestStatus()); // Clear the IO_PENDING status.
remaining_bytes_ -= result;
DCHECK_GE(remaining_bytes_, 0);
}
@@ -297,7 +304,13 @@ void URLRequestFileJob::DidRead(scoped_refptr<IOBuffer> buf, int result) {
OnReadComplete(buf.get(), result);
buf = NULL;
- ReadRawDataComplete(result);
+ if (result == 0) {
+ NotifyDone(URLRequestStatus());
+ } else if (result < 0) {
+ NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, result));
+ }
+
+ NotifyReadComplete(result);
}
} // namespace net
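
The same three-way triage now appears in URLRequestFileJob above and recurs in
the FTP and HTTP jobs below: a non-negative result is returned synchronously,
ERR_IO_PENDING flips the request into IO_PENDING, and anything else becomes
NotifyDone(FAILED). Factored out as a standalone sketch; the constants happen
to match net/'s values, but treat this as illustrative rather than the real
API.

    #include <iostream>

    constexpr int ERR_IO_PENDING = -1;
    constexpr int ERR_FAILED = -2;

    enum class JobStatus { kSuccess, kIoPending, kFailed };

    // rv >= 0              -> synchronous data (0 == EOF), return true
    // rv == ERR_IO_PENDING -> SetStatus(IO_PENDING), return false
    // any other rv         -> NotifyDone(FAILED, rv), return false
    bool TriageReadResult(int rv, int* bytes_read, JobStatus* status) {
      if (rv >= 0) {
        *bytes_read = rv;
        *status = JobStatus::kSuccess;
        return true;
      }
      *status = rv == ERR_IO_PENDING ? JobStatus::kIoPending : JobStatus::kFailed;
      return false;
    }

    int main() {
      int n = 0;
      JobStatus st = JobStatus::kSuccess;
      std::cout << TriageReadResult(42, &n, &st) << " n=" << n << "\n";  // 1 n=42
      std::cout << TriageReadResult(ERR_IO_PENDING, &n, &st) << "\n";    // 0, pending
      std::cout << TriageReadResult(ERR_FAILED, &n, &st) << "\n";        // 0, failed
    }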
diff --git a/net/url_request/url_request_file_job.h b/net/url_request/url_request_file_job.h
index 97c6c21..0436dac 100644
--- a/net/url_request/url_request_file_job.h
+++ b/net/url_request/url_request_file_job.h
@@ -11,7 +11,6 @@
#include "base/files/file_path.h"
#include "base/memory/ref_counted.h"
#include "base/memory/weak_ptr.h"
-#include "net/base/net_errors.h"
#include "net/base/net_export.h"
#include "net/http/http_byte_range.h"
#include "net/url_request/url_request.h"
@@ -39,7 +38,7 @@ class NET_EXPORT URLRequestFileJob : public URLRequestJob {
// URLRequestJob:
void Start() override;
void Kill() override;
- int ReadRawData(IOBuffer* buf, int buf_size) override;
+ bool ReadRawData(IOBuffer* buf, int buf_size, int* bytes_read) override;
bool IsRedirectResponse(GURL* location, int* http_status_code) override;
Filter* SetupFilter() const override;
bool GetMimeType(std::string* mime_type) const override;
@@ -98,12 +97,9 @@ class NET_EXPORT URLRequestFileJob : public URLRequestJob {
FileMetaInfo meta_info_;
const scoped_refptr<base::TaskRunner> file_task_runner_;
- std::vector<HttpByteRange> byte_ranges_;
HttpByteRange byte_range_;
int64 remaining_bytes_;
- Error range_parse_result_;
-
base::WeakPtrFactory<URLRequestFileJob> weak_ptr_factory_;
DISALLOW_COPY_AND_ASSIGN(URLRequestFileJob);
diff --git a/net/url_request/url_request_ftp_job.cc b/net/url_request/url_request_ftp_job.cc
index e422d03..3a088bd 100644
--- a/net/url_request/url_request_ftp_job.cc
+++ b/net/url_request/url_request_ftp_job.cc
@@ -239,7 +239,7 @@ void URLRequestFtpJob::OnStartCompleted(int result) {
HandleAuthNeededResponse();
return;
} else {
- NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED, result));
+ NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, result));
}
}
@@ -251,7 +251,15 @@ void URLRequestFtpJob::OnStartCompletedAsync(int result) {
void URLRequestFtpJob::OnReadCompleted(int result) {
read_in_progress_ = false;
- ReadRawDataComplete(result);
+ if (result == 0) {
+ NotifyDone(URLRequestStatus());
+ } else if (result < 0) {
+ NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, result));
+ } else {
+ // Clear the IO_PENDING status.
+ SetStatus(URLRequestStatus());
+ }
+ NotifyReadComplete(result);
}
void URLRequestFtpJob::RestartTransactionWithAuth() {
@@ -344,12 +352,14 @@ UploadProgress URLRequestFtpJob::GetUploadProgress() const {
return UploadProgress();
}
-int URLRequestFtpJob::ReadRawData(IOBuffer* buf, int buf_size) {
+bool URLRequestFtpJob::ReadRawData(IOBuffer* buf,
+ int buf_size,
+ int* bytes_read) {
DCHECK_NE(buf_size, 0);
+ DCHECK(bytes_read);
DCHECK(!read_in_progress_);
int rv;
-
if (proxy_info_.is_direct()) {
rv = ftp_transaction_->Read(buf, buf_size,
base::Bind(&URLRequestFtpJob::OnReadCompleted,
@@ -360,9 +370,18 @@ int URLRequestFtpJob::ReadRawData(IOBuffer* buf, int buf_size) {
base::Unretained(this)));
}
- if (rv == ERR_IO_PENDING)
+ if (rv >= 0) {
+ *bytes_read = rv;
+ return true;
+ }
+
+ if (rv == ERR_IO_PENDING) {
read_in_progress_ = true;
- return rv;
+ SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));
+ } else {
+ NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, rv));
+ }
+ return false;
}
void URLRequestFtpJob::HandleAuthNeededResponse() {
diff --git a/net/url_request/url_request_ftp_job.h b/net/url_request/url_request_ftp_job.h
index 6315d8f..74caf72 100644
--- a/net/url_request/url_request_ftp_job.h
+++ b/net/url_request/url_request_ftp_job.h
@@ -71,7 +71,7 @@ class NET_EXPORT_PRIVATE URLRequestFtpJob : public URLRequestJob {
// TODO(ibrar): Yet to give another look at this function.
UploadProgress GetUploadProgress() const override;
- int ReadRawData(IOBuffer* buf, int buf_size) override;
+ bool ReadRawData(IOBuffer* buf, int buf_size, int* bytes_read) override;
void HandleAuthNeededResponse();
diff --git a/net/url_request/url_request_http_job.cc b/net/url_request/url_request_http_job.cc
index 0254a5a..a452ec2 100644
--- a/net/url_request/url_request_http_job.cc
+++ b/net/url_request/url_request_http_job.cc
@@ -414,6 +414,11 @@ void URLRequestHttpJob::NotifyHeadersComplete() {
URLRequestJob::NotifyHeadersComplete();
}
+void URLRequestHttpJob::NotifyDone(const URLRequestStatus& status) {
+ DoneWithRequest(FINISHED);
+ URLRequestJob::NotifyDone(status);
+}
+
void URLRequestHttpJob::DestroyTransaction() {
DCHECK(transaction_.get());
@@ -993,16 +998,19 @@ void URLRequestHttpJob::OnHeadersReceivedCallback(int result) {
void URLRequestHttpJob::OnReadCompleted(int result) {
read_in_progress_ = false;
- DCHECK_NE(ERR_IO_PENDING, result);
-
if (ShouldFixMismatchedContentLength(result))
result = OK;
- // EOF or error, done with this job.
- if (result <= 0)
- DoneWithRequest(FINISHED);
+ if (result == OK) {
+ NotifyDone(URLRequestStatus());
+ } else if (result < 0) {
+ NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, result));
+ } else {
+ // Clear the IO_PENDING status.
+ SetStatus(URLRequestStatus());
+ }
- ReadRawDataComplete(result);
+ NotifyReadComplete(result);
}
void URLRequestHttpJob::RestartTransactionWithAuth(
@@ -1328,8 +1336,11 @@ bool URLRequestHttpJob::ShouldFixMismatchedContentLength(int rv) const {
return false;
}
-int URLRequestHttpJob::ReadRawData(IOBuffer* buf, int buf_size) {
+bool URLRequestHttpJob::ReadRawData(IOBuffer* buf,
+ int buf_size,
+ int* bytes_read) {
DCHECK_NE(buf_size, 0);
+ DCHECK(bytes_read);
DCHECK(!read_in_progress_);
int rv = transaction_->Read(
@@ -1337,15 +1348,23 @@ int URLRequestHttpJob::ReadRawData(IOBuffer* buf, int buf_size) {
base::Bind(&URLRequestHttpJob::OnReadCompleted, base::Unretained(this)));
if (ShouldFixMismatchedContentLength(rv))
- rv = OK;
+ rv = 0;
- if (rv == 0 || (rv < 0 && rv != ERR_IO_PENDING))
- DoneWithRequest(FINISHED);
+ if (rv >= 0) {
+ *bytes_read = rv;
+ if (!rv)
+ DoneWithRequest(FINISHED);
+ return true;
+ }
- if (rv == ERR_IO_PENDING)
+ if (rv == ERR_IO_PENDING) {
read_in_progress_ = true;
+ SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));
+ } else {
+ NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, rv));
+ }
- return rv;
+ return false;
}
void URLRequestHttpJob::StopCaching() {
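
The new URLRequestHttpJob::NotifyDone above is a shadow, not an override:
URLRequestJob::NotifyDone is non-virtual, so the subclass declares a same-named
method that records its histograms and then delegates explicitly. A minimal
illustration of that pattern; BaseJob and HttpJob are hypothetical.

    #include <iostream>

    struct BaseJob {
      void NotifyDone() { std::cout << "base: request finished\n"; }
    };

    struct HttpJob : BaseJob {
      void NotifyDone() {  // shadows the base method, does not override it
        std::cout << "http: record histograms (DoneWithRequest)\n";
        BaseJob::NotifyDone();
      }
    };

    int main() {
      HttpJob job;
      job.NotifyDone();  // http bookkeeping, then the base notification
      // A call made through the base type skips the shadow, which is why
      // shadowing (unlike virtual dispatch) is fragile:
      static_cast<BaseJob&>(job).NotifyDone();
    }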
diff --git a/net/url_request/url_request_http_job.h b/net/url_request/url_request_http_job.h
index 62162b5..3d164c3 100644
--- a/net/url_request/url_request_http_job.h
+++ b/net/url_request/url_request_http_job.h
@@ -79,6 +79,9 @@ class NET_EXPORT_PRIVATE URLRequestHttpJob : public URLRequestJob {
// Shadows URLRequestJob's version of this method so we can grab cookies.
void NotifyHeadersComplete();
+ // Shadows URLRequestJob's method so we can record histograms.
+ void NotifyDone(const URLRequestStatus& status);
+
void DestroyTransaction();
void AddExtraHeaders();
@@ -130,7 +133,7 @@ class NET_EXPORT_PRIVATE URLRequestHttpJob : public URLRequestJob {
SSLPrivateKey* client_private_key) override;
void ContinueDespiteLastError() override;
void ResumeNetworkStart() override;
- int ReadRawData(IOBuffer* buf, int buf_size) override;
+ bool ReadRawData(IOBuffer* buf, int buf_size, int* bytes_read) override;
void StopCaching() override;
bool GetFullRequestHeaders(HttpRequestHeaders* headers) const override;
int64 GetTotalReceivedBytes() const override;
diff --git a/net/url_request/url_request_job.cc b/net/url_request/url_request_job.cc
index 9386c5a..39ccf31 100644
--- a/net/url_request/url_request_job.cc
+++ b/net/url_request/url_request_job.cc
@@ -101,46 +101,45 @@ void URLRequestJob::DetachRequest() {
request_ = NULL;
}
-// This function calls ReadRawData to get stream data. If a filter exists, it
-// passes the data to the attached filter. It then returns the output from
-// filter back to the caller.
+// This function calls ReadRawData to get stream data. If a filter exists, it
+// passes the data to the attached filter and then returns the filter's output
+// to the caller.
bool URLRequestJob::Read(IOBuffer* buf, int buf_size, int *bytes_read) {
+ bool rv = false;
+
DCHECK_LT(buf_size, 1000000); // Sanity check.
DCHECK(buf);
DCHECK(bytes_read);
DCHECK(filtered_read_buffer_.get() == NULL);
DCHECK_EQ(0, filtered_read_buffer_len_);
- Error error = OK;
*bytes_read = 0;
// Skip Filter if not present.
- if (!filter_) {
- error = ReadRawDataHelper(buf, buf_size, bytes_read);
+ if (!filter_.get()) {
+ rv = ReadRawDataHelper(buf, buf_size, bytes_read);
} else {
// Save the caller's buffers while we do IO
// in the filter's buffers.
filtered_read_buffer_ = buf;
filtered_read_buffer_len_ = buf_size;
- error = ReadFilteredData(bytes_read);
+ if (ReadFilteredData(bytes_read)) {
+ rv = true; // We have data to return.
- // Synchronous EOF from the filter.
- if (error == OK && *bytes_read == 0)
- DoneReading();
+ // It is fine to call DoneReading even if ReadFilteredData receives 0
+ // bytes from the net, but we avoid making that call if we know for
+ // sure that's the case (ReadRawDataHelper path).
+ if (*bytes_read == 0)
+ DoneReading();
+ } else {
+ rv = false; // Error, or a new IO is pending.
+ }
}
- if (error == OK) {
- // If URLRequestJob read zero bytes, the job is at EOF.
- if (*bytes_read == 0)
- NotifyDone(URLRequestStatus());
- } else if (error == ERR_IO_PENDING) {
- SetStatus(URLRequestStatus::FromError(ERR_IO_PENDING));
- } else {
- NotifyDone(URLRequestStatus::FromError(error));
- *bytes_read = -1;
- }
- return error == OK;
+ if (rv && *bytes_read == 0)
+ NotifyDone(URLRequestStatus());
+ return rv;
}
void URLRequestJob::StopCaching() {
@@ -481,21 +480,11 @@ void URLRequestJob::NotifyHeadersComplete() {
request_->NotifyResponseStarted();
}
-void URLRequestJob::ConvertResultToError(int result, Error* error, int* count) {
- if (result >= 0) {
- *error = OK;
- *count = result;
- } else {
- *error = static_cast<Error>(result);
- *count = 0;
- }
-}
-
-void URLRequestJob::ReadRawDataComplete(int result) {
+void URLRequestJob::NotifyReadComplete(int bytes_read) {
// TODO(cbentzel): Remove ScopedTracker below once crbug.com/475755 is fixed.
tracked_objects::ScopedTracker tracking_profile(
FROM_HERE_WITH_EXPLICIT_FUNCTION(
- "475755 URLRequestJob::RawReadCompleted"));
+ "475755 URLRequestJob::NotifyReadComplete"));
if (!request_ || !request_->has_delegate())
return; // The request was destroyed, so there is no more work to do.
@@ -508,52 +497,11 @@ void URLRequestJob::ReadRawDataComplete(int result) {
// The headers should be complete before reads complete
DCHECK(has_handled_response_);
- Error error;
- int bytes_read;
- ConvertResultToError(result, &error, &bytes_read);
-
- DCHECK_NE(ERR_IO_PENDING, error);
-
- // Synchronize the URLRequest state machine with the URLRequestJob state
- // machine. If this read succeeded, either the request is at EOF and the
- // URLRequest state machine goes to 'finished', or it is not and the
- // URLRequest state machine goes to 'success'. If the read failed, the
- // URLRequest state machine goes directly to 'finished'.
- //
- // Update the URLRequest's status first, so that NotifyReadCompleted has an
- // accurate view of the request.
- if (error == OK && bytes_read > 0) {
- SetStatus(URLRequestStatus());
- } else {
- NotifyDone(URLRequestStatus::FromError(error));
- }
-
- GatherRawReadStats(error, bytes_read);
-
- if (filter_.get() && error == OK) {
- int filter_bytes_read = 0;
- // Tell the filter that it has more data.
- PushInputToFilter(bytes_read);
+ OnRawReadComplete(bytes_read);
- // Filter the data.
- error = ReadFilteredData(&filter_bytes_read);
-
- if (!filter_bytes_read)
- DoneReading();
-
- DVLOG(1) << __FUNCTION__ << "() "
- << "\"" << (request_ ? request_->url().spec() : "???") << "\""
- << " pre bytes read = " << bytes_read
- << " pre total = " << prefilter_bytes_read_
- << " post total = " << postfilter_bytes_read_;
- bytes_read = filter_bytes_read;
- } else {
- DVLOG(1) << __FUNCTION__ << "() "
- << "\"" << (request_ ? request_->url().spec() : "???") << "\""
- << " pre bytes read = " << bytes_read
- << " pre total = " << prefilter_bytes_read_
- << " post total = " << postfilter_bytes_read_;
- }
+ // Don't notify if we had an error.
+ if (!request_->status().is_success())
+ return;
// When notifying the delegate, the delegate can release the request
// (and thus release 'this'). After calling to the delegate, we must
@@ -562,10 +510,25 @@ void URLRequestJob::ReadRawDataComplete(int result) {
// survival until we can get out of this method.
scoped_refptr<URLRequestJob> self_preservation(this);
- // NotifyReadCompleted should be called after SetStatus or NotifyDone updates
- // the status.
- if (error == OK)
+ if (filter_.get()) {
+ // Tell the filter that it has more data.
+ FilteredDataRead(bytes_read);
+
+ // Filter the data.
+ int filter_bytes_read = 0;
+ if (ReadFilteredData(&filter_bytes_read)) {
+ if (!filter_bytes_read)
+ DoneReading();
+ request_->NotifyReadCompleted(filter_bytes_read);
+ }
+ } else {
request_->NotifyReadCompleted(bytes_read);
+ }
+ DVLOG(1) << __FUNCTION__ << "() "
+ << "\"" << (request_ ? request_->url().spec() : "???") << "\""
+ << " pre bytes read = " << bytes_read
+ << " pre total = " << prefilter_bytes_read_
+ << " post total = " << postfilter_bytes_read_;
}
void URLRequestJob::NotifyStartError(const URLRequestStatus &status) {
@@ -592,7 +555,7 @@ void URLRequestJob::NotifyDone(const URLRequestStatus &status) {
// the response before getting here.
DCHECK(has_handled_response_ || !status.is_success());
- // As with RawReadCompleted, we need to take care to notice if we were
+ // As with NotifyReadComplete, we need to take care to notice if we were
// destroyed during a delegate callback.
if (request_) {
request_->set_is_pending(false);
@@ -675,8 +638,10 @@ void URLRequestJob::OnCallToDelegateComplete() {
request_->OnCallToDelegateComplete();
}
-int URLRequestJob::ReadRawData(IOBuffer* buf, int buf_size) {
- return 0;
+bool URLRequestJob::ReadRawData(IOBuffer* buf, int buf_size, int* bytes_read) {
+ DCHECK(bytes_read);
+ *bytes_read = 0;
+ return true;
}
void URLRequestJob::DoneReading() {
@@ -686,34 +651,38 @@ void URLRequestJob::DoneReading() {
void URLRequestJob::DoneReadingRedirectResponse() {
}
-void URLRequestJob::PushInputToFilter(int bytes_read) {
+void URLRequestJob::FilteredDataRead(int bytes_read) {
DCHECK(filter_);
filter_->FlushStreamBuffer(bytes_read);
}
-Error URLRequestJob::ReadFilteredData(int* bytes_read) {
+bool URLRequestJob::ReadFilteredData(int* bytes_read) {
DCHECK(filter_);
DCHECK(filtered_read_buffer_.get());
DCHECK_GT(filtered_read_buffer_len_, 0);
DCHECK_LT(filtered_read_buffer_len_, 1000000); // Sanity check.
- DCHECK(!raw_read_buffer_);
+ DCHECK(!raw_read_buffer_.get());
*bytes_read = 0;
- Error error = ERR_FAILED;
+ bool rv = false;
for (;;) {
if (is_done())
- return OK;
+ return true;
if (!filter_needs_more_output_space_ && !filter_->stream_data_len()) {
// We don't have any raw data to work with, so read from the transaction.
int filtered_data_read;
- error = ReadRawDataForFilter(&filtered_data_read);
- // If ReadRawDataForFilter returned some data, fall through to the case
- // below; otherwise, return early.
- if (error != OK || filtered_data_read == 0)
- return error;
- filter_->FlushStreamBuffer(filtered_data_read);
+ if (ReadRawDataForFilter(&filtered_data_read)) {
+ if (filtered_data_read > 0) {
+ // Give data to filter.
+ filter_->FlushStreamBuffer(filtered_data_read);
+ } else {
+ return true; // EOF.
+ }
+ } else {
+ return false; // IO Pending (or error).
+ }
}
if ((filter_->stream_data_len() || filter_needs_more_output_space_) &&
@@ -739,7 +708,7 @@ Error URLRequestJob::ReadFilteredData(int* bytes_read) {
filter_needs_more_output_space_ = false;
*bytes_read = filtered_data_len;
postfilter_bytes_read_ += filtered_data_len;
- error = OK;
+ rv = true;
break;
}
case Filter::FILTER_NEED_MORE_DATA: {
@@ -752,7 +721,7 @@ Error URLRequestJob::ReadFilteredData(int* bytes_read) {
if (filtered_data_len > 0) {
*bytes_read = filtered_data_len;
postfilter_bytes_read_ += filtered_data_len;
- error = OK;
+ rv = true;
} else {
// Read again since we haven't received enough data yet (e.g., we
// may not have a complete gzip header yet).
@@ -763,7 +732,7 @@ Error URLRequestJob::ReadFilteredData(int* bytes_read) {
case Filter::FILTER_OK: {
*bytes_read = filtered_data_len;
postfilter_bytes_read_ += filtered_data_len;
- error = OK;
+ rv = true;
break;
}
case Filter::FILTER_ERROR: {
@@ -771,19 +740,21 @@ Error URLRequestJob::ReadFilteredData(int* bytes_read) {
<< "\"" << (request_ ? request_->url().spec() : "???")
<< "\"" << " Filter Error";
filter_needs_more_output_space_ = false;
- error = ERR_CONTENT_DECODING_FAILED;
+ NotifyDone(URLRequestStatus(URLRequestStatus::FAILED,
+ ERR_CONTENT_DECODING_FAILED));
+ rv = false;
break;
}
default: {
NOTREACHED();
filter_needs_more_output_space_ = false;
- error = ERR_FAILED;
+ rv = false;
break;
}
}
// If logging all bytes is enabled, log the filtered bytes read.
- if (error == OK && request() && filtered_data_len > 0 &&
+ if (rv && request() && filtered_data_len > 0 &&
request()->net_log().IsCapturing()) {
request()->net_log().AddByteTransferEvent(
NetLog::TYPE_URL_REQUEST_JOB_FILTERED_BYTES_READ, filtered_data_len,
@@ -791,18 +762,18 @@ Error URLRequestJob::ReadFilteredData(int* bytes_read) {
}
} else {
// we are done, or there is no data left.
- error = OK;
+ rv = true;
}
break;
}
- if (error == OK) {
+ if (rv) {
// When we successfully finished a read, we no longer need to save the
// caller's buffers. Release our reference.
filtered_read_buffer_ = NULL;
filtered_read_buffer_len_ = 0;
}
- return error;
+ return rv;
}
void URLRequestJob::DestroyFilters() {
@@ -835,8 +806,9 @@ void URLRequestJob::SetProxyServer(const HostPortPair& proxy_server) {
request_->proxy_server_ = proxy_server;
}
-Error URLRequestJob::ReadRawDataForFilter(int* bytes_read) {
- Error error = ERR_FAILED;
+bool URLRequestJob::ReadRawDataForFilter(int* bytes_read) {
+ bool rv = false;
+
DCHECK(bytes_read);
DCHECK(filter_.get());
@@ -848,28 +820,30 @@ Error URLRequestJob::ReadRawDataForFilter(int* bytes_read) {
if (!filter_->stream_data_len() && !is_done()) {
IOBuffer* stream_buffer = filter_->stream_buffer();
int stream_buffer_size = filter_->stream_buffer_size();
- error = ReadRawDataHelper(stream_buffer, stream_buffer_size, bytes_read);
+ rv = ReadRawDataHelper(stream_buffer, stream_buffer_size, bytes_read);
}
- return error;
+ return rv;
}
-Error URLRequestJob::ReadRawDataHelper(IOBuffer* buf,
- int buf_size,
- int* bytes_read) {
- DCHECK(!raw_read_buffer_);
+bool URLRequestJob::ReadRawDataHelper(IOBuffer* buf,
+ int buf_size,
+ int* bytes_read) {
+ DCHECK(!request_->status().is_io_pending());
+ DCHECK(raw_read_buffer_.get() == NULL);
- // Keep a pointer to the read buffer, so we have access to it in
- // GatherRawReadStats() in the event that the read completes asynchronously.
+ // Keep a pointer to the read buffer, so we have access to it in the
+ // OnRawReadComplete() callback in the event that the read completes
+ // asynchronously.
raw_read_buffer_ = buf;
- Error error;
- ConvertResultToError(ReadRawData(buf, buf_size), &error, bytes_read);
+ bool rv = ReadRawData(buf, buf_size, bytes_read);
- if (error != ERR_IO_PENDING) {
- // If the read completes synchronously, either success or failure, invoke
- // GatherRawReadStats so we can account for the completed read.
- GatherRawReadStats(error, *bytes_read);
+ if (!request_->status().is_io_pending()) {
+ // If the read completes synchronously, either success or failure,
+ // invoke the OnRawReadComplete callback so we can account for the
+ // completed read.
+ OnRawReadComplete(*bytes_read);
}
- return error;
+ return rv;
}
void URLRequestJob::FollowRedirect(const RedirectInfo& redirect_info) {
@@ -878,16 +852,9 @@ void URLRequestJob::FollowRedirect(const RedirectInfo& redirect_info) {
NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, rv));
}
-void URLRequestJob::GatherRawReadStats(Error error, int bytes_read) {
- DCHECK(raw_read_buffer_ || bytes_read == 0);
- DCHECK_NE(ERR_IO_PENDING, error);
-
- if (error != OK) {
- raw_read_buffer_ = nullptr;
- return;
- }
- // If |filter_| is non-NULL, bytes will be logged after it is applied
- // instead.
+void URLRequestJob::OnRawReadComplete(int bytes_read) {
+ DCHECK(raw_read_buffer_.get());
+ // If |filter_| is non-NULL, bytes will be logged after it is applied instead.
if (!filter_.get() && request() && bytes_read > 0 &&
request()->net_log().IsCapturing()) {
request()->net_log().AddByteTransferEvent(
@@ -898,7 +865,7 @@ void URLRequestJob::GatherRawReadStats(Error error, int bytes_read) {
if (bytes_read > 0) {
RecordBytesRead(bytes_read);
}
- raw_read_buffer_ = nullptr;
+ raw_read_buffer_ = NULL;
}
void URLRequestJob::RecordBytesRead(int bytes_read) {
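
The OnRawReadComplete path above also restores the buffer-pinning discipline:
ReadRawDataHelper keeps a reference to the caller's buffer until the read is
accounted for, then drops it. Modeled below with a shared_ptr in place of
scoped_refptr<IOBuffer>; all names are stand-ins.

    #include <iostream>
    #include <memory>
    #include <string>

    // Models why ReadRawDataHelper pins |raw_read_buffer_|: the buffer must
    // stay alive until OnRawReadComplete runs, even if the read finishes
    // asynchronously.
    struct MiniJob {
      std::shared_ptr<std::string> raw_read_buffer_;

      void ReadRawDataHelper(std::shared_ptr<std::string> buf) {
        raw_read_buffer_ = std::move(buf);  // hold a reference during the read
        // ... ReadRawData(...) would run here ...
        OnRawReadComplete(/*bytes_read=*/3);
      }

      void OnRawReadComplete(int bytes_read) {
        std::cout << "accounted " << bytes_read << " bytes from "
                  << raw_read_buffer_->substr(0, bytes_read) << "\n";
        raw_read_buffer_ = nullptr;  // release once the read is accounted for
      }
    };

    int main() {
      MiniJob job;
      job.ReadRawDataHelper(std::make_shared<std::string>("abcdef"));
    }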
diff --git a/net/url_request/url_request_job.h b/net/url_request/url_request_job.h
index 83358f3..f010913 100644
--- a/net/url_request/url_request_job.h
+++ b/net/url_request/url_request_job.h
@@ -17,7 +17,6 @@
#include "base/power_monitor/power_observer.h"
#include "net/base/host_port_pair.h"
#include "net/base/load_states.h"
-#include "net/base/net_errors.h"
#include "net/base/net_export.h"
#include "net/base/request_priority.h"
#include "net/base/upload_progress.h"
@@ -279,9 +278,23 @@ class NET_EXPORT URLRequestJob
// Notifies the job that headers have been received.
void NotifyHeadersComplete();
+ // Notifies the request that the job has completed a Read operation.
+ void NotifyReadComplete(int bytes_read);
+
// Notifies the request that a start error has occurred.
void NotifyStartError(const URLRequestStatus& status);
+ // NotifyDone marks when we are done with a request. It is really
+ // a glorified set_status, but also does internal state checking and
+ // job tracking. It should be called once per request, when the job is
+ // finished doing all IO.
+ void NotifyDone(const URLRequestStatus& status);
+
+ // Some work performed by NotifyDone must be completed on a separate task
+ // so as to avoid re-entering the delegate. This method exists to perform
+ // that work.
+ void CompleteNotifyDone();
+
// Used as an asynchronous callback for Kill to notify the URLRequest
// that we were canceled.
void NotifyCanceled();
@@ -294,19 +307,17 @@ class NET_EXPORT URLRequestJob
void OnCallToDelegate();
void OnCallToDelegateComplete();
- // Called to read raw (pre-filtered) data from this Job. Reads at most
- // |buf_size| bytes into |buf|.
- // Possible return values:
- // >= 0: Read completed synchronously. Return value is the number of bytes
- // read. 0 means eof.
- // ERR_IO_PENDING: Read pending asynchronously.
- // When the read completes, |ReadRawDataComplete| should be
- // called.
- // Any other negative number: Read failed synchronously. Return value is a
- // network error code.
- // This method might hold onto a reference to |buf| (by incrementing the
- // refcount) until the method completes or is cancelled.
- virtual int ReadRawData(IOBuffer* buf, int buf_size);
+ // Called to read raw (pre-filtered) data from this Job.
+ // If it returns true, data was read from the job: |buf| contains the data,
+ // and |bytes_read| receives the number of bytes read. If it returns true
+ // with |bytes_read| set to 0, there is no additional data to be read.
+ // If it returns false, an error occurred or async IO is now pending.
+ // If async IO is pending, the status of the request will be
+ // URLRequestStatus::IO_PENDING, and |buf| must remain available until the
+ // operation completes. See the comments on URLRequest::Read for more info.
+ virtual bool ReadRawData(IOBuffer* buf, int buf_size, int* bytes_read);
// Called to tell the job that a filter has successfully reached the end of
// the stream.
@@ -317,14 +328,14 @@ class NET_EXPORT URLRequestJob
// bodies are never read.
virtual void DoneReadingRedirectResponse();
- // Reads filtered data from the request. Returns OK if immediately successful,
- // ERR_IO_PENDING if the request couldn't complete synchronously, and some
- // other error code if the request failed synchronously. Note that this
- // function can issue new asynchronous requests if needed, in which case it
- // returns ERR_IO_PENDING. If this method completes synchronously,
- // |*bytes_read| is the number of bytes output by the filter chain if this
- // method returns OK, or zero if this method returns an error.
- Error ReadFilteredData(int* bytes_read);
+ // Informs the filter that data has been read into its buffer.
+ void FilteredDataRead(int bytes_read);
+
+ // Reads filtered data from the request. Returns true if successful,
+ // false otherwise. Note that if not enough data has been received to
+ // return any, this call can issue a new async IO request under the hood.
+ bool ReadFilteredData(int* bytes_read);
// Whether the response is being filtered in this job.
// Only valid after NotifyHeadersComplete() has been called.
@@ -356,33 +367,19 @@ class NET_EXPORT URLRequestJob
// reflects bytes read even when there is no filter.
int64 postfilter_bytes_read() const { return postfilter_bytes_read_; }
- // Turns an integer result code into an Error and a count of bytes read.
- // The semantics are:
- // |result| >= 0: |*error| == OK, |*count| == |result|
- // |result| < 0: |*error| = |result|, |*count| == 0
- static void ConvertResultToError(int result, Error* error, int* count);
-
- // Completion callback for raw reads. See |ReadRawData| for details.
- // |bytes_read| is either >= 0 to indicate a successful read and count of
- // bytes read, or < 0 to indicate an error.
- void ReadRawDataComplete(int bytes_read);
-
// The request that initiated this job. This value MAY BE NULL if the
// request was released by DetachRequest().
URLRequest* request_;
private:
// When data filtering is enabled, this function is used to read data
- // for the filter. Returns a net error code to indicate if raw data was
- // successfully read, an error happened, or the IO is pending.
- Error ReadRawDataForFilter(int* bytes_read);
-
- // Informs the filter chain that data has been read into its buffer.
- void PushInputToFilter(int bytes_read);
+ // for the filter. Returns true if raw data was read. Returns false if
+ // an error occurred (or we are waiting for IO to complete).
+ bool ReadRawDataForFilter(int* bytes_read);
// Invokes ReadRawData and records bytes read if the read completes
// synchronously.
- Error ReadRawDataHelper(IOBuffer* buf, int buf_size, int* bytes_read);
+ bool ReadRawDataHelper(IOBuffer* buf, int buf_size, int* bytes_read);
// Called in response to a redirect that was not canceled to follow the
// redirect. The current job will be replaced with a new job loading the
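For orientation, the caller side of this contract is URLRequestJob::ReadRawDataHelper in url_request_job.cc, whose hunks fall outside this section. A minimal sketch of the pattern, with the status and buffer bookkeeping approximated from the declarations above rather than quoted from the change:

    bool URLRequestJob::ReadRawDataHelper(IOBuffer* buf,
                                          int buf_size,
                                          int* bytes_read) {
      DCHECK(!raw_read_buffer_.get());
      // Hold a reference to |buf| in case the read completes asynchronously
      // and OnRawReadComplete() needs it later.
      raw_read_buffer_ = buf;
      bool rv = ReadRawData(buf, buf_size, bytes_read);
      if (!request_->status().is_io_pending()) {
        // Synchronous completion (success, EOF, or failure): account for
        // the read now and release the buffer.
        OnRawReadComplete(*bytes_read);
        raw_read_buffer_ = NULL;
      }
      return rv;
    }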
@@ -391,9 +388,9 @@ class NET_EXPORT URLRequestJob
// Called after every raw read. If |bytes_read| is > 0, this indicates
// a successful read of |bytes_read| unfiltered bytes. If |bytes_read|
- // is 0, this indicates that there is no additional data to read. |error|
- // specifies whether an error occurred and no bytes were read.
- void GatherRawReadStats(Error error, int bytes_read);
+ // is 0, this indicates that there is no additional data to read. If
+ // |bytes_read| is < 0, an error occurred and no bytes were read.
+ void OnRawReadComplete(int bytes_read);
// Updates the profiling info and notifies observers that an additional
// |bytes_read| unfiltered bytes have been read for this job.
@@ -403,16 +400,6 @@ class NET_EXPORT URLRequestJob
// out.
bool FilterHasData();
- // NotifyDone marks that request is done. It is really a glorified
- // set_status, but also does internal state checking and job tracking. It
- // should be called once per request, when the job is finished doing all IO.
- void NotifyDone(const URLRequestStatus& status);
-
- // Some work performed by NotifyDone must be completed asynchronously so
- // as to avoid re-entering URLRequest::Delegate. This method performs that
- // work.
- void CompleteNotifyDone();
-
// Subclasses may implement this method to record packet arrival times.
// The default implementation does nothing. Only invoked when bytes have been
// read since the last invocation.
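Taken together, the declarations above imply the following subclass pattern. This is a sketch only; HypotheticalJob, source_, and at_end_ are illustrative names, not part of this change:

    bool HypotheticalJob::ReadRawData(net::IOBuffer* buf,
                                      int buf_size,
                                      int* bytes_read) {
      DCHECK(bytes_read);
      if (at_end_) {
        *bytes_read = 0;  // Returning true with 0 bytes signals EOF.
        return true;
      }
      const int rv = source_->Read(
          buf, buf_size,
          base::Bind(&HypotheticalJob::DidRead, weak_factory_.GetWeakPtr()));
      if (rv >= 0) {
        *bytes_read = rv;  // Synchronous success.
        return true;
      }
      if (rv == net::ERR_IO_PENDING) {
        // Async read in flight: flag IO_PENDING and return false. DidRead()
        // must later clear the status and call NotifyReadComplete().
        SetStatus(net::URLRequestStatus(net::URLRequestStatus::IO_PENDING, 0));
        return false;
      }
      NotifyDone(net::URLRequestStatus(net::URLRequestStatus::FAILED, rv));
      return false;
    }

    void HypotheticalJob::DidRead(int result) {
      if (result < 0) {
        NotifyDone(net::URLRequestStatus(net::URLRequestStatus::FAILED,
                                         result));
      } else {
        SetStatus(net::URLRequestStatus());  // Clear IO_PENDING.
      }
      NotifyReadComplete(result);
    }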
diff --git a/net/url_request/url_request_job_unittest.cc b/net/url_request/url_request_job_unittest.cc
index e0d19b3..f9ea1fe 100644
--- a/net/url_request/url_request_job_unittest.cc
+++ b/net/url_request/url_request_job_unittest.cc
@@ -76,24 +76,6 @@ const MockTransaction kRedirect_Transaction = {
OK,
};
-const MockTransaction kEmptyBodyGzip_Transaction = {
- "http://www.google.com/empty_body",
- "GET",
- base::Time(),
- "",
- LOAD_NORMAL,
- "HTTP/1.1 200 OK",
- "Content-Encoding: gzip\n",
- base::Time(),
- "",
- TEST_MODE_NORMAL,
- nullptr,
- nullptr,
- 0,
- 0,
- OK,
-};
-
} // namespace
TEST(URLRequestJob, TransactionNotifiedWhenDone) {
@@ -205,30 +187,4 @@ TEST(URLRequestJob, TransactionNotCachedWhenNetworkDelegateRedirects) {
RemoveMockTransaction(&kGZip_Transaction);
}
-// Makes sure that ReadRawDataComplete correctly updates request status before
-// calling ReadFilteredData.
-// Regression test for crbug.com/553300.
-TEST(URLRequestJob, EmptyBodySkipFilter) {
- MockNetworkLayer network_layer;
- TestURLRequestContext context;
- context.set_http_transaction_factory(&network_layer);
-
- TestDelegate d;
- scoped_ptr<URLRequest> req(context.CreateRequest(
- GURL(kEmptyBodyGzip_Transaction.url), DEFAULT_PRIORITY, &d));
- AddMockTransaction(&kEmptyBodyGzip_Transaction);
-
- req->set_method("GET");
- req->Start();
-
- base::MessageLoop::current()->Run();
-
- EXPECT_FALSE(d.request_failed());
- EXPECT_EQ(200, req->GetResponseCode());
- EXPECT_TRUE(d.data_received().empty());
- EXPECT_TRUE(network_layer.done_reading_called());
-
- RemoveMockTransaction(&kEmptyBodyGzip_Transaction);
-}
-
} // namespace net
diff --git a/net/url_request/url_request_simple_job.cc b/net/url_request/url_request_simple_job.cc
index c12555d..bc15079 100644
--- a/net/url_request/url_request_simple_job.cc
+++ b/net/url_request/url_request_simple_job.cc
@@ -65,20 +65,33 @@ bool URLRequestSimpleJob::GetCharset(std::string* charset) {
URLRequestSimpleJob::~URLRequestSimpleJob() {}
-int URLRequestSimpleJob::ReadRawData(IOBuffer* buf, int buf_size) {
- buf_size = std::min(static_cast<int64>(buf_size),
- byte_range_.last_byte_position() - next_data_offset_ + 1);
- if (buf_size == 0)
- return 0;
+bool URLRequestSimpleJob::ReadRawData(IOBuffer* buf,
+ int buf_size,
+ int* bytes_read) {
+ DCHECK(bytes_read);
+ buf_size = static_cast<int>(
+ std::min(static_cast<int64>(buf_size),
+ byte_range_.last_byte_position() - next_data_offset_ + 1));
+ DCHECK_GE(buf_size, 0);
+ if (buf_size == 0) {
+ *bytes_read = 0;
+ return true;
+ }
// Do memory copy on a background thread. See crbug.com/422489.
GetTaskRunner()->PostTaskAndReply(
FROM_HERE, base::Bind(&CopyData, make_scoped_refptr(buf), buf_size, data_,
next_data_offset_),
- base::Bind(&URLRequestSimpleJob::ReadRawDataComplete,
+ base::Bind(&URLRequestSimpleJob::OnReadCompleted,
weak_factory_.GetWeakPtr(), buf_size));
next_data_offset_ += buf_size;
- return ERR_IO_PENDING;
+ SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));
+ return false;
+}
+
+void URLRequestSimpleJob::OnReadCompleted(int bytes_read) {
+ SetStatus(URLRequestStatus());
+ NotifyReadComplete(bytes_read);
}
base::TaskRunner* URLRequestSimpleJob::GetTaskRunner() const {
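URLRequestSimpleJob subclasses only supply the body; the base class, as patched above, slices it by range and completes reads through OnReadCompleted(). A sketch of a trivial subclass, assuming the protected GetData() hook has roughly this signature at this revision:

    class HelloJob : public net::URLRequestSimpleJob {
     public:
      HelloJob(net::URLRequest* request,
               net::NetworkDelegate* network_delegate)
          : net::URLRequestSimpleJob(request, network_delegate) {}

     protected:
      int GetData(std::string* mime_type,
                  std::string* charset,
                  std::string* data,
                  const net::CompletionCallback& callback) const override {
        *mime_type = "text/plain";
        *charset = "UTF-8";
        *data = "hello";
        return net::OK;  // Synchronous; the base class serves ReadRawData().
      }

     private:
      ~HelloJob() override {}
    };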
@@ -109,8 +122,8 @@ void URLRequestSimpleJob::StartAsync() {
return;
if (ranges().size() > 1) {
- NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED,
- ERR_REQUEST_RANGE_NOT_SATISFIABLE));
+ NotifyDone(URLRequestStatus(URLRequestStatus::FAILED,
+ ERR_REQUEST_RANGE_NOT_SATISFIABLE));
return;
}
@@ -130,8 +143,8 @@ void URLRequestSimpleJob::OnGetDataCompleted(int result) {
if (result == OK) {
// Notify that the headers are complete
if (!byte_range_.ComputeBounds(data_->size())) {
- NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED,
- ERR_REQUEST_RANGE_NOT_SATISFIABLE));
+ NotifyDone(URLRequestStatus(URLRequestStatus::FAILED,
+ ERR_REQUEST_RANGE_NOT_SATISFIABLE));
return;
}
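With the switch from NotifyStartError() to NotifyDone(), a multi-range request against a simple job still fails with ERR_REQUEST_RANGE_NOT_SATISFIABLE, but through the done path. A test-style sketch of the observable behavior, assuming data: URLs are still served via URLRequestSimpleJob and reusing the TestDelegate/TestURLRequestContext helpers seen in the deleted unittest:

    TestURLRequestContext context;
    TestDelegate d;
    scoped_ptr<URLRequest> req(
        context.CreateRequest(GURL("data:,payload"), DEFAULT_PRIORITY, &d));
    HttpRequestHeaders headers;
    headers.SetHeader(HttpRequestHeaders::kRange, "bytes=0-1,3-4");
    req->SetExtraRequestHeaders(headers);
    req->Start();
    base::MessageLoop::current()->Run();
    EXPECT_EQ(URLRequestStatus::FAILED, req->status().status());
    EXPECT_EQ(ERR_REQUEST_RANGE_NOT_SATISFIABLE, req->status().error());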
diff --git a/net/url_request/url_request_simple_job.h b/net/url_request/url_request_simple_job.h
index 06f718e..6c9d5e8 100644
--- a/net/url_request/url_request_simple_job.h
+++ b/net/url_request/url_request_simple_job.h
@@ -28,7 +28,7 @@ class NET_EXPORT URLRequestSimpleJob : public URLRangeRequestJob {
void Start() override;
void Kill() override;
- int ReadRawData(IOBuffer* buf, int buf_size) override;
+ bool ReadRawData(IOBuffer* buf, int buf_size, int* bytes_read) override;
bool GetMimeType(std::string* mime_type) const override;
bool GetCharset(std::string* charset) override;
@@ -66,6 +66,7 @@ class NET_EXPORT URLRequestSimpleJob : public URLRangeRequestJob {
private:
void OnGetDataCompleted(int result);
+ void OnReadCompleted(int bytes_read);
HttpByteRange byte_range_;
std::string mime_type_;
diff --git a/net/url_request/url_request_status.cc b/net/url_request/url_request_status.cc
index 7207df2..2a6a2fa 100644
--- a/net/url_request/url_request_status.cc
+++ b/net/url_request/url_request_status.cc
@@ -46,19 +46,4 @@ URLRequestStatus URLRequestStatus::FromError(int error) {
}
}
-Error URLRequestStatus::ToNetError() const {
- switch (status_) {
- case SUCCESS:
- return OK;
- case IO_PENDING:
- return ERR_IO_PENDING;
- case CANCELED:
- return ERR_ABORTED;
- case FAILED:
- return static_cast<Error>(error_);
- }
- NOTREACHED();
- return ERR_FAILED;
-}
-
} // namespace net
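Deleting ToNetError() leaves no in-class replacement; a caller that still needs the mapping has to write it locally. NetErrorFromStatus below is a hypothetical helper, and it needs its own net_errors.h include now that url_request_status.h no longer pulls it in:

    #include "base/logging.h"
    #include "net/base/net_errors.h"

    net::Error NetErrorFromStatus(const net::URLRequestStatus& status) {
      switch (status.status()) {
        case net::URLRequestStatus::SUCCESS:
          return net::OK;
        case net::URLRequestStatus::IO_PENDING:
          return net::ERR_IO_PENDING;
        case net::URLRequestStatus::CANCELED:
          return net::ERR_ABORTED;
        case net::URLRequestStatus::FAILED:
          return static_cast<net::Error>(status.error());
      }
      NOTREACHED();
      return net::ERR_FAILED;
    }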
diff --git a/net/url_request/url_request_status.h b/net/url_request/url_request_status.h
index 694c510..44a5d22 100644
--- a/net/url_request/url_request_status.h
+++ b/net/url_request/url_request_status.h
@@ -5,7 +5,6 @@
#ifndef NET_URL_REQUEST_URL_REQUEST_STATUS_H_
#define NET_URL_REQUEST_URL_REQUEST_STATUS_H_
-#include "net/base/net_errors.h"
#include "net/base/net_export.h"
namespace net {
@@ -42,13 +41,6 @@ class NET_EXPORT URLRequestStatus {
// deprecated. See https://crbug.com/490311.
static URLRequestStatus FromError(int error);
- // Returns a Error corresponding to |status_|.
- // OK for OK
- // ERR_IO_PENDING for IO_PENDING
- // ERR_ABORTED for CANCELLED
- // Error for FAILED
- Error ToNetError() const;
-
Status status() const { return status_; }
int error() const { return error_; }
diff --git a/net/url_request/url_request_test_job.cc b/net/url_request/url_request_test_job.cc
index bd1c905..8a293c3 100644
--- a/net/url_request/url_request_test_job.cc
+++ b/net/url_request/url_request_test_job.cc
@@ -210,8 +210,7 @@ void URLRequestTestJob::StartAsync() {
// unexpected url, return error
// FIXME(brettw) we may want to use WININET errors or have some more types
// of errors
- NotifyStartError(
- URLRequestStatus(URLRequestStatus::FAILED, ERR_INVALID_URL));
+ NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, ERR_INVALID_URL));
// FIXME(brettw): this should emulate a network error, and not just fail
// initiating a connection
return;
@@ -223,15 +222,22 @@ void URLRequestTestJob::StartAsync() {
this->NotifyHeadersComplete();
}
-int URLRequestTestJob::ReadRawData(IOBuffer* buf, int buf_size) {
+bool URLRequestTestJob::ReadRawData(IOBuffer* buf,
+ int buf_size,
+ int* bytes_read) {
if (stage_ == WAITING) {
async_buf_ = buf;
async_buf_size_ = buf_size;
- return ERR_IO_PENDING;
+ SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));
+ return false;
}
- if (offset_ >= static_cast<int>(response_data_.length()))
- return 0; // done reading
+ DCHECK(bytes_read);
+ *bytes_read = 0;
+
+ if (offset_ >= static_cast<int>(response_data_.length())) {
+ return true; // done reading
+ }
int to_read = buf_size;
if (to_read + offset_ > static_cast<int>(response_data_.length()))
@@ -240,7 +246,8 @@ int URLRequestTestJob::ReadRawData(IOBuffer* buf, int buf_size) {
memcpy(buf->data(), &response_data_.c_str()[offset_], to_read);
offset_ += to_read;
- return to_read;
+ *bytes_read = to_read;
+ return true;
}
void URLRequestTestJob::GetResponseInfo(HttpResponseInfo* info) {
@@ -298,15 +305,16 @@ void URLRequestTestJob::ProcessNextOperation() {
stage_ = DATA_AVAILABLE;
// OK if ReadRawData wasn't called yet.
if (async_buf_) {
- int result = ReadRawData(async_buf_, async_buf_size_);
- if (result < 0)
- NOTREACHED() << "Reads should not fail in DATA_AVAILABLE.";
+ int bytes_read;
+ if (!ReadRawData(async_buf_, async_buf_size_, &bytes_read))
+ NOTREACHED() << "This should not return false in DATA_AVAILABLE.";
+ SetStatus(URLRequestStatus()); // clear the io pending flag
if (NextReadAsync()) {
// Make all future reads return io pending until the next
// ProcessNextOperation().
stage_ = WAITING;
}
- ReadRawDataComplete(result);
+ NotifyReadComplete(bytes_read);
}
break;
case DATA_AVAILABLE:
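In the WAITING stage the job parks |async_buf_| and reports IO_PENDING; tests release it by pumping stages. A sketch, assuming requests are routed to URLRequestTestJob (for example through its protocol handler) and given a TestURLRequestContext:

    TestURLRequestContext context;
    TestDelegate d;
    scoped_ptr<URLRequest> req(context.CreateRequest(
        URLRequestTestJob::test_url_1(), DEFAULT_PRIORITY, &d));
    req->Start();
    base::MessageLoop::current()->RunUntilIdle();
    // Each call advances one waiting job to DATA_AVAILABLE, which re-runs
    // ReadRawData() and delivers the bytes via NotifyReadComplete().
    while (URLRequestTestJob::ProcessOnePendingMessage())
      base::MessageLoop::current()->RunUntilIdle();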
diff --git a/net/url_request/url_request_test_job.h b/net/url_request/url_request_test_job.h
index eb1db01..09b31a4 100644
--- a/net/url_request/url_request_test_job.h
+++ b/net/url_request/url_request_test_job.h
@@ -111,7 +111,7 @@ class NET_EXPORT_PRIVATE URLRequestTestJob : public URLRequestJob {
// Job functions
void SetPriority(RequestPriority priority) override;
void Start() override;
- int ReadRawData(IOBuffer* buf, int buf_size) override;
+ bool ReadRawData(IOBuffer* buf, int buf_size, int* bytes_read) override;
void Kill() override;
bool GetMimeType(std::string* mime_type) const override;
void GetResponseInfo(HttpResponseInfo* info) override;
diff --git a/storage/browser/blob/blob_url_request_job.cc b/storage/browser/blob/blob_url_request_job.cc
index deea300..6ec2fe5 100644
--- a/storage/browser/blob/blob_url_request_job.cc
+++ b/storage/browser/blob/blob_url_request_job.cc
@@ -75,37 +75,41 @@ void BlobURLRequestJob::Kill() {
weak_factory_.InvalidateWeakPtrs();
}
-int BlobURLRequestJob::ReadRawData(net::IOBuffer* dest, int dest_size) {
+bool BlobURLRequestJob::ReadRawData(net::IOBuffer* dest,
+ int dest_size,
+ int* bytes_read) {
TRACE_EVENT_ASYNC_BEGIN1("Blob", "BlobRequest::ReadRawData", this, "uuid",
blob_handle_ ? blob_handle_->uuid() : "NotFound");
DCHECK_NE(dest_size, 0);
+ DCHECK(bytes_read);
- // Bail out immediately if we encounter an error. This happens if a previous
- // ReadRawData signalled an error to its caller but the caller called
- // ReadRawData again anyway.
- if (error_)
- return 0;
+ // Bail out immediately if we encounter an error.
+ if (error_) {
+ *bytes_read = 0;
+ return true;
+ }
- int bytes_read = 0;
BlobReader::Status read_status =
- blob_reader_->Read(dest, dest_size, &bytes_read,
+ blob_reader_->Read(dest, dest_size, bytes_read,
base::Bind(&BlobURLRequestJob::DidReadRawData,
weak_factory_.GetWeakPtr()));
switch (read_status) {
case BlobReader::Status::NET_ERROR:
+ NotifyFailure(blob_reader_->net_error());
TRACE_EVENT_ASYNC_END1("Blob", "BlobRequest::ReadRawData", this, "uuid",
blob_handle_ ? blob_handle_->uuid() : "NotFound");
- return blob_reader_->net_error();
+ return false;
case BlobReader::Status::IO_PENDING:
- return net::ERR_IO_PENDING;
+ SetStatus(net::URLRequestStatus(net::URLRequestStatus::IO_PENDING, 0));
+ return false;
case BlobReader::Status::DONE:
TRACE_EVENT_ASYNC_END1("Blob", "BlobRequest::ReadRawData", this, "uuid",
blob_handle_ ? blob_handle_->uuid() : "NotFound");
- return bytes_read;
+ return true;
}
NOTREACHED();
- return 0;
+ return true;
}
bool BlobURLRequestJob::GetMimeType(std::string* mime_type) const {
@@ -218,7 +222,13 @@ void BlobURLRequestJob::DidCalculateSize(int result) {
void BlobURLRequestJob::DidReadRawData(int result) {
TRACE_EVENT_ASYNC_END1("Blob", "BlobRequest::ReadRawData", this, "uuid",
blob_handle_ ? blob_handle_->uuid() : "NotFound");
- ReadRawDataComplete(result);
+ if (result < 0) {
+ NotifyFailure(result);
+ return;
+ }
+ // Clear the IO_PENDING status
+ SetStatus(net::URLRequestStatus());
+ NotifyReadComplete(result);
}
void BlobURLRequestJob::NotifyFailure(int error_code) {
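The hunk below relaxes the old DCHECK: once headers have been delivered, the job can no longer rewrite them into an error response, so it finishes the request as FAILED instead. A delegate-side sketch of what that looks like (illustrative expectations, not a test from this change):

    // After a mid-body blob read error:
    EXPECT_EQ(URLRequestStatus::FAILED, request->status().status());
    EXPECT_LT(request->status().error(), 0);  // e.g. a net::ERR_* code.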
@@ -226,7 +236,11 @@ void BlobURLRequestJob::NotifyFailure(int error_code) {
// If we already return the headers on success, we can't change the headers
// now. Instead, we just error out.
- DCHECK(!response_info_) << "Cannot NotifyFailure after headers.";
+ if (response_info_) {
+ NotifyDone(
+ net::URLRequestStatus(net::URLRequestStatus::FAILED, error_code));
+ return;
+ }
net::HttpStatusCode status_code = net::HTTP_INTERNAL_SERVER_ERROR;
switch (error_code) {
diff --git a/storage/browser/blob/blob_url_request_job.h b/storage/browser/blob/blob_url_request_job.h
index 1f0b9fb..21baa2c 100644
--- a/storage/browser/blob/blob_url_request_job.h
+++ b/storage/browser/blob/blob_url_request_job.h
@@ -44,7 +44,7 @@ class STORAGE_EXPORT BlobURLRequestJob
// net::URLRequestJob methods.
void Start() override;
void Kill() override;
- int ReadRawData(net::IOBuffer* buf, int buf_size) override;
+ bool ReadRawData(net::IOBuffer* buf, int buf_size, int* bytes_read) override;
bool GetMimeType(std::string* mime_type) const override;
void GetResponseInfo(net::HttpResponseInfo* info) override;
int GetResponseCode() const override;
diff --git a/storage/browser/fileapi/file_system_dir_url_request_job.cc b/storage/browser/fileapi/file_system_dir_url_request_job.cc
index c053773..fc91f3b 100644
--- a/storage/browser/fileapi/file_system_dir_url_request_job.cc
+++ b/storage/browser/fileapi/file_system_dir_url_request_job.cc
@@ -14,6 +14,7 @@
#include "base/time/time.h"
#include "build/build_config.h"
#include "net/base/io_buffer.h"
+#include "net/base/net_errors.h"
#include "net/base/net_util.h"
#include "net/url_request/url_request.h"
#include "storage/browser/fileapi/file_system_context.h"
@@ -43,14 +44,16 @@ FileSystemDirURLRequestJob::FileSystemDirURLRequestJob(
FileSystemDirURLRequestJob::~FileSystemDirURLRequestJob() {
}
-int FileSystemDirURLRequestJob::ReadRawData(net::IOBuffer* dest,
- int dest_size) {
- int count = std::min(dest_size, base::checked_cast<int>(data_.size()));
+bool FileSystemDirURLRequestJob::ReadRawData(net::IOBuffer* dest,
+ int dest_size,
+ int* bytes_read) {
+ int count = std::min(dest_size, static_cast<int>(data_.size()));
if (count > 0) {
memcpy(dest->data(), data_.data(), count);
data_.erase(0, count);
}
- return count;
+ *bytes_read = count;
+ return true;
}
void FileSystemDirURLRequestJob::Start() {
@@ -96,7 +99,8 @@ void FileSystemDirURLRequestJob::StartAsync() {
false);
return;
}
- NotifyStartError(URLRequestStatus::FromError(net::ERR_FILE_NOT_FOUND));
+ NotifyDone(
+ URLRequestStatus(URLRequestStatus::FAILED, net::ERR_FILE_NOT_FOUND));
return;
}
file_system_context_->operation_runner()->ReadDirectory(
@@ -109,7 +113,8 @@ void FileSystemDirURLRequestJob::DidAttemptAutoMount(base::File::Error result) {
file_system_context_->CrackURL(request_->url()).is_valid()) {
StartAsync();
} else {
- NotifyStartError(URLRequestStatus::FromError(net::ERR_FILE_NOT_FOUND));
+ NotifyDone(
+ URLRequestStatus(URLRequestStatus::FAILED, net::ERR_FILE_NOT_FOUND));
}
}
@@ -121,7 +126,7 @@ void FileSystemDirURLRequestJob::DidReadDirectory(
int rv = net::ERR_FILE_NOT_FOUND;
if (result == base::File::FILE_ERROR_INVALID_URL)
rv = net::ERR_INVALID_URL;
- NotifyStartError(URLRequestStatus::FromError(rv));
+ NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, rv));
return;
}
@@ -170,7 +175,7 @@ void FileSystemDirURLRequestJob::DidGetMetadata(
int rv = net::ERR_FILE_NOT_FOUND;
if (result == base::File::FILE_ERROR_INVALID_URL)
rv = net::ERR_INVALID_URL;
- NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED, rv));
+ NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, rv));
}
if (!request_)
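The directory job's ReadRawData() above drains a std::string front-to-back. A standalone illustration of that drain pattern (toy code, independent of Chromium):

    #include <algorithm>
    #include <cstring>
    #include <iostream>
    #include <string>

    int main() {
      std::string data = "directory listing bytes...";
      char dest[8];
      while (!data.empty()) {
        // Mirrors ReadRawData(): copy at most sizeof(dest) bytes, then
        // erase the consumed prefix; *bytes_read = count; return true.
        int count = std::min(static_cast<int>(sizeof(dest)),
                             static_cast<int>(data.size()));
        std::memcpy(dest, data.data(), count);
        data.erase(0, count);
        std::cout.write(dest, count);
      }
      std::cout << "\n";
      return 0;
    }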
diff --git a/storage/browser/fileapi/file_system_dir_url_request_job.h b/storage/browser/fileapi/file_system_dir_url_request_job.h
index 93fdd1f..01bd372 100644
--- a/storage/browser/fileapi/file_system_dir_url_request_job.h
+++ b/storage/browser/fileapi/file_system_dir_url_request_job.h
@@ -32,7 +32,7 @@ class STORAGE_EXPORT FileSystemDirURLRequestJob : public net::URLRequestJob {
// URLRequestJob methods:
void Start() override;
void Kill() override;
- int ReadRawData(net::IOBuffer* buf, int buf_size) override;
+ bool ReadRawData(net::IOBuffer* buf, int buf_size, int* bytes_read) override;
bool GetCharset(std::string* charset) override;
// FilterContext methods (via URLRequestJob):
diff --git a/storage/browser/fileapi/file_system_url_request_job.cc b/storage/browser/fileapi/file_system_url_request_job.cc
index 8edcb3d..8d58f64 100644
--- a/storage/browser/fileapi/file_system_url_request_job.cc
+++ b/storage/browser/fileapi/file_system_url_request_job.cc
@@ -62,7 +62,6 @@ FileSystemURLRequestJob::FileSystemURLRequestJob(
file_system_context_(file_system_context),
is_directory_(false),
remaining_bytes_(0),
- range_parse_result_(net::OK),
weak_factory_(this) {}
FileSystemURLRequestJob::~FileSystemURLRequestJob() {}
@@ -80,28 +79,39 @@ void FileSystemURLRequestJob::Kill() {
weak_factory_.InvalidateWeakPtrs();
}
-int FileSystemURLRequestJob::ReadRawData(net::IOBuffer* dest, int dest_size) {
+bool FileSystemURLRequestJob::ReadRawData(net::IOBuffer* dest,
+ int dest_size,
+ int* bytes_read) {
DCHECK_NE(dest_size, 0);
+ DCHECK(bytes_read);
DCHECK_GE(remaining_bytes_, 0);
if (reader_.get() == NULL)
- return net::ERR_FAILED;
+ return false;
if (remaining_bytes_ < dest_size)
- dest_size = remaining_bytes_;
+ dest_size = static_cast<int>(remaining_bytes_);
- if (!dest_size)
- return 0;
+ if (!dest_size) {
+ *bytes_read = 0;
+ return true;
+ }
const int rv = reader_->Read(dest, dest_size,
base::Bind(&FileSystemURLRequestJob::DidRead,
weak_factory_.GetWeakPtr()));
if (rv >= 0) {
+ // Data is immediately available.
+ *bytes_read = rv;
remaining_bytes_ -= rv;
DCHECK_GE(remaining_bytes_, 0);
+ return true;
}
-
- return rv;
+ if (rv == net::ERR_IO_PENDING)
+ SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));
+ else
+ NotifyFailed(rv);
+ return false;
}
bool FileSystemURLRequestJob::GetMimeType(std::string* mime_type) const {
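The narrowing cast in the hunk above is safe only because |remaining_bytes_| has already been compared against the int-sized |dest_size|. A standalone check of that clamp (toy code, independent of Chromium):

    #include <stdint.h>
    #include <cassert>

    int ClampReadSize(int dest_size, int64_t remaining_bytes) {
      // Only narrow after proving the value fits in an int.
      if (remaining_bytes < dest_size)
        dest_size = static_cast<int>(remaining_bytes);
      return dest_size;
    }

    int main() {
      assert(ClampReadSize(4096, 10) == 10);          // Clamped to remainder.
      assert(ClampReadSize(4096, 1LL << 40) == 4096);  // Buffer-limited.
      return 0;
    }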
@@ -116,12 +126,8 @@ bool FileSystemURLRequestJob::GetMimeType(std::string* mime_type) const {
void FileSystemURLRequestJob::SetExtraRequestHeaders(
const net::HttpRequestHeaders& headers) {
std::string range_header;
- // Currently this job only cares about the Range header. Note that validation
- // is deferred to DidGetMetaData(), because NotifyStartError is not legal to
- // call since the job has not started.
if (headers.GetHeader(net::HttpRequestHeaders::kRange, &range_header)) {
std::vector<net::HttpByteRange> ranges;
-
if (net::HttpUtil::ParseRangeHeader(range_header, &ranges)) {
if (ranges.size() == 1) {
byte_range_ = ranges[0];
@@ -129,7 +135,7 @@ void FileSystemURLRequestJob::SetExtraRequestHeaders(
// We don't support multiple range requests in one single URL request.
// TODO(adamk): decide whether we want to support multiple range
// requests.
- range_parse_result_ = net::ERR_REQUEST_RANGE_NOT_SATISFIABLE;
+ NotifyFailed(net::ERR_REQUEST_RANGE_NOT_SATISFIABLE);
}
}
}
@@ -161,7 +167,7 @@ void FileSystemURLRequestJob::StartAsync() {
}
if (!file_system_context_->CanServeURLRequest(url_)) {
// In incognito mode the API is not usable and there should be no data.
- NotifyStartError(URLRequestStatus::FromError(net::ERR_FILE_NOT_FOUND));
+ NotifyFailed(net::ERR_FILE_NOT_FOUND);
return;
}
file_system_context_->operation_runner()->GetMetadata(
@@ -176,7 +182,7 @@ void FileSystemURLRequestJob::DidAttemptAutoMount(base::File::Error result) {
file_system_context_->CrackURL(request_->url()).is_valid()) {
StartAsync();
} else {
- NotifyStartError(URLRequestStatus::FromError(net::ERR_FILE_NOT_FOUND));
+ NotifyFailed(net::ERR_FILE_NOT_FOUND);
}
}
@@ -184,10 +190,9 @@ void FileSystemURLRequestJob::DidGetMetadata(
base::File::Error error_code,
const base::File::Info& file_info) {
if (error_code != base::File::FILE_OK) {
- NotifyStartError(URLRequestStatus::FromError(
- error_code == base::File::FILE_ERROR_INVALID_URL
- ? net::ERR_INVALID_URL
- : net::ERR_FILE_NOT_FOUND));
+ NotifyFailed(error_code == base::File::FILE_ERROR_INVALID_URL
+ ? net::ERR_INVALID_URL
+ : net::ERR_FILE_NOT_FOUND);
return;
}
@@ -197,14 +202,8 @@ void FileSystemURLRequestJob::DidGetMetadata(
is_directory_ = file_info.is_directory;
- if (range_parse_result_ != net::OK) {
- NotifyStartError(URLRequestStatus::FromError(range_parse_result_));
- return;
- }
-
if (!byte_range_.ComputeBounds(file_info.size)) {
- NotifyStartError(
- URLRequestStatus::FromError(net::ERR_REQUEST_RANGE_NOT_SATISFIABLE));
+ NotifyFailed(net::ERR_REQUEST_RANGE_NOT_SATISFIABLE);
return;
}
@@ -228,12 +227,17 @@ void FileSystemURLRequestJob::DidGetMetadata(
}
void FileSystemURLRequestJob::DidRead(int result) {
- if (result >= 0) {
- remaining_bytes_ -= result;
- DCHECK_GE(remaining_bytes_, 0);
- }
+ if (result > 0)
+ SetStatus(URLRequestStatus()); // Clear the IO_PENDING status
+ else if (result == 0)
+ NotifyDone(URLRequestStatus());
+ else
+ NotifyFailed(result);
+
+ remaining_bytes_ -= result;
+ DCHECK_GE(remaining_bytes_, 0);
- ReadRawDataComplete(result);
+ NotifyReadComplete(result);
}
bool FileSystemURLRequestJob::IsRedirectResponse(GURL* location,
@@ -253,4 +257,8 @@ bool FileSystemURLRequestJob::IsRedirectResponse(GURL* location,
return false;
}
+void FileSystemURLRequestJob::NotifyFailed(int rv) {
+ NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, rv));
+}
+
} // namespace storage
diff --git a/storage/browser/fileapi/file_system_url_request_job.h b/storage/browser/fileapi/file_system_url_request_job.h
index 308b66c..e442cae 100644
--- a/storage/browser/fileapi/file_system_url_request_job.h
+++ b/storage/browser/fileapi/file_system_url_request_job.h
@@ -11,7 +11,6 @@
#include "base/memory/ref_counted.h"
#include "base/memory/scoped_ptr.h"
#include "base/memory/weak_ptr.h"
-#include "net/base/net_errors.h"
#include "net/http/http_byte_range.h"
#include "net/url_request/url_request_job.h"
#include "storage/browser/fileapi/file_system_url.h"
@@ -42,7 +41,7 @@ class STORAGE_EXPORT FileSystemURLRequestJob : public net::URLRequestJob {
// URLRequestJob methods:
void Start() override;
void Kill() override;
- int ReadRawData(net::IOBuffer* buf, int buf_size) override;
+ bool ReadRawData(net::IOBuffer* buf, int buf_size, int* bytes_read) override;
bool IsRedirectResponse(GURL* location, int* http_status_code) override;
void SetExtraRequestHeaders(const net::HttpRequestHeaders& headers) override;
void GetResponseInfo(net::HttpResponseInfo* info) override;
@@ -61,6 +60,7 @@ class STORAGE_EXPORT FileSystemURLRequestJob : public net::URLRequestJob {
void DidGetMetadata(base::File::Error error_code,
const base::File::Info& file_info);
void DidRead(int result);
+ void NotifyFailed(int rv);
const std::string storage_domain_;
FileSystemContext* file_system_context_;
@@ -69,7 +69,6 @@ class STORAGE_EXPORT FileSystemURLRequestJob : public net::URLRequestJob {
bool is_directory_;
scoped_ptr<net::HttpResponseInfo> response_info_;
int64 remaining_bytes_;
- net::Error range_parse_result_;
net::HttpByteRange byte_range_;
base::WeakPtrFactory<FileSystemURLRequestJob> weak_factory_;