-rw-r--r-- android_webview/browser/net/android_stream_reader_url_request_job.cc | 72
-rw-r--r-- android_webview/browser/net/android_stream_reader_url_request_job.h | 4
-rw-r--r-- chrome/browser/chromeos/fileapi/external_file_url_request_job.cc | 70
-rw-r--r-- chrome/browser/chromeos/fileapi/external_file_url_request_job.h | 8
-rw-r--r-- content/browser/android/url_request_content_job.cc | 81
-rw-r--r-- content/browser/android/url_request_content_job.h | 6
-rw-r--r-- content/browser/appcache/appcache_url_request_job.cc | 19
-rw-r--r-- content/browser/appcache/appcache_url_request_job.h | 2
-rw-r--r-- content/browser/fileapi/file_writer_delegate_unittest.cc | 8
-rw-r--r-- content/browser/net/view_http_cache_job_factory.cc | 17
-rw-r--r-- content/browser/service_worker/service_worker_read_from_cache_job.cc | 79
-rw-r--r-- content/browser/service_worker/service_worker_read_from_cache_job.h | 3
-rw-r--r-- content/browser/service_worker/service_worker_url_request_job.cc | 77
-rw-r--r-- content/browser/service_worker/service_worker_url_request_job.h | 2
-rw-r--r-- content/browser/service_worker/service_worker_write_to_cache_job.cc | 216
-rw-r--r-- content/browser/service_worker/service_worker_write_to_cache_job.h | 32
-rw-r--r-- content/browser/streams/stream_url_request_job.cc | 65
-rw-r--r-- content/browser/streams/stream_url_request_job.h | 2
-rw-r--r-- content/browser/webui/url_data_manager_backend.cc | 35
-rw-r--r-- content/test/net/url_request_abort_on_end_job.cc | 17
-rw-r--r-- content/test/net/url_request_abort_on_end_job.h | 2
-rw-r--r-- ios/web/webui/url_data_manager_ios_backend.cc | 36
-rw-r--r-- mojo/services/network/url_loader_impl_apptest.cc | 28
-rw-r--r-- net/test/url_request/url_request_failed_job.cc | 51
-rw-r--r-- net/test/url_request/url_request_failed_job.h | 3
-rw-r--r-- net/test/url_request/url_request_mock_data_job.cc | 15
-rw-r--r-- net/test/url_request/url_request_mock_data_job.h | 2
-rw-r--r-- net/test/url_request/url_request_slow_download_job.cc | 27
-rw-r--r-- net/test/url_request/url_request_slow_download_job.h | 2
-rw-r--r-- net/url_request/url_request_file_dir_job.cc | 54
-rw-r--r-- net/url_request/url_request_file_dir_job.h | 8
-rw-r--r-- net/url_request/url_request_file_job.cc | 61
-rw-r--r-- net/url_request/url_request_file_job.h | 6
-rw-r--r-- net/url_request/url_request_ftp_job.cc | 31
-rw-r--r-- net/url_request/url_request_ftp_job.h | 2
-rw-r--r-- net/url_request/url_request_http_job.cc | 42
-rw-r--r-- net/url_request/url_request_http_job.h | 5
-rw-r--r-- net/url_request/url_request_job.cc | 231
-rw-r--r-- net/url_request/url_request_job.h | 93
-rw-r--r-- net/url_request/url_request_simple_job.cc | 34
-rw-r--r-- net/url_request/url_request_simple_job.h | 3
-rw-r--r-- net/url_request/url_request_status.cc | 15
-rw-r--r-- net/url_request/url_request_status.h | 8
-rw-r--r-- net/url_request/url_request_test_job.cc | 30
-rw-r--r-- net/url_request/url_request_test_job.h | 2
-rw-r--r-- storage/browser/blob/blob_url_request_job.cc | 42
-rw-r--r-- storage/browser/blob/blob_url_request_job.h | 2
-rw-r--r-- storage/browser/fileapi/file_system_dir_url_request_job.cc | 18
-rw-r--r-- storage/browser/fileapi/file_system_dir_url_request_job.h | 2
-rw-r--r-- storage/browser/fileapi/file_system_url_request_job.cc | 72
-rw-r--r-- storage/browser/fileapi/file_system_url_request_job.h | 5
51 files changed, 802 insertions(+), 945 deletions(-)
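
This commit converts URLRequestJob::ReadRawData() and all of its subclasses from a bool return with a |bytes_read| out-parameter (plus SetStatus/NotifyDone/NotifyReadComplete bookkeeping) to a single int return value: a non-negative count for a synchronous read (0 meaning EOF), net::ERR_IO_PENDING for a read that completes later through ReadRawDataComplete(), or another negative net error for a synchronous failure. As orientation before the per-file diffs, here is a minimal standalone model of the new contract (illustrative only, with stand-in types and error values, not Chromium's actual declarations):

#include <iostream>

namespace model {

// Stand-in for the value in net/base/net_errors.h; only its meaning
// ("completion will arrive later") matters for this sketch.
const int ERR_IO_PENDING = -1;

class URLRequestJobModel {
 public:
  virtual ~URLRequestJobModel() = default;

  // New contract: return >= 0 for a synchronous read (0 means EOF),
  // ERR_IO_PENDING if the read will finish later via ReadRawDataComplete(),
  // or another negative net error for a synchronous failure. The old
  // bool/out-parameter/SetStatus dance is gone.
  virtual int ReadRawData(char* buf, int buf_size) = 0;

 protected:
  // Called exactly once for each read that returned ERR_IO_PENDING, with
  // the same encoding: > 0 bytes, 0 for EOF, < 0 for a net error.
  void ReadRawDataComplete(int result) {
    std::cout << "async read done: " << result << "\n";
  }
};

}  // namespace model
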
diff --git a/android_webview/browser/net/android_stream_reader_url_request_job.cc b/android_webview/browser/net/android_stream_reader_url_request_job.cc
index c1bbea8..dc288fa 100644
--- a/android_webview/browser/net/android_stream_reader_url_request_job.cc
+++ b/android_webview/browser/net/android_stream_reader_url_request_job.cc
@@ -21,7 +21,6 @@
#include "content/public/browser/browser_thread.h"
#include "net/base/io_buffer.h"
#include "net/base/mime_util.h"
-#include "net/base/net_errors.h"
#include "net/base/net_util.h"
#include "net/http/http_response_headers.h"
#include "net/http/http_response_info.h"
@@ -90,6 +89,7 @@ AndroidStreamReaderURLRequestJob::AndroidStreamReaderURLRequestJob(
net::NetworkDelegate* network_delegate,
scoped_ptr<Delegate> delegate)
: URLRequestJob(request, network_delegate),
+ range_parse_result_(net::OK),
delegate_(delegate.Pass()),
weak_factory_(this) {
DCHECK(delegate_);
@@ -101,6 +101,7 @@ AndroidStreamReaderURLRequestJob::AndroidStreamReaderURLRequestJob(
scoped_ptr<DelegateObtainer> delegate_obtainer,
bool)
: URLRequestJob(request, network_delegate),
+ range_parse_result_(net::OK),
delegate_obtainer_(delegate_obtainer.Pass()),
weak_factory_(this) {
DCHECK(delegate_obtainer_);
@@ -141,7 +142,10 @@ void AndroidStreamReaderURLRequestJob::Start() {
base::Bind(&AndroidStreamReaderURLRequestJob::DelegateObtained,
weak_factory_.GetWeakPtr()));
} else {
- DoStart();
+ // Run DoStart asynchronously to avoid re-entering the delegate.
+ base::ThreadTaskRunnerHandle::Get()->PostTask(
+ FROM_HERE, base::Bind(&AndroidStreamReaderURLRequestJob::DoStart,
+ weak_factory_.GetWeakPtr()));
}
}
@@ -202,60 +206,40 @@ void AndroidStreamReaderURLRequestJob::OnReaderSeekCompleted(int result) {
set_expected_content_size(result);
HeadersComplete(kHTTPOk, kHTTPOkText);
} else {
- NotifyDone(net::URLRequestStatus(net::URLRequestStatus::FAILED, result));
+ NotifyStartError(
+ net::URLRequestStatus(net::URLRequestStatus::FAILED, result));
}
}
void AndroidStreamReaderURLRequestJob::OnReaderReadCompleted(int result) {
DCHECK(thread_checker_.CalledOnValidThread());
- // The URLRequest API contract requires that:
- // * NotifyDone be called once, to set the status code, indicate the job is
- // finished (there will be no further IO),
- // * NotifyReadComplete be called if false is returned from ReadRawData to
- // indicate that the IOBuffer will not be used by the job anymore.
- // There might be multiple calls to ReadRawData (and thus multiple calls to
- // NotifyReadComplete), which is why NotifyDone is called only on errors
- // (result < 0) and end of data (result == 0).
- if (result == 0) {
- NotifyDone(net::URLRequestStatus());
- } else if (result < 0) {
- NotifyDone(net::URLRequestStatus(net::URLRequestStatus::FAILED, result));
- } else {
- // Clear the IO_PENDING status.
- SetStatus(net::URLRequestStatus());
- }
- NotifyReadComplete(result);
+
+ ReadRawDataComplete(result);
}
base::TaskRunner* AndroidStreamReaderURLRequestJob::GetWorkerThreadRunner() {
return static_cast<base::TaskRunner*>(BrowserThread::GetBlockingPool());
}
-bool AndroidStreamReaderURLRequestJob::ReadRawData(net::IOBuffer* dest,
- int dest_size,
- int* bytes_read) {
+int AndroidStreamReaderURLRequestJob::ReadRawData(net::IOBuffer* dest,
+ int dest_size) {
DCHECK(thread_checker_.CalledOnValidThread());
if (!input_stream_reader_wrapper_.get()) {
// This will happen if opening the InputStream fails in which case the
// error is communicated by setting the HTTP response status header rather
// than failing the request during the header fetch phase.
- *bytes_read = 0;
- return true;
+ return 0;
}
PostTaskAndReplyWithResult(
- GetWorkerThreadRunner(),
- FROM_HERE,
+ GetWorkerThreadRunner(), FROM_HERE,
base::Bind(&InputStreamReaderWrapper::ReadRawData,
- input_stream_reader_wrapper_,
- make_scoped_refptr(dest),
+ input_stream_reader_wrapper_, make_scoped_refptr(dest),
dest_size),
base::Bind(&AndroidStreamReaderURLRequestJob::OnReaderReadCompleted,
weak_factory_.GetWeakPtr()));
- SetStatus(net::URLRequestStatus(net::URLRequestStatus::IO_PENDING,
- net::ERR_IO_PENDING));
- return false;
+ return net::ERR_IO_PENDING;
}
bool AndroidStreamReaderURLRequestJob::GetMimeType(
@@ -305,6 +289,11 @@ void AndroidStreamReaderURLRequestJob::DelegateObtained(
void AndroidStreamReaderURLRequestJob::DoStart() {
DCHECK(thread_checker_.CalledOnValidThread());
+ if (range_parse_result_ != net::OK) {
+ NotifyStartError(net::URLRequestStatus(net::URLRequestStatus::FAILED,
+ range_parse_result_));
+ return;
+ }
// Start reading asynchronously so that all error reporting and data
// callbacks happen as they would for network requests.
SetStatus(net::URLRequestStatus(net::URLRequestStatus::IO_PENDING,
@@ -385,19 +374,18 @@ void AndroidStreamReaderURLRequestJob::SetExtraRequestHeaders(
const net::HttpRequestHeaders& headers) {
std::string range_header;
if (headers.GetHeader(net::HttpRequestHeaders::kRange, &range_header)) {
- // We only extract the "Range" header so that we know how many bytes in the
- // stream to skip and how many to read after that.
+ // This job only cares about the Range header so that we know how many bytes
+ // in the stream to skip and how many to read after that. Note that
+ // validation is deferred to DoStart(), because NotifyStartError() is not
+ // legal to call since the job has not started.
std::vector<net::HttpByteRange> ranges;
if (net::HttpUtil::ParseRangeHeader(range_header, &ranges)) {
- if (ranges.size() == 1) {
+ if (ranges.size() == 1)
byte_range_ = ranges[0];
- } else {
- // We don't support multiple range requests in one single URL request,
- // because we need to do multipart encoding here.
- NotifyDone(net::URLRequestStatus(
- net::URLRequestStatus::FAILED,
- net::ERR_REQUEST_RANGE_NOT_SATISFIABLE));
- }
+ } else {
+ // We don't support multiple range requests in one single URL request,
+ // because we need to do multipart encoding here.
+ range_parse_result_ = net::ERR_REQUEST_RANGE_NOT_SATISFIABLE;
}
}
}
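
The android_webview change above also shows the second recurring pattern in this commit: SetExtraRequestHeaders() runs before the job has started, where NotifyStartError() is not yet legal, so a range-parse failure is recorded in range_parse_result_ and reported from DoStart(). A minimal sketch of that deferral (stand-in types, not the WebView code itself):

#include <string>
#include <vector>

enum class Error { kOk, kRangeNotSatisfiable };

class RangeJobModel {
 public:
  // Runs before Start(); may not report errors yet, so only record them.
  void SetExtraRequestHeaders(const std::vector<std::string>& ranges) {
    if (ranges.size() == 1)
      byte_range_ = ranges[0];
    else
      range_parse_result_ = Error::kRangeNotSatisfiable;
  }

  // Runs once the job has started; errors are now legal to report.
  void DoStart() {
    if (range_parse_result_ != Error::kOk) {
      NotifyStartError(range_parse_result_);
      return;
    }
    // ... open the stream and continue as usual ...
  }

 private:
  void NotifyStartError(Error) { /* surface the error to the request */ }

  Error range_parse_result_ = Error::kOk;
  std::string byte_range_;
};
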
diff --git a/android_webview/browser/net/android_stream_reader_url_request_job.h b/android_webview/browser/net/android_stream_reader_url_request_job.h
index e21d765..645be13 100644
--- a/android_webview/browser/net/android_stream_reader_url_request_job.h
+++ b/android_webview/browser/net/android_stream_reader_url_request_job.h
@@ -15,6 +15,7 @@
#include "base/memory/scoped_ptr.h"
#include "base/memory/weak_ptr.h"
#include "base/threading/thread_checker.h"
+#include "net/base/net_errors.h"
#include "net/http/http_byte_range.h"
#include "net/url_request/url_request_job.h"
@@ -96,7 +97,7 @@ class AndroidStreamReaderURLRequestJob : public net::URLRequestJob {
// URLRequestJob:
void Start() override;
void Kill() override;
- bool ReadRawData(net::IOBuffer* buf, int buf_size, int* bytes_read) override;
+ int ReadRawData(net::IOBuffer* buf, int buf_size) override;
void SetExtraRequestHeaders(const net::HttpRequestHeaders& headers) override;
bool GetMimeType(std::string* mime_type) const override;
bool GetCharset(std::string* charset) override;
@@ -131,6 +132,7 @@ class AndroidStreamReaderURLRequestJob : public net::URLRequestJob {
void OnReaderReadCompleted(int bytes_read);
net::HttpByteRange byte_range_;
+ net::Error range_parse_result_;
scoped_ptr<net::HttpResponseInfo> response_info_;
scoped_ptr<Delegate> delegate_;
scoped_ptr<DelegateObtainer> delegate_obtainer_;
diff --git a/chrome/browser/chromeos/fileapi/external_file_url_request_job.cc b/chrome/browser/chromeos/fileapi/external_file_url_request_job.cc
index 2132113..4abe494 100644
--- a/chrome/browser/chromeos/fileapi/external_file_url_request_job.cc
+++ b/chrome/browser/chromeos/fileapi/external_file_url_request_job.cc
@@ -10,6 +10,7 @@
#include "base/bind.h"
#include "base/logging.h"
#include "base/memory/ref_counted.h"
+#include "base/thread_task_runner_handle.h"
#include "chrome/browser/browser_process.h"
#include "chrome/browser/chromeos/file_manager/fileapi_util.h"
#include "chrome/browser/chromeos/fileapi/external_file_url_util.h"
@@ -19,7 +20,6 @@
#include "content/public/browser/browser_thread.h"
#include "content/public/browser/storage_partition.h"
#include "content/public/common/url_constants.h"
-#include "net/base/net_errors.h"
#include "net/http/http_byte_range.h"
#include "net/http/http_request_headers.h"
#include "net/http/http_response_info.h"
@@ -163,32 +163,48 @@ ExternalFileURLRequestJob::ExternalFileURLRequestJob(
net::NetworkDelegate* network_delegate)
: net::URLRequestJob(request, network_delegate),
profile_id_(profile_id),
+ range_parse_result_(net::OK),
remaining_bytes_(0),
- weak_ptr_factory_(this) {
-}
+ weak_ptr_factory_(this) {}
void ExternalFileURLRequestJob::SetExtraRequestHeaders(
const net::HttpRequestHeaders& headers) {
std::string range_header;
if (headers.GetHeader(net::HttpRequestHeaders::kRange, &range_header)) {
- // Note: We only support single range requests.
+ // Currently this job only cares about the Range header, and only supports
+ // single range requests. Note that validation is deferred to Start,
+ // because NotifyStartError is not legal to call since the job has not
+ // started.
std::vector<net::HttpByteRange> ranges;
if (net::HttpUtil::ParseRangeHeader(range_header, &ranges) &&
ranges.size() == 1) {
byte_range_ = ranges[0];
} else {
- // Failed to parse Range: header, so notify the error.
- NotifyDone(net::URLRequestStatus(net::URLRequestStatus::FAILED,
- net::ERR_REQUEST_RANGE_NOT_SATISFIABLE));
+ range_parse_result_ = net::ERR_REQUEST_RANGE_NOT_SATISFIABLE;
}
}
}
void ExternalFileURLRequestJob::Start() {
+ // Post a task to invoke StartAsync asynchronously to avoid re-entering the
+ // delegate, because NotifyStartError is not legal to call synchronously in
+ // Start().
+ base::ThreadTaskRunnerHandle::Get()->PostTask(
+ FROM_HERE, base::Bind(&ExternalFileURLRequestJob::StartAsync,
+ weak_ptr_factory_.GetWeakPtr()));
+}
+
+void ExternalFileURLRequestJob::StartAsync() {
DVLOG(1) << "Starting request";
DCHECK_CURRENTLY_ON(BrowserThread::IO);
DCHECK(!stream_reader_);
+ if (range_parse_result_ != net::OK) {
+ NotifyStartError(net::URLRequestStatus(net::URLRequestStatus::FAILED,
+ range_parse_result_));
+ return;
+ }
+
// We only support GET request.
if (request()->method() != "GET") {
LOG(WARNING) << "Failed to start request: " << request()->method()
@@ -326,38 +342,23 @@ bool ExternalFileURLRequestJob::IsRedirectResponse(GURL* location,
return true;
}
-bool ExternalFileURLRequestJob::ReadRawData(net::IOBuffer* buf,
- int buf_size,
- int* bytes_read) {
+int ExternalFileURLRequestJob::ReadRawData(net::IOBuffer* buf, int buf_size) {
DCHECK_CURRENTLY_ON(BrowserThread::IO);
DCHECK(stream_reader_);
- if (remaining_bytes_ == 0) {
- *bytes_read = 0;
- return true;
- }
+ if (remaining_bytes_ == 0)
+ return 0;
const int result = stream_reader_->Read(
- buf,
- std::min<int64>(buf_size, remaining_bytes_),
+ buf, std::min<int64>(buf_size, remaining_bytes_),
base::Bind(&ExternalFileURLRequestJob::OnReadCompleted,
weak_ptr_factory_.GetWeakPtr()));
- if (result == net::ERR_IO_PENDING) {
- // The data is not yet available.
- SetStatus(net::URLRequestStatus(net::URLRequestStatus::IO_PENDING, 0));
- return false;
- }
- if (result < 0) {
- // An error occurs.
- NotifyDone(net::URLRequestStatus(net::URLRequestStatus::FAILED, result));
- return false;
- }
+ if (result < 0)
+ return result;
- // Reading has been finished immediately.
- *bytes_read = result;
remaining_bytes_ -= result;
- return true;
+ return result;
}
ExternalFileURLRequestJob::~ExternalFileURLRequestJob() {
@@ -366,15 +367,10 @@ ExternalFileURLRequestJob::~ExternalFileURLRequestJob() {
void ExternalFileURLRequestJob::OnReadCompleted(int read_result) {
DCHECK_CURRENTLY_ON(BrowserThread::IO);
- if (read_result < 0) {
- DCHECK_NE(read_result, net::ERR_IO_PENDING);
- NotifyDone(
- net::URLRequestStatus(net::URLRequestStatus::FAILED, read_result));
- }
+ if (read_result > 0)
+ remaining_bytes_ -= read_result;
- remaining_bytes_ -= read_result;
- SetStatus(net::URLRequestStatus()); // Clear the IO_PENDING status.
- NotifyReadComplete(read_result);
+ ReadRawDataComplete(read_result);
}
} // namespace chromeos
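
external_file_url_request_job.cc also introduces the Start()/StartAsync() split that several jobs in this commit adopt: Start() only posts a task, so NotifyStartError() never runs re-entrantly inside the caller of Start(). A standalone sketch of the shape, with a plain queue standing in for base::ThreadTaskRunnerHandle:

#include <functional>
#include <queue>

std::queue<std::function<void()>>& TaskQueue() {
  static std::queue<std::function<void()>> q;
  return q;
}

class JobModel {
 public:
  // Never reports errors synchronously: it only schedules StartAsync().
  void Start() {
    TaskQueue().push([this] { StartAsync(); });
  }

 private:
  void StartAsync() {
    // Safe to call NotifyStartError() here: Start() has already returned,
    // so the code that called Start() is not re-entered.
    if (failed_)
      NotifyStartError();
  }
  void NotifyStartError() {}
  bool failed_ = false;
};

int main() {
  JobModel job;
  job.Start();
  while (!TaskQueue().empty()) {  // drain the "message loop"
    TaskQueue().front()();
    TaskQueue().pop();
  }
}
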
diff --git a/chrome/browser/chromeos/fileapi/external_file_url_request_job.h b/chrome/browser/chromeos/fileapi/external_file_url_request_job.h
index a026e43..dff3379 100644
--- a/chrome/browser/chromeos/fileapi/external_file_url_request_job.h
+++ b/chrome/browser/chromeos/fileapi/external_file_url_request_job.h
@@ -63,12 +63,17 @@ class ExternalFileURLRequestJob : public net::URLRequestJob {
void Kill() override;
bool GetMimeType(std::string* mime_type) const override;
bool IsRedirectResponse(GURL* location, int* http_status_code) override;
- bool ReadRawData(net::IOBuffer* buf, int buf_size, int* bytes_read) override;
+ int ReadRawData(net::IOBuffer* buf, int buf_size) override;
protected:
~ExternalFileURLRequestJob() override;
private:
+ // Helper method to start the job. Should be called asynchronously because
+ // NotifyStartError() is not legal to call synchronously in
+ // URLRequestJob::Start().
+ void StartAsync();
+
// Called from an internal helper class defined in drive_url_request_job.cc,
// which is running on the UI thread.
void OnHelperResultObtained(
@@ -91,6 +96,7 @@ class ExternalFileURLRequestJob : public net::URLRequestJob {
void* const profile_id_;
// The range of the file to be returned.
+ net::Error range_parse_result_;
net::HttpByteRange byte_range_;
int64 remaining_bytes_;
diff --git a/content/browser/android/url_request_content_job.cc b/content/browser/android/url_request_content_job.cc
index 594e89f..1bcbf210 100644
--- a/content/browser/android/url_request_content_job.cc
+++ b/content/browser/android/url_request_content_job.cc
@@ -11,7 +11,6 @@
#include "base/task_runner.h"
#include "net/base/file_stream.h"
#include "net/base/io_buffer.h"
-#include "net/base/net_errors.h"
#include "net/http/http_util.h"
#include "net/url_request/url_request_error_job.h"
#include "url/gurl.h"
@@ -34,6 +33,7 @@ URLRequestContentJob::URLRequestContentJob(
content_path_(content_path),
stream_(new net::FileStream(content_task_runner)),
content_task_runner_(content_task_runner),
+ range_parse_result_(net::OK),
remaining_bytes_(0),
io_pending_(false),
weak_ptr_factory_(this) {}
@@ -56,44 +56,28 @@ void URLRequestContentJob::Kill() {
net::URLRequestJob::Kill();
}
-bool URLRequestContentJob::ReadRawData(net::IOBuffer* dest,
- int dest_size,
- int* bytes_read) {
+int URLRequestContentJob::ReadRawData(net::IOBuffer* dest, int dest_size) {
DCHECK_GT(dest_size, 0);
- DCHECK(bytes_read);
DCHECK_GE(remaining_bytes_, 0);
if (remaining_bytes_ < dest_size)
- dest_size = static_cast<int>(remaining_bytes_);
+ dest_size = remaining_bytes_;
// If we should copy zero bytes because |remaining_bytes_| is zero, short
// circuit here.
- if (!dest_size) {
- *bytes_read = 0;
- return true;
- }
+ if (!dest_size)
+ return 0;
- int rv = stream_->Read(dest,
- dest_size,
+ int rv = stream_->Read(dest, dest_size,
base::Bind(&URLRequestContentJob::DidRead,
- weak_ptr_factory_.GetWeakPtr(),
- make_scoped_refptr(dest)));
- if (rv >= 0) {
- // Data is immediately available.
- *bytes_read = rv;
- remaining_bytes_ -= rv;
- DCHECK_GE(remaining_bytes_, 0);
- return true;
- }
-
- // Otherwise, a read error occured. We may just need to wait...
+ weak_ptr_factory_.GetWeakPtr()));
if (rv == net::ERR_IO_PENDING) {
io_pending_ = true;
- SetStatus(net::URLRequestStatus(net::URLRequestStatus::IO_PENDING, 0));
- } else {
- NotifyDone(net::URLRequestStatus(net::URLRequestStatus::FAILED, rv));
+ } else if (rv > 0) {
+ remaining_bytes_ -= rv;
}
- return false;
+ DCHECK_GE(remaining_bytes_, 0);
+ return rv;
}
bool URLRequestContentJob::IsRedirectResponse(GURL* location,
@@ -116,16 +100,16 @@ void URLRequestContentJob::SetExtraRequestHeaders(
if (!headers.GetHeader(net::HttpRequestHeaders::kRange, &range_header))
return;
- // We only care about "Range" header here.
+ // Currently this job only cares about the Range header. Note that validation
+ // is deferred to DidOpen(), because NotifyStartError is not legal to call
+ // since the job has not started.
std::vector<net::HttpByteRange> ranges;
if (net::HttpUtil::ParseRangeHeader(range_header, &ranges)) {
if (ranges.size() == 1) {
byte_range_ = ranges[0];
} else {
// We don't support multiple range requests.
- NotifyDone(net::URLRequestStatus(
- net::URLRequestStatus::FAILED,
- net::ERR_REQUEST_RANGE_NOT_SATISFIABLE));
+ range_parse_result_ = net::ERR_REQUEST_RANGE_NOT_SATISFIABLE;
}
}
}
@@ -162,13 +146,20 @@ void URLRequestContentJob::DidFetchMetaInfo(const ContentMetaInfo* meta_info) {
void URLRequestContentJob::DidOpen(int result) {
if (result != net::OK) {
- NotifyDone(net::URLRequestStatus(net::URLRequestStatus::FAILED, result));
+ NotifyStartError(
+ net::URLRequestStatus(net::URLRequestStatus::FAILED, result));
+ return;
+ }
+
+ if (range_parse_result_ != net::OK) {
+ NotifyStartError(net::URLRequestStatus(net::URLRequestStatus::FAILED,
+ range_parse_result_));
return;
}
if (!byte_range_.ComputeBounds(meta_info_.content_size)) {
- NotifyDone(net::URLRequestStatus(net::URLRequestStatus::FAILED,
- net::ERR_REQUEST_RANGE_NOT_SATISFIABLE));
+ NotifyStartError(net::URLRequestStatus(
+ net::URLRequestStatus::FAILED, net::ERR_REQUEST_RANGE_NOT_SATISFIABLE));
return;
}
@@ -195,8 +186,8 @@ void URLRequestContentJob::DidOpen(int result) {
void URLRequestContentJob::DidSeek(int64 result) {
if (result != byte_range_.first_byte_position()) {
- NotifyDone(net::URLRequestStatus(net::URLRequestStatus::FAILED,
- net::ERR_REQUEST_RANGE_NOT_SATISFIABLE));
+ NotifyStartError(net::URLRequestStatus(
+ net::URLRequestStatus::FAILED, net::ERR_REQUEST_RANGE_NOT_SATISFIABLE));
return;
}
@@ -204,24 +195,16 @@ void URLRequestContentJob::DidSeek(int64 result) {
NotifyHeadersComplete();
}
-void URLRequestContentJob::DidRead(
- scoped_refptr<net::IOBuffer> buf, int result) {
- if (result > 0) {
- SetStatus(net::URLRequestStatus()); // Clear the IO_PENDING status
- remaining_bytes_ -= result;
- DCHECK_GE(remaining_bytes_, 0);
- }
-
+void URLRequestContentJob::DidRead(int result) {
DCHECK(io_pending_);
io_pending_ = false;
- if (result == 0) {
- NotifyDone(net::URLRequestStatus());
- } else if (result < 0) {
- NotifyDone(net::URLRequestStatus(net::URLRequestStatus::FAILED, result));
+ if (result > 0) {
+ remaining_bytes_ -= result;
+ DCHECK_GE(remaining_bytes_, 0);
}
- NotifyReadComplete(result);
+ ReadRawDataComplete(result);
}
} // namespace content
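
For the content job, the asynchronous completion path collapses from three notifications into one: DidRead() now just updates its own bookkeeping and forwards the signed result to ReadRawDataComplete(). A sketch of such a callback under the new contract (stand-in class mirroring the diff above):

#include <cstdint>

class ContentJobModel {
 public:
  // The old contract required three calls here: SetStatus() to clear
  // IO_PENDING, NotifyDone() on EOF or error, then NotifyReadComplete().
  // The new contract folds all three meanings into one call.
  void DidRead(int result) {
    io_pending_ = false;
    if (result > 0)
      remaining_bytes_ -= result;  // > 0: bytes read
    ReadRawDataComplete(result);   // 0: EOF, < 0: net error
  }

 private:
  void ReadRawDataComplete(int /*result*/) {}

  int64_t remaining_bytes_ = 0;
  bool io_pending_ = true;
};
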
diff --git a/content/browser/android/url_request_content_job.h b/content/browser/android/url_request_content_job.h
index 5d3d933..dad9fbf 100644
--- a/content/browser/android/url_request_content_job.h
+++ b/content/browser/android/url_request_content_job.h
@@ -12,6 +12,7 @@
#include "base/memory/ref_counted.h"
#include "base/memory/weak_ptr.h"
#include "content/common/content_export.h"
+#include "net/base/net_errors.h"
#include "net/http/http_byte_range.h"
#include "net/url_request/url_request.h"
#include "net/url_request/url_request_job.h"
@@ -42,7 +43,7 @@ class CONTENT_EXPORT URLRequestContentJob : public net::URLRequestJob {
// net::URLRequestJob:
void Start() override;
void Kill() override;
- bool ReadRawData(net::IOBuffer* buf, int buf_size, int* bytes_read) override;
+ int ReadRawData(net::IOBuffer* buf, int buf_size) override;
bool IsRedirectResponse(GURL* location, int* http_status_code) override;
bool GetMimeType(std::string* mime_type) const override;
void SetExtraRequestHeaders(const net::HttpRequestHeaders& headers) override;
@@ -79,7 +80,7 @@ class CONTENT_EXPORT URLRequestContentJob : public net::URLRequestJob {
void DidSeek(int64 result);
// Callback after data is asynchronously read from the content URI into |buf|.
- void DidRead(scoped_refptr<net::IOBuffer> buf, int result);
+ void DidRead(int result);
// The full path of the content URI.
base::FilePath content_path_;
@@ -89,6 +90,7 @@ class CONTENT_EXPORT URLRequestContentJob : public net::URLRequestJob {
const scoped_refptr<base::TaskRunner> content_task_runner_;
net::HttpByteRange byte_range_;
+ net::Error range_parse_result_;
int64 remaining_bytes_;
bool io_pending_;
diff --git a/content/browser/appcache/appcache_url_request_job.cc b/content/browser/appcache/appcache_url_request_job.cc
index 5b06e6e..de924a1 100644
--- a/content/browser/appcache/appcache_url_request_job.cc
+++ b/content/browser/appcache/appcache_url_request_job.cc
@@ -341,7 +341,6 @@ void AppCacheURLRequestJob::SetupRangeResponse() {
void AppCacheURLRequestJob::OnReadComplete(int result) {
DCHECK(is_delivering_appcache_response());
if (result == 0) {
- NotifyDone(net::URLRequestStatus());
AppCacheHistograms::CountResponseRetrieval(
true, is_main_resource_, manifest_url_.GetOrigin());
} else if (result < 0) {
@@ -349,13 +348,10 @@ void AppCacheURLRequestJob::OnReadComplete(int result) {
storage_->service()->CheckAppCacheResponse(manifest_url_, cache_id_,
entry_.response_id());
}
- NotifyDone(net::URLRequestStatus(net::URLRequestStatus::FAILED, result));
AppCacheHistograms::CountResponseRetrieval(
false, is_main_resource_, manifest_url_.GetOrigin());
- } else {
- SetStatus(net::URLRequestStatus()); // Clear the IO_PENDING status
}
- NotifyReadComplete(result);
+ ReadRawDataComplete(result);
}
// net::URLRequestJob overrides ------------------------------------------------
@@ -424,17 +420,14 @@ int AppCacheURLRequestJob::GetResponseCode() const {
return http_info()->headers->response_code();
}
-bool AppCacheURLRequestJob::ReadRawData(net::IOBuffer* buf, int buf_size,
- int *bytes_read) {
+int AppCacheURLRequestJob::ReadRawData(net::IOBuffer* buf, int buf_size) {
DCHECK(is_delivering_appcache_response());
DCHECK_NE(buf_size, 0);
- DCHECK(bytes_read);
DCHECK(!reader_->IsReadPending());
- reader_->ReadData(
- buf, buf_size, base::Bind(&AppCacheURLRequestJob::OnReadComplete,
- base::Unretained(this)));
- SetStatus(net::URLRequestStatus(net::URLRequestStatus::IO_PENDING, 0));
- return false;
+ reader_->ReadData(buf, buf_size,
+ base::Bind(&AppCacheURLRequestJob::OnReadComplete,
+ base::Unretained(this)));
+ return net::ERR_IO_PENDING;
}
void AppCacheURLRequestJob::SetExtraRequestHeaders(
diff --git a/content/browser/appcache/appcache_url_request_job.h b/content/browser/appcache/appcache_url_request_job.h
index 95a3e18..8f51e7b 100644
--- a/content/browser/appcache/appcache_url_request_job.h
+++ b/content/browser/appcache/appcache_url_request_job.h
@@ -148,7 +148,7 @@ class CONTENT_EXPORT AppCacheURLRequestJob
net::LoadState GetLoadState() const override;
bool GetCharset(std::string* charset) override;
void GetResponseInfo(net::HttpResponseInfo* info) override;
- bool ReadRawData(net::IOBuffer* buf, int buf_size, int* bytes_read) override;
+ int ReadRawData(net::IOBuffer* buf, int buf_size) override;
// Sets extra request headers for Job types that support request headers.
// This is how we get informed of range-requests.
diff --git a/content/browser/fileapi/file_writer_delegate_unittest.cc b/content/browser/fileapi/file_writer_delegate_unittest.cc
index c9bcfbb..9eb73b1 100644
--- a/content/browser/fileapi/file_writer_delegate_unittest.cc
+++ b/content/browser/fileapi/file_writer_delegate_unittest.cc
@@ -184,17 +184,15 @@ class FileWriterDelegateTestJob : public net::URLRequestJob {
base::Bind(&FileWriterDelegateTestJob::NotifyHeadersComplete, this));
}
- bool ReadRawData(net::IOBuffer* buf, int buf_size, int* bytes_read) override {
+ int ReadRawData(net::IOBuffer* buf, int buf_size) override {
if (remaining_bytes_ < buf_size)
- buf_size = static_cast<int>(remaining_bytes_);
+ buf_size = remaining_bytes_;
for (int i = 0; i < buf_size; ++i)
buf->data()[i] = content_[cursor_++];
remaining_bytes_ -= buf_size;
- SetStatus(net::URLRequestStatus());
- *bytes_read = buf_size;
- return true;
+ return buf_size;
}
int GetResponseCode() const override { return 200; }
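
The test job above is the simplest possible adopter: a fully synchronous ReadRawData() that just returns the byte count. A standalone equivalent for reference (illustrative, not the test fixture itself):

#include <algorithm>
#include <cstring>
#include <string>
#include <utility>

class SyncJobModel {
 public:
  explicit SyncJobModel(std::string content) : content_(std::move(content)) {}

  // Copies up to |buf_size| bytes and returns how many were copied; a
  // return of 0 signals EOF. No out-parameter or status juggling needed.
  int ReadRawData(char* buf, int buf_size) {
    int remaining = static_cast<int>(content_.size()) - cursor_;
    int n = std::min(buf_size, remaining);
    std::memcpy(buf, content_.data() + cursor_, n);
    cursor_ += n;
    return n;
  }

 private:
  std::string content_;
  int cursor_ = 0;
};
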
diff --git a/content/browser/net/view_http_cache_job_factory.cc b/content/browser/net/view_http_cache_job_factory.cc
index 6b46b61..3f9570eed 100644
--- a/content/browser/net/view_http_cache_job_factory.cc
+++ b/content/browser/net/view_http_cache_job_factory.cc
@@ -10,6 +10,7 @@
#include "base/compiler_specific.h"
#include "base/location.h"
#include "base/memory/weak_ptr.h"
+#include "base/numerics/safe_conversions.h"
#include "base/single_thread_task_runner.h"
#include "base/strings/string_util.h"
#include "base/thread_task_runner_handle.h"
@@ -44,8 +45,8 @@ class ViewHttpCacheJob : public net::URLRequestJob {
bool GetCharset(std::string* charset) override {
return core_->GetCharset(charset);
}
- bool ReadRawData(net::IOBuffer* buf, int buf_size, int* bytes_read) override {
- return core_->ReadRawData(buf, buf_size, bytes_read);
+ int ReadRawData(net::IOBuffer* buf, int buf_size) override {
+ return core_->ReadRawData(buf, buf_size);
}
private:
@@ -65,7 +66,7 @@ class ViewHttpCacheJob : public net::URLRequestJob {
bool GetMimeType(std::string* mime_type) const;
bool GetCharset(std::string* charset);
- bool ReadRawData(net::IOBuffer* buf, int buf_size, int *bytes_read);
+ int ReadRawData(net::IOBuffer* buf, int buf_size);
private:
friend class base::RefCounted<Core>;
@@ -164,17 +165,13 @@ bool ViewHttpCacheJob::Core::GetCharset(std::string* charset) {
return true;
}
-bool ViewHttpCacheJob::Core::ReadRawData(net::IOBuffer* buf,
- int buf_size,
- int* bytes_read) {
- DCHECK(bytes_read);
- int remaining = static_cast<int>(data_.size()) - data_offset_;
+int ViewHttpCacheJob::Core::ReadRawData(net::IOBuffer* buf, int buf_size) {
+ int remaining = base::checked_cast<int>(data_.size()) - data_offset_;
if (buf_size > remaining)
buf_size = remaining;
memcpy(buf->data(), data_.data() + data_offset_, buf_size);
data_offset_ += buf_size;
- *bytes_read = buf_size;
- return true;
+ return buf_size;
}
void ViewHttpCacheJob::Core::OnIOComplete(int result) {
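
view_http_cache_job_factory.cc additionally swaps a plain static_cast for base::checked_cast when narrowing data_.size() to int, so an oversized body fails loudly instead of silently truncating. A rough stand-in for what that guard does (the real implementation lives in base/numerics/safe_conversions.h and CHECKs rather than asserts):

#include <cassert>
#include <cstddef>
#include <limits>

int CheckedSizeToInt(size_t value) {
  // Fail hard on narrowing loss instead of wrapping to a bogus int.
  assert(value <= static_cast<size_t>(std::numeric_limits<int>::max()));
  return static_cast<int>(value);
}
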
diff --git a/content/browser/service_worker/service_worker_read_from_cache_job.cc b/content/browser/service_worker/service_worker_read_from_cache_job.cc
index 062d475..6aa92af8 100644
--- a/content/browser/service_worker/service_worker_read_from_cache_job.cc
+++ b/content/browser/service_worker/service_worker_read_from_cache_job.cc
@@ -41,27 +41,9 @@ ServiceWorkerReadFromCacheJob::~ServiceWorkerReadFromCacheJob() {
}
void ServiceWorkerReadFromCacheJob::Start() {
- TRACE_EVENT_ASYNC_BEGIN1("ServiceWorker",
- "ServiceWorkerReadFromCacheJob::ReadInfo",
- this,
- "URL", request_->url().spec());
- if (!context_) {
- NotifyStartError(net::URLRequestStatus(
- net::URLRequestStatus::FAILED, net::ERR_FAILED));
- return;
- }
-
- // Create a response reader and start reading the headers,
- // we'll continue when thats done.
- if (is_main_script())
- version_->embedded_worker()->OnScriptReadStarted();
- reader_ = context_->storage()->CreateResponseReader(resource_id_);
- http_info_io_buffer_ = new HttpResponseInfoIOBuffer;
- reader_->ReadInfo(
- http_info_io_buffer_.get(),
- base::Bind(&ServiceWorkerReadFromCacheJob::OnReadInfoComplete,
- weak_factory_.GetWeakPtr()));
- SetStatus(net::URLRequestStatus(net::URLRequestStatus::IO_PENDING, 0));
+ base::ThreadTaskRunnerHandle::Get()->PostTask(
+ FROM_HERE, base::Bind(&ServiceWorkerReadFromCacheJob::StartAsync,
+ weak_factory_.GetWeakPtr()));
}
void ServiceWorkerReadFromCacheJob::Kill() {
@@ -123,22 +105,42 @@ void ServiceWorkerReadFromCacheJob::SetExtraRequestHeaders(
range_requested_ = ranges[0];
}
-bool ServiceWorkerReadFromCacheJob::ReadRawData(
- net::IOBuffer* buf,
- int buf_size,
- int *bytes_read) {
+int ServiceWorkerReadFromCacheJob::ReadRawData(net::IOBuffer* buf,
+ int buf_size) {
DCHECK_NE(buf_size, 0);
- DCHECK(bytes_read);
DCHECK(!reader_->IsReadPending());
TRACE_EVENT_ASYNC_BEGIN1("ServiceWorker",
"ServiceWorkerReadFromCacheJob::ReadRawData",
this,
"URL", request_->url().spec());
- reader_->ReadData(
- buf, buf_size, base::Bind(&ServiceWorkerReadFromCacheJob::OnReadComplete,
- weak_factory_.GetWeakPtr()));
+ reader_->ReadData(buf, buf_size,
+ base::Bind(&ServiceWorkerReadFromCacheJob::OnReadComplete,
+ weak_factory_.GetWeakPtr()));
+ return net::ERR_IO_PENDING;
+}
+
+void ServiceWorkerReadFromCacheJob::StartAsync() {
+ TRACE_EVENT_ASYNC_BEGIN1("ServiceWorker",
+ "ServiceWorkerReadFromCacheJob::ReadInfo", this,
+ "URL", request_->url().spec());
+ if (!context_) {
+ // NotifyStartError is not safe to call synchronously in Start.
+ NotifyStartError(
+ net::URLRequestStatus(net::URLRequestStatus::FAILED, net::ERR_FAILED));
+ return;
+ }
+
+  // Create a response reader and start reading the headers; we'll continue
+  // when that's done.
+ if (is_main_script())
+ version_->embedded_worker()->OnScriptReadStarted();
+ reader_ = context_->storage()->CreateResponseReader(resource_id_);
+ http_info_io_buffer_ = new HttpResponseInfoIOBuffer;
+ reader_->ReadInfo(
+ http_info_io_buffer_.get(),
+ base::Bind(&ServiceWorkerReadFromCacheJob::OnReadInfoComplete,
+ weak_factory_.GetWeakPtr()));
SetStatus(net::URLRequestStatus(net::URLRequestStatus::IO_PENDING, 0));
- return false;
}
const net::HttpResponseInfo* ServiceWorkerReadFromCacheJob::http_info() const {
@@ -156,6 +158,8 @@ void ServiceWorkerReadFromCacheJob::OnReadInfoComplete(int result) {
ServiceWorkerMetrics::CountReadResponseResult(
ServiceWorkerMetrics::READ_HEADERS_ERROR);
Done(net::URLRequestStatus(net::URLRequestStatus::FAILED, result));
+ NotifyStartError(
+ net::URLRequestStatus(net::URLRequestStatus::FAILED, result));
return;
}
DCHECK_GE(result, 0);
@@ -209,23 +213,22 @@ void ServiceWorkerReadFromCacheJob::Done(const net::URLRequestStatus& status) {
}
if (is_main_script())
version_->embedded_worker()->OnScriptReadFinished();
- NotifyDone(status);
}
void ServiceWorkerReadFromCacheJob::OnReadComplete(int result) {
ServiceWorkerMetrics::ReadResponseResult check_result;
- if (result == 0) {
+
+ if (result >= 0) {
check_result = ServiceWorkerMetrics::READ_OK;
- Done(net::URLRequestStatus());
- } else if (result < 0) {
+ if (result == 0)
+ Done(net::URLRequestStatus());
+ } else {
check_result = ServiceWorkerMetrics::READ_DATA_ERROR;
Done(net::URLRequestStatus(net::URLRequestStatus::FAILED, result));
- } else {
- check_result = ServiceWorkerMetrics::READ_OK;
- SetStatus(net::URLRequestStatus()); // Clear the IO_PENDING status
}
+
ServiceWorkerMetrics::CountReadResponseResult(check_result);
- NotifyReadComplete(result);
+ ReadRawDataComplete(result);
TRACE_EVENT_ASYNC_END1("ServiceWorker",
"ServiceWorkerReadFromCacheJob::ReadRawData",
this,
diff --git a/content/browser/service_worker/service_worker_read_from_cache_job.h b/content/browser/service_worker/service_worker_read_from_cache_job.h
index fccde7a..a62893d 100644
--- a/content/browser/service_worker/service_worker_read_from_cache_job.h
+++ b/content/browser/service_worker/service_worker_read_from_cache_job.h
@@ -51,13 +51,14 @@ class CONTENT_EXPORT ServiceWorkerReadFromCacheJob
void GetResponseInfo(net::HttpResponseInfo* info) override;
int GetResponseCode() const override;
void SetExtraRequestHeaders(const net::HttpRequestHeaders& headers) override;
- bool ReadRawData(net::IOBuffer* buf, int buf_size, int* bytes_read) override;
+ int ReadRawData(net::IOBuffer* buf, int buf_size) override;
// Reader completion callbacks.
void OnReadInfoComplete(int result);
void OnReadComplete(int result);
// Helpers
+ void StartAsync();
const net::HttpResponseInfo* http_info() const;
bool is_range_request() const { return range_requested_.IsValid(); }
void SetupRangeResponse(int response_data_size);
diff --git a/content/browser/service_worker/service_worker_url_request_job.cc b/content/browser/service_worker/service_worker_url_request_job.cc
index 2572c93..7e7c699 100644
--- a/content/browser/service_worker/service_worker_url_request_job.cc
+++ b/content/browser/service_worker/service_worker_url_request_job.cc
@@ -203,49 +203,44 @@ void ServiceWorkerURLRequestJob::SetExtraRequestHeaders(
byte_range_ = ranges[0];
}
-bool ServiceWorkerURLRequestJob::ReadRawData(
- net::IOBuffer* buf, int buf_size, int *bytes_read) {
+int ServiceWorkerURLRequestJob::ReadRawData(net::IOBuffer* buf, int buf_size) {
DCHECK(buf);
DCHECK_GE(buf_size, 0);
- DCHECK(bytes_read);
DCHECK(waiting_stream_url_.is_empty());
+
+ int bytes_read = 0;
+
if (stream_.get()) {
- switch (stream_->ReadRawData(buf, buf_size, bytes_read)) {
+ switch (stream_->ReadRawData(buf, buf_size, &bytes_read)) {
case Stream::STREAM_HAS_DATA:
- DCHECK_GT(*bytes_read, 0);
- return true;
+ DCHECK_GT(bytes_read, 0);
+ return bytes_read;
case Stream::STREAM_COMPLETE:
- DCHECK(!*bytes_read);
+ DCHECK_EQ(0, bytes_read);
RecordResult(ServiceWorkerMetrics::REQUEST_JOB_STREAM_RESPONSE);
- return true;
+ return 0;
case Stream::STREAM_EMPTY:
stream_pending_buffer_ = buf;
stream_pending_buffer_size_ = buf_size;
- SetStatus(net::URLRequestStatus(net::URLRequestStatus::IO_PENDING, 0));
- return false;
+ return net::ERR_IO_PENDING;
case Stream::STREAM_ABORTED:
// Handle this as connection reset.
RecordResult(ServiceWorkerMetrics::REQUEST_JOB_ERROR_STREAM_ABORTED);
- NotifyDone(net::URLRequestStatus(net::URLRequestStatus::FAILED,
- net::ERR_CONNECTION_RESET));
- return false;
+ return net::ERR_CONNECTION_RESET;
}
NOTREACHED();
- return false;
+ return net::ERR_FAILED;
}
- if (!blob_request_) {
- *bytes_read = 0;
- return true;
- }
- blob_request_->Read(buf, buf_size, bytes_read);
+ if (!blob_request_)
+ return 0;
+ blob_request_->Read(buf, buf_size, &bytes_read);
net::URLRequestStatus status = blob_request_->status();
- SetStatus(status);
- if (status.is_io_pending())
- return false;
- if (status.is_success() && *bytes_read == 0)
+ if (status.status() != net::URLRequestStatus::SUCCESS)
+ return status.error();
+ if (bytes_read == 0)
RecordResult(ServiceWorkerMetrics::REQUEST_JOB_BLOB_RESPONSE);
- return status.is_success();
+ return bytes_read;
}
// TODO(falken): Refactor Blob and Stream specific handling to separate classes.
@@ -291,29 +286,18 @@ void ServiceWorkerURLRequestJob::OnResponseStarted(net::URLRequest* request) {
void ServiceWorkerURLRequestJob::OnReadCompleted(net::URLRequest* request,
int bytes_read) {
- SetStatus(request->status());
if (!request->status().is_success()) {
RecordResult(ServiceWorkerMetrics::REQUEST_JOB_ERROR_BLOB_READ);
- NotifyDone(request->status());
- return;
- }
-
- if (bytes_read == 0) {
- // Protect because NotifyReadComplete() can destroy |this|.
- scoped_refptr<ServiceWorkerURLRequestJob> protect(this);
+ } else if (bytes_read == 0) {
RecordResult(ServiceWorkerMetrics::REQUEST_JOB_BLOB_RESPONSE);
- NotifyReadComplete(bytes_read);
- NotifyDone(request->status());
- return;
}
- NotifyReadComplete(bytes_read);
+ net::URLRequestStatus status = request->status();
+ ReadRawDataComplete(status.is_success() ? bytes_read : status.error());
}
// Overrides for Stream reading -----------------------------------------------
void ServiceWorkerURLRequestJob::OnDataAvailable(Stream* stream) {
- // Clear the IO_PENDING status.
- SetStatus(net::URLRequestStatus());
// Do nothing if stream_pending_buffer_ is empty, i.e. there's no ReadRawData
// operation waiting for IO completion.
if (!stream_pending_buffer_.get())
@@ -322,15 +306,15 @@ void ServiceWorkerURLRequestJob::OnDataAvailable(Stream* stream) {
// stream_pending_buffer_ is set to the IOBuffer instance provided to
// ReadRawData() by URLRequestJob.
- int bytes_read = 0;
- switch (stream_->ReadRawData(
- stream_pending_buffer_.get(), stream_pending_buffer_size_, &bytes_read)) {
+ int result = 0;
+ switch (stream_->ReadRawData(stream_pending_buffer_.get(),
+ stream_pending_buffer_size_, &result)) {
case Stream::STREAM_HAS_DATA:
- DCHECK_GT(bytes_read, 0);
+ DCHECK_GT(result, 0);
break;
case Stream::STREAM_COMPLETE:
// Calling NotifyReadComplete with 0 signals completion.
- DCHECK(!bytes_read);
+ DCHECK(!result);
RecordResult(ServiceWorkerMetrics::REQUEST_JOB_STREAM_RESPONSE);
break;
case Stream::STREAM_EMPTY:
@@ -338,9 +322,8 @@ void ServiceWorkerURLRequestJob::OnDataAvailable(Stream* stream) {
break;
case Stream::STREAM_ABORTED:
// Handle this as connection reset.
+ result = net::ERR_CONNECTION_RESET;
RecordResult(ServiceWorkerMetrics::REQUEST_JOB_ERROR_STREAM_ABORTED);
- NotifyDone(net::URLRequestStatus(net::URLRequestStatus::FAILED,
- net::ERR_CONNECTION_RESET));
break;
}
@@ -348,7 +331,7 @@ void ServiceWorkerURLRequestJob::OnDataAvailable(Stream* stream) {
// safe for the observer to read.
stream_pending_buffer_ = nullptr;
stream_pending_buffer_size_ = 0;
- NotifyReadComplete(bytes_read);
+ ReadRawDataComplete(result);
}
void ServiceWorkerURLRequestJob::OnStreamRegistered(Stream* stream) {
@@ -644,7 +627,7 @@ void ServiceWorkerURLRequestJob::DidDispatchFetchEvent(
// error.
if (response.status_code == 0) {
RecordStatusZeroResponseError(response.error);
- NotifyDone(
+ NotifyStartError(
net::URLRequestStatus(net::URLRequestStatus::FAILED, net::ERR_FAILED));
return;
}
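
The service worker and stream jobs share the pending-buffer idiom: when no data is buffered, ReadRawData() parks the caller's buffer and returns ERR_IO_PENDING, and OnDataAvailable() later fills it and calls ReadRawDataComplete(). A standalone sketch of that idiom (Stream and IOBuffer replaced by stand-ins):

#include <algorithm>
#include <cstring>
#include <string>

class StreamJobModel {
 public:
  static const int kErrIoPending = -1;  // stand-in for net::ERR_IO_PENDING

  // Returns buffered data if any; otherwise parks the caller's buffer and
  // promises a later ReadRawDataComplete() call.
  int ReadRawData(char* buf, int buf_size) {
    if (!available_.empty())
      return Drain(buf, buf_size);
    pending_buffer_ = buf;
    pending_buffer_size_ = buf_size;
    return kErrIoPending;
  }

  // Producer-side signal: complete a parked read, if one is waiting.
  void OnDataAvailable(const std::string& data) {
    available_ += data;
    if (!pending_buffer_)
      return;
    int result = Drain(pending_buffer_, pending_buffer_size_);
    pending_buffer_ = nullptr;  // release before notifying, as in the diff
    pending_buffer_size_ = 0;
    ReadRawDataComplete(result);
  }

 private:
  int Drain(char* buf, int buf_size) {
    int n = std::min(buf_size, static_cast<int>(available_.size()));
    std::memcpy(buf, available_.data(), n);
    available_.erase(0, n);
    return n;
  }
  void ReadRawDataComplete(int /*result*/) {}

  std::string available_;
  char* pending_buffer_ = nullptr;
  int pending_buffer_size_ = 0;
};
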
diff --git a/content/browser/service_worker/service_worker_url_request_job.h b/content/browser/service_worker/service_worker_url_request_job.h
index 4815c0b..2a50c94 100644
--- a/content/browser/service_worker/service_worker_url_request_job.h
+++ b/content/browser/service_worker/service_worker_url_request_job.h
@@ -87,7 +87,7 @@ class CONTENT_EXPORT ServiceWorkerURLRequestJob
void GetLoadTimingInfo(net::LoadTimingInfo* load_timing_info) const override;
int GetResponseCode() const override;
void SetExtraRequestHeaders(const net::HttpRequestHeaders& headers) override;
- bool ReadRawData(net::IOBuffer* buf, int buf_size, int* bytes_read) override;
+ int ReadRawData(net::IOBuffer* buf, int buf_size) override;
// net::URLRequest::Delegate overrides that read the blob from the
// ServiceWorkerFetchResponse.
diff --git a/content/browser/service_worker/service_worker_write_to_cache_job.cc b/content/browser/service_worker/service_worker_write_to_cache_job.cc
index 27d545e5..0cc9f88 100644
--- a/content/browser/service_worker/service_worker_write_to_cache_job.cc
+++ b/content/browser/service_worker/service_worker_write_to_cache_job.cc
@@ -7,6 +7,7 @@
#include "base/bind.h"
#include "base/callback.h"
#include "base/strings/stringprintf.h"
+#include "base/thread_task_runner_handle.h"
#include "base/trace_event/trace_event.h"
#include "content/browser/service_worker/service_worker_cache_writer.h"
#include "content/browser/service_worker/service_worker_context_core.h"
@@ -47,7 +48,7 @@ const char kServiceWorkerAllowed[] = "Service-Worker-Allowed";
// developers.
// TODO(falken): Redesign this class so we don't have to fail at the network
// stack layer just to cancel the update.
-const int kIdenticalScriptError = net::ERR_FILE_EXISTS;
+const net::Error kIdenticalScriptError = net::ERR_FILE_EXISTS;
} // namespace
@@ -79,13 +80,20 @@ ServiceWorkerWriteToCacheJob::~ServiceWorkerWriteToCacheJob() {
}
void ServiceWorkerWriteToCacheJob::Start() {
+ base::ThreadTaskRunnerHandle::Get()->PostTask(
+ FROM_HERE, base::Bind(&ServiceWorkerWriteToCacheJob::StartAsync,
+ weak_factory_.GetWeakPtr()));
+}
+
+void ServiceWorkerWriteToCacheJob::StartAsync() {
TRACE_EVENT_ASYNC_BEGIN1("ServiceWorker",
"ServiceWorkerWriteToCacheJob::ExecutingJob",
this,
"URL", request_->url().spec());
if (!context_) {
- NotifyStartError(net::URLRequestStatus(
- net::URLRequestStatus::FAILED, net::ERR_FAILED));
+ // NotifyStartError is not safe to call synchronously in Start().
+ NotifyStartError(
+ net::URLRequestStatus(net::URLRequestStatus::FAILED, net::ERR_FAILED));
return;
}
@@ -108,8 +116,9 @@ void ServiceWorkerWriteToCacheJob::Kill() {
has_been_killed_ = true;
net_request_.reset();
if (did_notify_started_) {
- NotifyFinishedCaching(net::URLRequestStatus::FromError(net::ERR_ABORTED),
- kKilledError);
+ net::Error error = NotifyFinishedCaching(
+ net::URLRequestStatus::FromError(net::ERR_ABORTED), kKilledError);
+ DCHECK_EQ(net::ERR_ABORTED, error);
}
writer_.reset();
context_.reset();
@@ -156,40 +165,20 @@ void ServiceWorkerWriteToCacheJob::SetExtraRequestHeaders(
net_request_->SetExtraRequestHeaders(headers);
}
-bool ServiceWorkerWriteToCacheJob::ReadRawData(net::IOBuffer* buf,
- int buf_size,
- int* bytes_read) {
- net::URLRequestStatus status = ReadNetData(buf, buf_size, bytes_read);
- SetStatus(status);
+int ServiceWorkerWriteToCacheJob::ReadRawData(net::IOBuffer* buf,
+ int buf_size) {
+ int bytes_read = 0;
+ net::URLRequestStatus status = ReadNetData(buf, buf_size, &bytes_read);
if (status.is_io_pending())
- return false;
-
- if (!status.is_success()) {
- NotifyDoneHelper(status, kFetchScriptError);
- return false;
- }
-
- HandleNetData(*bytes_read);
- status = GetStatus();
-
- // Synchronous EOFs that do not replace the incumbent entry are considered
- // failures. Since normally the URLRequestJob's status would be set by
- // ReadNetData or HandleNetData, this code has to manually fix up the status
- // to match the failure this function is about to return.
- if (status.status() == net::URLRequestStatus::SUCCESS &&
- *bytes_read == 0 && !cache_writer_->did_replace()) {
- status = net::URLRequestStatus::FromError(kIdenticalScriptError);
- }
+ return net::ERR_IO_PENDING;
if (!status.is_success()) {
- NotifyDoneHelper(status, "");
- return false;
+ net::Error error = NotifyFinishedCaching(status, kFetchScriptError);
+ DCHECK_EQ(status.error(), error);
+ return error;
}
- // Since URLRequestStatus::is_success() means "SUCCESS or IO_PENDING", but the
- // contract of this function is "return true for synchronous successes only",
- // it is important to test against SUCCESS explicitly here.
- return status.status() == net::URLRequestStatus::SUCCESS;
+ return HandleNetData(bytes_read);
}
const net::HttpResponseInfo* ServiceWorkerWriteToCacheJob::http_info() const {
@@ -244,9 +233,9 @@ void ServiceWorkerWriteToCacheJob::OnReceivedRedirect(
TRACE_EVENT0("ServiceWorker",
"ServiceWorkerWriteToCacheJob::OnReceivedRedirect");
// Script resources can't redirect.
- NotifyDoneHelper(net::URLRequestStatus(net::URLRequestStatus::FAILED,
- net::ERR_UNSAFE_REDIRECT),
- kRedirectError);
+ NotifyStartErrorHelper(net::URLRequestStatus(net::URLRequestStatus::FAILED,
+ net::ERR_UNSAFE_REDIRECT),
+ kRedirectError);
}
void ServiceWorkerWriteToCacheJob::OnAuthRequired(
@@ -256,7 +245,7 @@ void ServiceWorkerWriteToCacheJob::OnAuthRequired(
TRACE_EVENT0("ServiceWorker",
"ServiceWorkerWriteToCacheJob::OnAuthRequired");
// TODO(michaeln): Pass this thru to our jobs client.
- NotifyDoneHelper(
+ NotifyStartErrorHelper(
net::URLRequestStatus(net::URLRequestStatus::FAILED, net::ERR_FAILED),
kClientAuthenticationError);
}
@@ -269,7 +258,7 @@ void ServiceWorkerWriteToCacheJob::OnCertificateRequested(
"ServiceWorkerWriteToCacheJob::OnCertificateRequested");
// TODO(michaeln): Pass this thru to our jobs client.
// see NotifyCertificateRequested.
- NotifyDoneHelper(
+ NotifyStartErrorHelper(
net::URLRequestStatus(net::URLRequestStatus::FAILED, net::ERR_FAILED),
kClientAuthenticationError);
}
@@ -283,9 +272,9 @@ void ServiceWorkerWriteToCacheJob::OnSSLCertificateError(
"ServiceWorkerWriteToCacheJob::OnSSLCertificateError");
// TODO(michaeln): Pass this thru to our jobs client,
// see NotifySSLCertificateError.
- NotifyDoneHelper(net::URLRequestStatus(net::URLRequestStatus::FAILED,
- net::ERR_INSECURE_RESPONSE),
- kSSLError);
+ NotifyStartErrorHelper(net::URLRequestStatus(net::URLRequestStatus::FAILED,
+ net::ERR_INSECURE_RESPONSE),
+ kSSLError);
}
void ServiceWorkerWriteToCacheJob::OnBeforeNetworkStart(
@@ -301,15 +290,15 @@ void ServiceWorkerWriteToCacheJob::OnResponseStarted(
net::URLRequest* request) {
DCHECK_EQ(net_request_, request);
if (!request->status().is_success()) {
- NotifyDoneHelper(request->status(), kFetchScriptError);
+ NotifyStartErrorHelper(request->status(), kFetchScriptError);
return;
}
if (request->GetResponseCode() / 100 != 2) {
std::string error_message =
base::StringPrintf(kBadHTTPResponseError, request->GetResponseCode());
- NotifyDoneHelper(net::URLRequestStatus(net::URLRequestStatus::FAILED,
- net::ERR_INVALID_RESPONSE),
- error_message);
+ NotifyStartErrorHelper(net::URLRequestStatus(net::URLRequestStatus::FAILED,
+ net::ERR_INVALID_RESPONSE),
+ error_message);
// TODO(michaeln): Instead of error'ing immediately, send the net
// response to our consumer, just don't cache it?
return;
@@ -320,9 +309,10 @@ void ServiceWorkerWriteToCacheJob::OnResponseStarted(
const net::HttpNetworkSession::Params* session_params =
request->context()->GetNetworkSessionParams();
if (!session_params || !session_params->ignore_certificate_errors) {
- NotifyDoneHelper(net::URLRequestStatus(net::URLRequestStatus::FAILED,
- net::ERR_INSECURE_RESPONSE),
- kSSLError);
+ NotifyStartErrorHelper(
+ net::URLRequestStatus(net::URLRequestStatus::FAILED,
+ net::ERR_INSECURE_RESPONSE),
+ kSSLError);
return;
}
}
@@ -337,9 +327,10 @@ void ServiceWorkerWriteToCacheJob::OnResponseStarted(
mime_type.empty()
? kNoMIMEError
: base::StringPrintf(kBadMIMEError, mime_type.c_str());
- NotifyDoneHelper(net::URLRequestStatus(net::URLRequestStatus::FAILED,
- net::ERR_INSECURE_RESPONSE),
- error_message);
+ NotifyStartErrorHelper(
+ net::URLRequestStatus(net::URLRequestStatus::FAILED,
+ net::ERR_INSECURE_RESPONSE),
+ error_message);
return;
}
@@ -375,53 +366,31 @@ void ServiceWorkerWriteToCacheJob::OnWriteHeadersComplete(net::Error error) {
NotifyHeadersComplete();
}
-void ServiceWorkerWriteToCacheJob::HandleNetData(int bytes_read) {
- io_buffer_bytes_ = bytes_read;
- net::Error error = cache_writer_->MaybeWriteData(
- io_buffer_.get(), bytes_read,
- base::Bind(&ServiceWorkerWriteToCacheJob::OnWriteDataComplete,
- weak_factory_.GetWeakPtr()));
- SetStatus(net::URLRequestStatus::FromError(error));
-
- // In case of ERR_IO_PENDING, this logic is done in OnWriteDataComplete.
- if (error != net::ERR_IO_PENDING && bytes_read == 0) {
- NotifyFinishedCaching(net::URLRequestStatus::FromError(error),
- std::string());
- }
-}
-
void ServiceWorkerWriteToCacheJob::OnWriteDataComplete(net::Error error) {
SetStatus(net::URLRequestStatus::FromError(error));
DCHECK_NE(net::ERR_IO_PENDING, error);
- if (io_buffer_bytes_ == 0) {
- NotifyDoneHelper(net::URLRequestStatus::FromError(error), std::string());
- }
- NotifyReadComplete(error == net::OK ? io_buffer_bytes_ : error);
+ if (io_buffer_bytes_ == 0)
+ error = NotifyFinishedCaching(net::URLRequestStatus::FromError(error), "");
+ ReadRawDataComplete(error == net::OK ? io_buffer_bytes_ : error);
}
-void ServiceWorkerWriteToCacheJob::OnReadCompleted(
- net::URLRequest* request,
- int bytes_read) {
+void ServiceWorkerWriteToCacheJob::OnReadCompleted(net::URLRequest* request,
+ int result) {
DCHECK_EQ(net_request_, request);
- if (bytes_read < 0) {
+ DCHECK_NE(result, net::ERR_IO_PENDING);
+
+ if (result < 0) {
DCHECK(!request->status().is_success());
- NotifyDoneHelper(request->status(), kFetchScriptError);
- return;
- }
- HandleNetData(bytes_read);
- // HandleNetData can cause status of this job to change. If the status changes
- // to IO_PENDING, that means HandleNetData has pending IO, and
- // NotifyReadComplete will be called later by the appropriate callback.
- if (!GetStatus().is_io_pending()) {
- int result = GetStatus().status() == net::URLRequestStatus::SUCCESS
- ? bytes_read
- : GetStatus().error();
- // If bytes_read is 0, HandleNetData synchronously completed and this job is
- // at EOF.
- if (bytes_read == 0)
- NotifyDoneHelper(GetStatus(), std::string());
- NotifyReadComplete(result);
+ result = NotifyFinishedCaching(request->status(), kFetchScriptError);
+ } else {
+ result = HandleNetData(result);
}
+
+ // ReadRawDataComplete will be called in OnWriteDataComplete, so return early.
+ if (result == net::ERR_IO_PENDING)
+ return;
+
+ ReadRawDataComplete(result);
}
bool ServiceWorkerWriteToCacheJob::CheckPathRestriction(
@@ -435,43 +404,57 @@ bool ServiceWorkerWriteToCacheJob::CheckPathRestriction(
if (!ServiceWorkerUtils::IsPathRestrictionSatisfied(
version_->scope(), url_,
has_header ? &service_worker_allowed : nullptr, &error_message)) {
- NotifyDoneHelper(net::URLRequestStatus(net::URLRequestStatus::FAILED,
- net::ERR_INSECURE_RESPONSE),
- error_message);
+ NotifyStartErrorHelper(net::URLRequestStatus(net::URLRequestStatus::FAILED,
+ net::ERR_INSECURE_RESPONSE),
+ error_message);
return false;
}
return true;
}
-void ServiceWorkerWriteToCacheJob::NotifyDoneHelper(
+int ServiceWorkerWriteToCacheJob::HandleNetData(int bytes_read) {
+ io_buffer_bytes_ = bytes_read;
+ net::Error error = cache_writer_->MaybeWriteData(
+ io_buffer_.get(), bytes_read,
+ base::Bind(&ServiceWorkerWriteToCacheJob::OnWriteDataComplete,
+ weak_factory_.GetWeakPtr()));
+
+ // In case of ERR_IO_PENDING, this logic is done in OnWriteDataComplete.
+ if (error != net::ERR_IO_PENDING && bytes_read == 0) {
+ error = NotifyFinishedCaching(net::URLRequestStatus::FromError(error),
+ std::string());
+ }
+ return error == net::OK ? bytes_read : error;
+}
+
+void ServiceWorkerWriteToCacheJob::NotifyStartErrorHelper(
const net::URLRequestStatus& status,
const std::string& status_message) {
DCHECK(!status.is_io_pending());
- // Note that NotifyFinishedCaching has logic in it to detect the special case
- // mentioned below as well.
- NotifyFinishedCaching(status, status_message);
+ net::Error error = NotifyFinishedCaching(status, status_message);
+ // The special case mentioned in NotifyFinishedCaching about script being
+ // identical does not apply here, since the entire body needs to be read
+ // before this is relevant.
+ DCHECK_EQ(status.error(), error);
net::URLRequestStatus reported_status = status;
std::string reported_status_message = status_message;
- // A strange special case: requests that successfully fetch the entire
- // ServiceWorker and write it back, but which did not replace the incumbent
- // script because the new script was identical, are considered to have failed.
- if (status.is_success() && !cache_writer_->did_replace()) {
- reported_status = net::URLRequestStatus::FromError(kIdenticalScriptError);
- reported_status_message = "";
- }
-
SetStatus(reported_status);
- NotifyDone(reported_status);
+ NotifyStartError(reported_status);
}
-void ServiceWorkerWriteToCacheJob::NotifyFinishedCaching(
+net::Error ServiceWorkerWriteToCacheJob::NotifyFinishedCaching(
net::URLRequestStatus status,
const std::string& status_message) {
+ net::Error result = static_cast<net::Error>(status.error());
if (did_notify_finished_)
- return;
+ return result;
+
+ int size = -1;
+ if (status.is_success())
+ size = cache_writer_->bytes_written();
// If all the calls to MaybeWriteHeaders/MaybeWriteData succeeded, but the
// incumbent entry wasn't actually replaced because the new entry was
@@ -479,17 +462,18 @@ void ServiceWorkerWriteToCacheJob::NotifyFinishedCaching(
// exists.
if (status.status() == net::URLRequestStatus::SUCCESS &&
!cache_writer_->did_replace()) {
- status = net::URLRequestStatus::FromError(kIdenticalScriptError);
+ result = kIdenticalScriptError;
+ status = net::URLRequestStatus::FromError(result);
version_->SetStartWorkerStatusCode(SERVICE_WORKER_ERROR_EXISTS);
+ version_->script_cache_map()->NotifyFinishedCaching(url_, size, status,
+ std::string());
+ } else {
+ version_->script_cache_map()->NotifyFinishedCaching(url_, size, status,
+ status_message);
}
- int size = -1;
- if (status.is_success())
- size = cache_writer_->bytes_written();
-
- version_->script_cache_map()->NotifyFinishedCaching(url_, size, status,
- status_message);
did_notify_finished_ = true;
+ return result;
}
scoped_ptr<ServiceWorkerResponseReader>
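
In the write-to-cache job, helpers that used to call NotifyDone() directly now return the final net::Error so the caller decides whether it feeds NotifyStartError() or ReadRawDataComplete(); NotifyFinishedCaching() can also upgrade a success to kIdenticalScriptError when the new script did not replace the incumbent. A reduced model of that return-the-error shape (enum values are stand-ins):

enum class Err { kOk, kFileExists /* kIdenticalScriptError */, kAborted };

class WriteJobModel {
 public:
  // Returns the error passed in, or a new one if the finished write turns
  // out not to have replaced the incumbent script.
  Err NotifyFinishedCaching(Err status) {
    if (did_notify_finished_)
      return status;
    did_notify_finished_ = true;
    if (status == Err::kOk && !did_replace_)
      return Err::kFileExists;  // identical script: surfaced as an error
    return status;
  }

 private:
  bool did_notify_finished_ = false;
  bool did_replace_ = false;
};
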
diff --git a/content/browser/service_worker/service_worker_write_to_cache_job.h b/content/browser/service_worker/service_worker_write_to_cache_job.h
index 9227239..e614912 100644
--- a/content/browser/service_worker/service_worker_write_to_cache_job.h
+++ b/content/browser/service_worker/service_worker_write_to_cache_job.h
@@ -64,6 +64,7 @@ class CONTENT_EXPORT ServiceWorkerWriteToCacheJob
// net::URLRequestJob overrides
void Start() override;
+ void StartAsync();
void Kill() override;
net::LoadState GetLoadState() const override;
bool GetCharset(std::string* charset) override;
@@ -71,7 +72,7 @@ class CONTENT_EXPORT ServiceWorkerWriteToCacheJob
void GetResponseInfo(net::HttpResponseInfo* info) override;
int GetResponseCode() const override;
void SetExtraRequestHeaders(const net::HttpRequestHeaders& headers) override;
- bool ReadRawData(net::IOBuffer* buf, int buf_size, int* bytes_read) override;
+ int ReadRawData(net::IOBuffer* buf, int buf_size) override;
const net::HttpResponseInfo* http_info() const;
@@ -109,18 +110,22 @@ class CONTENT_EXPORT ServiceWorkerWriteToCacheJob
bool CheckPathRestriction(net::URLRequest* request);
// Writes network data back to the script cache if needed, and notifies the
- // script cache of fetch completion at EOF. This function might need to do
- // asynchronous IO; if so, it signals this through setting the URLRequestJob's
- // status to IO_PENDING. After this function returns, if the URLRequestJob
- // isn't IO_PENDING, all of the data in |io_buffer_| has been written back to
- // the script cache if necessary.
- void HandleNetData(int bytes_read);
-
- void NotifyDoneHelper(const net::URLRequestStatus& status,
- const std::string& status_message);
-
- void NotifyFinishedCaching(net::URLRequestStatus status,
- const std::string& status_message);
+ // script cache of fetch completion at EOF. This function returns
+ // net::IO_PENDING if the IO is to be completed asynchronously, returns a
+ // negative number that represents a corresponding net error code (other than
+ // net::IO_PENDING) if an error occurred, or returns a non-negative number
+ // that represents the number of network bytes read. If the return value is
+ // non-negative, all of the data in |io_buffer_| has been written back to the
+ // script cache if necessary.
+ int HandleNetData(int bytes_read);
+
+ void NotifyStartErrorHelper(const net::URLRequestStatus& status,
+ const std::string& status_message);
+
+  // Returns the error code carried by |status|, or a new error code if an
+  // additional error is discovered while finishing the cache write.
+ net::Error NotifyFinishedCaching(net::URLRequestStatus status,
+ const std::string& status_message);
scoped_ptr<ServiceWorkerResponseReader> CreateCacheResponseReader();
scoped_ptr<ServiceWorkerResponseWriter> CreateCacheResponseWriter();
@@ -140,6 +145,7 @@ class CONTENT_EXPORT ServiceWorkerWriteToCacheJob
bool has_been_killed_;
bool did_notify_started_;
bool did_notify_finished_;
+
base::WeakPtrFactory<ServiceWorkerWriteToCacheJob> weak_factory_;
DISALLOW_COPY_AND_ASSIGN(ServiceWorkerWriteToCacheJob);
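
The new int-based contract used by HandleNetData and ReadRawData collapses three outcomes into a single return value. Below is a minimal standalone sketch of a caller dispatching on that convention; the enum values are illustrative stand-ins (the real constants live in net/base/net_errors.h), and none of this is Chromium code.

    #include <cstdio>

    // Illustrative stand-ins for net error codes; real values are defined in
    // net/base/net_errors.h.
    enum Error { OK = 0, ERR_IO_PENDING = -1, ERR_FAILED = -2 };

    // Dispatch on the tri-state result: >= 0 is a synchronous byte count
    // (0 meaning EOF), ERR_IO_PENDING defers completion to a later callback,
    // and any other negative value is a synchronous failure.
    void DispatchReadResult(int result) {
      if (result == ERR_IO_PENDING)
        std::printf("pending; wait for the completion callback\n");
      else if (result < 0)
        std::printf("synchronous failure: %d\n", result);
      else if (result == 0)
        std::printf("EOF\n");
      else
        std::printf("read %d bytes synchronously\n", result);
    }

    int main() {
      DispatchReadResult(1024);            // synchronous success
      DispatchReadResult(0);               // EOF
      DispatchReadResult(ERR_IO_PENDING);  // asynchronous read
      DispatchReadResult(ERR_FAILED);      // synchronous error
    }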
diff --git a/content/browser/streams/stream_url_request_job.cc b/content/browser/streams/stream_url_request_job.cc
index d2bce89..e26fe35 100644
--- a/content/browser/streams/stream_url_request_job.cc
+++ b/content/browser/streams/stream_url_request_job.cc
@@ -40,8 +40,6 @@ StreamURLRequestJob::~StreamURLRequestJob() {
}
void StreamURLRequestJob::OnDataAvailable(Stream* stream) {
- // Clear the IO_PENDING status.
- SetStatus(net::URLRequestStatus());
// Do nothing if pending_buffer_ is empty, i.e. there's no ReadRawData()
// operation waiting for IO completion.
if (!pending_buffer_.get())
@@ -50,24 +48,22 @@ void StreamURLRequestJob::OnDataAvailable(Stream* stream) {
// pending_buffer_ is set to the IOBuffer instance provided to ReadRawData()
// by URLRequestJob.
- int bytes_read;
- switch (stream_->ReadRawData(
- pending_buffer_.get(), pending_buffer_size_, &bytes_read)) {
+ int result = 0;
+ switch (stream_->ReadRawData(pending_buffer_.get(), pending_buffer_size_,
+ &result)) {
case Stream::STREAM_HAS_DATA:
- DCHECK_GT(bytes_read, 0);
+ DCHECK_GT(result, 0);
break;
case Stream::STREAM_COMPLETE:
- // Ensure this. Calling NotifyReadComplete call with 0 signals
- // completion.
- bytes_read = 0;
+ // Ensure ReadRawData gives net::OK.
+ DCHECK_EQ(net::OK, result);
break;
case Stream::STREAM_EMPTY:
NOTREACHED();
break;
case Stream::STREAM_ABORTED:
// Handle this as connection reset.
- NotifyDone(net::URLRequestStatus(net::URLRequestStatus::FAILED,
- net::ERR_CONNECTION_RESET));
+ result = net::ERR_CONNECTION_RESET;
break;
}
@@ -76,8 +72,9 @@ void StreamURLRequestJob::OnDataAvailable(Stream* stream) {
pending_buffer_ = NULL;
pending_buffer_size_ = 0;
- total_bytes_read_ += bytes_read;
- NotifyReadComplete(bytes_read);
+ if (result > 0)
+ total_bytes_read_ += result;
+ ReadRawDataComplete(result);
}
// net::URLRequestJob methods.
@@ -94,43 +91,40 @@ void StreamURLRequestJob::Kill() {
ClearStream();
}
-bool StreamURLRequestJob::ReadRawData(net::IOBuffer* buf,
- int buf_size,
- int* bytes_read) {
+int StreamURLRequestJob::ReadRawData(net::IOBuffer* buf, int buf_size) {
+ // TODO(ellyjones): This is not right. The old code returned true here, but
+ // ReadRawData's old contract was to return true only for synchronous
+ // successes, which had the effect of treating all errors as synchronous EOFs.
+ // See https://crbug.com/508957
if (request_failed_)
- return true;
+ return 0;
DCHECK(buf);
- DCHECK(bytes_read);
int to_read = buf_size;
if (max_range_ && to_read) {
if (to_read + total_bytes_read_ > max_range_)
to_read = max_range_ - total_bytes_read_;
- if (to_read <= 0) {
- *bytes_read = 0;
- return true;
- }
+ if (to_read == 0)
+ return 0;
}
- switch (stream_->ReadRawData(buf, to_read, bytes_read)) {
+ int bytes_read = 0;
+ switch (stream_->ReadRawData(buf, to_read, &bytes_read)) {
case Stream::STREAM_HAS_DATA:
case Stream::STREAM_COMPLETE:
- total_bytes_read_ += *bytes_read;
- return true;
+ total_bytes_read_ += bytes_read;
+ return bytes_read;
case Stream::STREAM_EMPTY:
pending_buffer_ = buf;
pending_buffer_size_ = to_read;
- SetStatus(net::URLRequestStatus(net::URLRequestStatus::IO_PENDING, 0));
- return false;
+ return net::ERR_IO_PENDING;
case Stream::STREAM_ABORTED:
// Handle this as connection reset.
- NotifyDone(net::URLRequestStatus(net::URLRequestStatus::FAILED,
- net::ERR_CONNECTION_RESET));
- return false;
+ return net::ERR_CONNECTION_RESET;
}
NOTREACHED();
- return false;
+ return net::ERR_FAILED;
}
bool StreamURLRequestJob::GetMimeType(std::string* mime_type) const {
@@ -189,13 +183,8 @@ void StreamURLRequestJob::DidStart() {
void StreamURLRequestJob::NotifyFailure(int error_code) {
request_failed_ = true;
- // If we already return the headers on success, we can't change the headers
- // now. Instead, we just error out.
- if (headers_set_) {
- NotifyDone(net::URLRequestStatus(net::URLRequestStatus::FAILED,
- error_code));
- return;
- }
+ // This method can only be called before headers are set.
+ DCHECK(!headers_set_);
// TODO(zork): Share these with BlobURLRequestJob.
net::HttpStatusCode status_code = net::HTTP_INTERNAL_SERVER_ERROR;
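
The switch in StreamURLRequestJob::ReadRawData above maps the stream's four read outcomes onto that single return value. A self-contained sketch of the same mapping follows; the enums are local models (only ERR_IO_PENDING's value of -1 is meaningful), and STREAM_ABORTED is surfaced as a connection reset, as in the job above.

    // Local models of the stream states and net error codes used above.
    enum StreamState { STREAM_HAS_DATA, STREAM_COMPLETE, STREAM_EMPTY, STREAM_ABORTED };
    enum Error { OK = 0, ERR_IO_PENDING = -1, ERR_FAILED = -2, ERR_CONNECTION_RESET = -101 };

    // Returns bytes read (>= 0), ERR_IO_PENDING, or a net error, mirroring
    // StreamURLRequestJob::ReadRawData.
    int MapStreamRead(StreamState state, int bytes_read) {
      switch (state) {
        case STREAM_HAS_DATA:
        case STREAM_COMPLETE:
          return bytes_read;            // 0 from STREAM_COMPLETE means EOF.
        case STREAM_EMPTY:
          return ERR_IO_PENDING;        // Caller must stash the buffer and wait.
        case STREAM_ABORTED:
          return ERR_CONNECTION_RESET;  // Treated as a connection reset.
      }
      return ERR_FAILED;                // Unreachable; keeps the compiler happy.
    }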
diff --git a/content/browser/streams/stream_url_request_job.h b/content/browser/streams/stream_url_request_job.h
index 05c9551..f22311d 100644
--- a/content/browser/streams/stream_url_request_job.h
+++ b/content/browser/streams/stream_url_request_job.h
@@ -29,7 +29,7 @@ class CONTENT_EXPORT StreamURLRequestJob
// net::URLRequestJob methods.
void Start() override;
void Kill() override;
- bool ReadRawData(net::IOBuffer* buf, int buf_size, int* bytes_read) override;
+ int ReadRawData(net::IOBuffer* buf, int buf_size) override;
bool GetMimeType(std::string* mime_type) const override;
void GetResponseInfo(net::HttpResponseInfo* info) override;
int GetResponseCode() const override;
diff --git a/content/browser/webui/url_data_manager_backend.cc b/content/browser/webui/url_data_manager_backend.cc
index ac346dd..485aff6 100644
--- a/content/browser/webui/url_data_manager_backend.cc
+++ b/content/browser/webui/url_data_manager_backend.cc
@@ -119,7 +119,7 @@ class URLRequestChromeJob : public net::URLRequestJob {
// net::URLRequestJob implementation.
void Start() override;
void Kill() override;
- bool ReadRawData(net::IOBuffer* buf, int buf_size, int* bytes_read) override;
+ int ReadRawData(net::IOBuffer* buf, int buf_size) override;
bool GetMimeType(std::string* mime_type) const override;
int GetResponseCode() const override;
void GetResponseInfo(net::HttpResponseInfo* info) override;
@@ -190,8 +190,9 @@ class URLRequestChromeJob : public net::URLRequestJob {
bool RequiresUnsafeEval() const;
// Do the actual copy from data_ (the data we're serving) into |buf|.
- // Separate from ReadRawData so we can handle async I/O.
- void CompleteRead(net::IOBuffer* buf, int buf_size, int* bytes_read);
+ // Separate from ReadRawData so we can handle async I/O. Returns the number of
+ // bytes read.
+ int CompleteRead(net::IOBuffer* buf, int buf_size);
// The actual data we're serving. NULL until it's been fetched.
scoped_refptr<base::RefCountedMemory> data_;
@@ -336,22 +337,16 @@ void URLRequestChromeJob::MimeTypeAvailable(const std::string& mime_type) {
void URLRequestChromeJob::DataAvailable(base::RefCountedMemory* bytes) {
TRACE_EVENT_ASYNC_END0("browser", "DataManager:Request", this);
if (bytes) {
- // The request completed, and we have all the data.
- // Clear any IO pending status.
- SetStatus(net::URLRequestStatus());
-
data_ = bytes;
- int bytes_read;
if (pending_buf_.get()) {
CHECK(pending_buf_->data());
- CompleteRead(pending_buf_.get(), pending_buf_size_, &bytes_read);
+ int result = CompleteRead(pending_buf_.get(), pending_buf_size_);
pending_buf_ = NULL;
- NotifyReadComplete(bytes_read);
+ ReadRawDataComplete(result);
}
} else {
// The request failed.
- NotifyDone(net::URLRequestStatus(net::URLRequestStatus::FAILED,
- net::ERR_FAILED));
+ ReadRawDataComplete(net::ERR_FAILED);
}
}
@@ -359,25 +354,21 @@ base::WeakPtr<URLRequestChromeJob> URLRequestChromeJob::AsWeakPtr() {
return weak_factory_.GetWeakPtr();
}
-bool URLRequestChromeJob::ReadRawData(net::IOBuffer* buf, int buf_size,
- int* bytes_read) {
+int URLRequestChromeJob::ReadRawData(net::IOBuffer* buf, int buf_size) {
if (!data_.get()) {
- SetStatus(net::URLRequestStatus(net::URLRequestStatus::IO_PENDING, 0));
DCHECK(!pending_buf_.get());
CHECK(buf->data());
pending_buf_ = buf;
pending_buf_size_ = buf_size;
- return false; // Tell the caller we're still waiting for data.
+ return net::ERR_IO_PENDING;
}
// Otherwise, the data is available.
- CompleteRead(buf, buf_size, bytes_read);
- return true;
+ return CompleteRead(buf, buf_size);
}
-void URLRequestChromeJob::CompleteRead(net::IOBuffer* buf, int buf_size,
- int* bytes_read) {
- int remaining = static_cast<int>(data_->size()) - data_offset_;
+int URLRequestChromeJob::CompleteRead(net::IOBuffer* buf, int buf_size) {
+  int remaining = static_cast<int>(data_->size()) - data_offset_;
if (buf_size > remaining)
buf_size = remaining;
if (buf_size > 0) {
@@ -389,7 +380,7 @@ void URLRequestChromeJob::CompleteRead(net::IOBuffer* buf, int buf_size,
memcpy(buf->data(), data_->front() + data_offset_, buf_size);
data_offset_ += buf_size;
}
- *bytes_read = buf_size;
+ return buf_size;
}
void URLRequestChromeJob::CheckStoragePartitionMatches(
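
DataAvailable and ReadRawData above cooperate through a stashed buffer: a read that arrives before the data returns ERR_IO_PENDING, and the completion is delivered exactly once when the data shows up. The following is a standalone model of that handshake under stated assumptions — all names are hypothetical and a std::function stands in for URLRequestJob::ReadRawDataComplete.

    #include <cstring>
    #include <functional>
    #include <string>

    enum Error { ERR_IO_PENDING = -1 };  // Illustrative value.

    class PendingReadModel {
     public:
      // Plays the role of URLRequestJob::ReadRawDataComplete. Must be set
      // before DataAvailable can complete a stashed read.
      std::function<void(int)> read_complete;

      // Returns bytes copied, or ERR_IO_PENDING if the data isn't here yet.
      int ReadRawData(char* buf, int buf_size) {
        if (data_.empty()) {
          pending_buf_ = buf;  // Stash the caller's buffer until DataAvailable.
          pending_buf_size_ = buf_size;
          return ERR_IO_PENDING;
        }
        return CompleteRead(buf, buf_size);
      }

      // Backend delivery; completes a stashed read exactly once.
      void DataAvailable(const std::string& bytes) {
        data_ = bytes;
        if (pending_buf_) {
          int rv = CompleteRead(pending_buf_, pending_buf_size_);
          pending_buf_ = nullptr;
          read_complete(rv);
        }
      }

     private:
      int CompleteRead(char* buf, int buf_size) {
        int remaining = static_cast<int>(data_.size()) - offset_;
        if (buf_size > remaining)
          buf_size = remaining;
        if (buf_size > 0) {
          std::memcpy(buf, data_.data() + offset_, buf_size);
          offset_ += buf_size;
        }
        return buf_size;  // 0 once the data is exhausted (EOF).
      }

      std::string data_;
      int offset_ = 0;
      char* pending_buf_ = nullptr;
      int pending_buf_size_ = 0;
    };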
diff --git a/content/test/net/url_request_abort_on_end_job.cc b/content/test/net/url_request_abort_on_end_job.cc
index f7ff5a9..6d237a8 100644
--- a/content/test/net/url_request_abort_on_end_job.cc
+++ b/content/test/net/url_request_abort_on_end_job.cc
@@ -8,6 +8,7 @@
#include "base/compiler_specific.h"
#include "base/location.h"
+#include "base/numerics/safe_conversions.h"
#include "base/single_thread_task_runner.h"
#include "base/strings/string_util.h"
#include "base/thread_task_runner_handle.h"
@@ -111,20 +112,16 @@ void URLRequestAbortOnEndJob::Start() {
weak_factory_.GetWeakPtr()));
}
-bool URLRequestAbortOnEndJob::ReadRawData(net::IOBuffer* buf,
- const int max_bytes,
- int* bytes_read) {
+int URLRequestAbortOnEndJob::ReadRawData(net::IOBuffer* buf, int max_bytes) {
if (!sent_data_) {
- *bytes_read = std::min(size_t(max_bytes), sizeof(kPageContent));
- std::memcpy(buf->data(), kPageContent, *bytes_read);
+ max_bytes =
+ std::min(max_bytes, base::checked_cast<int>(sizeof(kPageContent)));
+ std::memcpy(buf->data(), kPageContent, max_bytes);
sent_data_ = true;
- return true;
+ return max_bytes;
}
- SetStatus(net::URLRequestStatus(net::URLRequestStatus::FAILED,
- net::ERR_CONNECTION_ABORTED));
- *bytes_read = -1;
- return false;
+ return net::ERR_CONNECTION_ABORTED;
}
} // namespace content
diff --git a/content/test/net/url_request_abort_on_end_job.h b/content/test/net/url_request_abort_on_end_job.h
index 88e348c..8a95533 100644
--- a/content/test/net/url_request_abort_on_end_job.h
+++ b/content/test/net/url_request_abort_on_end_job.h
@@ -29,7 +29,7 @@ class URLRequestAbortOnEndJob : public net::URLRequestJob {
void Start() override;
bool GetMimeType(std::string* mime_type) const override;
void GetResponseInfo(net::HttpResponseInfo* info) override;
- bool ReadRawData(net::IOBuffer* buf, int buf_size, int* bytes_read) override;
+ int ReadRawData(net::IOBuffer* buf, int buf_size) override;
static void AddUrlHandler();
diff --git a/ios/web/webui/url_data_manager_ios_backend.cc b/ios/web/webui/url_data_manager_ios_backend.cc
index cc3e3e9..d5117c2 100644
--- a/ios/web/webui/url_data_manager_ios_backend.cc
+++ b/ios/web/webui/url_data_manager_ios_backend.cc
@@ -96,7 +96,7 @@ class URLRequestChromeJob : public net::URLRequestJob {
// net::URLRequestJob implementation.
void Start() override;
void Kill() override;
- bool ReadRawData(net::IOBuffer* buf, int buf_size, int* bytes_read) override;
+ int ReadRawData(net::IOBuffer* buf, int buf_size) override;
bool GetMimeType(std::string* mime_type) const override;
int GetResponseCode() const override;
void GetResponseInfo(net::HttpResponseInfo* info) override;
@@ -142,7 +142,7 @@ class URLRequestChromeJob : public net::URLRequestJob {
// Do the actual copy from data_ (the data we're serving) into |buf|.
// Separate from ReadRawData so we can handle async I/O.
- void CompleteRead(net::IOBuffer* buf, int buf_size, int* bytes_read);
+ int CompleteRead(net::IOBuffer* buf, int buf_size);
// The actual data we're serving. NULL until it's been fetched.
scoped_refptr<base::RefCountedMemory> data_;
@@ -291,58 +291,46 @@ void URLRequestChromeJob::MimeTypeAvailable(const std::string& mime_type) {
void URLRequestChromeJob::DataAvailable(base::RefCountedMemory* bytes) {
TRACE_EVENT_ASYNC_END0("browser", "DataManager:Request", this);
if (bytes) {
- // The request completed, and we have all the data.
- // Clear any IO pending status.
- SetStatus(net::URLRequestStatus());
-
data_ = bytes;
- int bytes_read;
if (pending_buf_.get()) {
CHECK(pending_buf_->data());
- CompleteRead(pending_buf_.get(), pending_buf_size_, &bytes_read);
+ int rv = CompleteRead(pending_buf_.get(), pending_buf_size_);
pending_buf_ = NULL;
- NotifyReadComplete(bytes_read);
+ ReadRawDataComplete(rv);
}
} else {
- // The request failed.
- NotifyDone(
- net::URLRequestStatus(net::URLRequestStatus::FAILED, net::ERR_FAILED));
+ ReadRawDataComplete(net::ERR_FAILED);
}
}
-bool URLRequestChromeJob::ReadRawData(net::IOBuffer* buf,
- int buf_size,
- int* bytes_read) {
+int URLRequestChromeJob::ReadRawData(net::IOBuffer* buf, int buf_size) {
if (!data_.get()) {
- SetStatus(net::URLRequestStatus(net::URLRequestStatus::IO_PENDING, 0));
DCHECK(!pending_buf_.get());
CHECK(buf->data());
pending_buf_ = buf;
pending_buf_size_ = buf_size;
- return false; // Tell the caller we're still waiting for data.
+    // Tell the caller we're still waiting for data.
+    return net::ERR_IO_PENDING;
}
// Otherwise, the data is available.
- CompleteRead(buf, buf_size, bytes_read);
- return true;
+ return CompleteRead(buf, buf_size);
}
-void URLRequestChromeJob::CompleteRead(net::IOBuffer* buf,
- int buf_size,
- int* bytes_read) {
+int URLRequestChromeJob::CompleteRead(net::IOBuffer* buf, int buf_size) {
// http://crbug.com/373841
char url_buf[128];
base::strlcpy(url_buf, request_->url().spec().c_str(), arraysize(url_buf));
base::debug::Alias(url_buf);
- int remaining = static_cast<int>(data_->size()) - data_offset_;
+  int remaining = static_cast<int>(data_->size()) - data_offset_;
if (buf_size > remaining)
buf_size = remaining;
if (buf_size > 0) {
memcpy(buf->data(), data_->front() + data_offset_, buf_size);
data_offset_ += buf_size;
}
- *bytes_read = buf_size;
+ return buf_size;
}
namespace {
diff --git a/mojo/services/network/url_loader_impl_apptest.cc b/mojo/services/network/url_loader_impl_apptest.cc
index 2fb2210..76c1fee 100644
--- a/mojo/services/network/url_loader_impl_apptest.cc
+++ b/mojo/services/network/url_loader_impl_apptest.cc
@@ -49,30 +49,22 @@ class TestURLRequestJob : public net::URLRequestJob {
void Start() override { status_ = STARTED; }
- bool ReadRawData(net::IOBuffer* buf, int buf_size, int* bytes_read) override {
+ int ReadRawData(net::IOBuffer* buf, int buf_size) override {
status_ = READING;
buf_size_ = buf_size;
- SetStatus(net::URLRequestStatus(net::URLRequestStatus::IO_PENDING, 0));
- return false;
+ return net::ERR_IO_PENDING;
}
void NotifyHeadersComplete() { net::URLRequestJob::NotifyHeadersComplete(); }
- void NotifyReadComplete(int bytes_read) {
- if (bytes_read < 0) {
- status_ = COMPLETED;
- NotifyDone(net::URLRequestStatus(
- net::URLRequestStatus::FromError(net::ERR_FAILED)));
- net::URLRequestJob::NotifyReadComplete(0);
- } else if (bytes_read == 0) {
- status_ = COMPLETED;
- NotifyDone(net::URLRequestStatus());
- net::URLRequestJob::NotifyReadComplete(bytes_read);
- } else {
- status_ = STARTED;
- SetStatus(net::URLRequestStatus());
- net::URLRequestJob::NotifyReadComplete(bytes_read);
- }
+ void NotifyReadComplete(int result) {
+ status_ = result <= 0 ? COMPLETED : STARTED;
+
+ // Map errors to net::ERR_FAILED.
+ if (result < 0)
+ result = net::ERR_FAILED;
+
+ ReadRawDataComplete(result);
}
private:
diff --git a/net/test/url_request/url_request_failed_job.cc b/net/test/url_request/url_request_failed_job.cc
index e4ac6a6..d7c479b 100644
--- a/net/test/url_request/url_request_failed_job.cc
+++ b/net/test/url_request/url_request_failed_job.cc
@@ -97,40 +97,20 @@ URLRequestFailedJob::URLRequestFailedJob(URLRequest* request,
}
void URLRequestFailedJob::Start() {
- if (phase_ == START) {
- if (net_error_ != ERR_IO_PENDING) {
- NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED, net_error_));
- return;
- }
- SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));
- return;
- }
- response_info_.headers = new net::HttpResponseHeaders("HTTP/1.1 200 OK");
- NotifyHeadersComplete();
+ base::ThreadTaskRunnerHandle::Get()->PostTask(
+ FROM_HERE,
+ base::Bind(&URLRequestFailedJob::StartAsync, weak_factory_.GetWeakPtr()));
}
-bool URLRequestFailedJob::ReadRawData(IOBuffer* buf,
- int buf_size,
- int* bytes_read) {
+int URLRequestFailedJob::ReadRawData(IOBuffer* buf, int buf_size) {
CHECK(phase_ == READ_SYNC || phase_ == READ_ASYNC);
- if (net_error_ != ERR_IO_PENDING && phase_ == READ_SYNC) {
- NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, net_error_));
- return false;
- }
-
- SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));
-
- if (net_error_ == ERR_IO_PENDING)
- return false;
-
- DCHECK_EQ(READ_ASYNC, phase_);
- DCHECK_NE(ERR_IO_PENDING, net_error_);
+ if (net_error_ == ERR_IO_PENDING || phase_ == READ_SYNC)
+ return net_error_;
base::ThreadTaskRunnerHandle::Get()->PostTask(
- FROM_HERE,
- base::Bind(&URLRequestFailedJob::NotifyDone, weak_factory_.GetWeakPtr(),
- URLRequestStatus(URLRequestStatus::FAILED, net_error_)));
- return false;
+ FROM_HERE, base::Bind(&URLRequestFailedJob::ReadRawDataComplete,
+ weak_factory_.GetWeakPtr(), net_error_));
+ return ERR_IO_PENDING;
}
int URLRequestFailedJob::GetResponseCode() const {
@@ -195,4 +175,17 @@ GURL URLRequestFailedJob::GetMockHttpsUrlForHostname(
URLRequestFailedJob::~URLRequestFailedJob() {
}
+void URLRequestFailedJob::StartAsync() {
+ if (phase_ == START) {
+ if (net_error_ != ERR_IO_PENDING) {
+ NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED, net_error_));
+ return;
+ }
+ SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));
+ return;
+ }
+ response_info_.headers = new net::HttpResponseHeaders("HTTP/1.1 200 OK");
+ NotifyHeadersComplete();
+}
+
} // namespace net
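
URLRequestFailedJob now reports asynchronous failures by returning ERR_IO_PENDING and posting the completion with the injected error. A toy single-threaded model of the same shape is sketched below; the queue stands in for base::ThreadTaskRunnerHandle::Get()->PostTask(), the names are hypothetical, and the error values are illustrative except ERR_IO_PENDING.

    #include <cstdio>
    #include <functional>
    #include <queue>

    enum Error { OK = 0, ERR_IO_PENDING = -1, ERR_TIMED_OUT = -7 };

    // Stand-in for base::ThreadTaskRunnerHandle::Get()->PostTask().
    std::queue<std::function<void()>> g_tasks;

    struct FailedJobModel {
      Error net_error = ERR_TIMED_OUT;
      bool read_async = true;

      int ReadRawData() {
        if (!read_async)
          return net_error;  // READ_SYNC phase: fail synchronously.
        // READ_ASYNC phase: defer the very same error to a posted task.
        g_tasks.push([this] { ReadRawDataComplete(net_error); });
        return ERR_IO_PENDING;
      }

      void ReadRawDataComplete(int result) {
        std::printf("read completed with %d\n", result);
      }
    };

    int main() {
      FailedJobModel job;
      if (job.ReadRawData() == ERR_IO_PENDING) {
        while (!g_tasks.empty()) {  // Drain the "message loop".
          g_tasks.front()();
          g_tasks.pop();
        }
      }
    }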
diff --git a/net/test/url_request/url_request_failed_job.h b/net/test/url_request/url_request_failed_job.h
index 0413111..45b1911 100644
--- a/net/test/url_request/url_request_failed_job.h
+++ b/net/test/url_request/url_request_failed_job.h
@@ -39,7 +39,7 @@ class URLRequestFailedJob : public URLRequestJob {
// URLRequestJob implementation:
void Start() override;
- bool ReadRawData(IOBuffer* buf, int buf_size, int* bytes_read) override;
+ int ReadRawData(IOBuffer* buf, int buf_size) override;
int GetResponseCode() const override;
void GetResponseInfo(HttpResponseInfo* info) override;
@@ -71,6 +71,7 @@ class URLRequestFailedJob : public URLRequestJob {
protected:
~URLRequestFailedJob() override;
+ void StartAsync();
private:
HttpResponseInfo response_info_;
diff --git a/net/test/url_request/url_request_mock_data_job.cc b/net/test/url_request/url_request_mock_data_job.cc
index 9549242..b9ef385 100644
--- a/net/test/url_request/url_request_mock_data_job.cc
+++ b/net/test/url_request/url_request_mock_data_job.cc
@@ -104,15 +104,12 @@ void URLRequestMockDataJob::Start() {
URLRequestMockDataJob::~URLRequestMockDataJob() {
}
-bool URLRequestMockDataJob::ReadRawData(IOBuffer* buf,
- int buf_size,
- int* bytes_read) {
- DCHECK(bytes_read);
- *bytes_read = static_cast<int>(
- std::min(static_cast<size_t>(buf_size), data_.length() - data_offset_));
- memcpy(buf->data(), data_.c_str() + data_offset_, *bytes_read);
- data_offset_ += *bytes_read;
- return true;
+int URLRequestMockDataJob::ReadRawData(IOBuffer* buf, int buf_size) {
+  int bytes_read = static_cast<int>(
+      std::min(static_cast<size_t>(buf_size), data_.length() - data_offset_));
+ memcpy(buf->data(), data_.c_str() + data_offset_, bytes_read);
+ data_offset_ += bytes_read;
+ return bytes_read;
}
int URLRequestMockDataJob::GetResponseCode() const {
diff --git a/net/test/url_request/url_request_mock_data_job.h b/net/test/url_request/url_request_mock_data_job.h
index 3d84e37..14ad665 100644
--- a/net/test/url_request/url_request_mock_data_job.h
+++ b/net/test/url_request/url_request_mock_data_job.h
@@ -24,7 +24,7 @@ class URLRequestMockDataJob : public URLRequestJob {
int data_repeat_count);
void Start() override;
- bool ReadRawData(IOBuffer* buf, int buf_size, int* bytes_read) override;
+ int ReadRawData(IOBuffer* buf, int buf_size) override;
int GetResponseCode() const override;
void GetResponseInfo(HttpResponseInfo* info) override;
diff --git a/net/test/url_request/url_request_slow_download_job.cc b/net/test/url_request/url_request_slow_download_job.cc
index f344feb..52fd71c 100644
--- a/net/test/url_request/url_request_slow_download_job.cc
+++ b/net/test/url_request/url_request_slow_download_job.cc
@@ -179,39 +179,34 @@ URLRequestSlowDownloadJob::FillBufferHelper(IOBuffer* buf,
return REQUEST_COMPLETE;
}
-bool URLRequestSlowDownloadJob::ReadRawData(IOBuffer* buf,
- int buf_size,
- int* bytes_read) {
+int URLRequestSlowDownloadJob::ReadRawData(IOBuffer* buf, int buf_size) {
if (base::LowerCaseEqualsASCII(kFinishDownloadUrl,
request_->url().spec().c_str()) ||
base::LowerCaseEqualsASCII(kErrorDownloadUrl,
request_->url().spec().c_str())) {
VLOG(10) << __FUNCTION__ << " called w/ kFinish/ErrorDownloadUrl.";
- *bytes_read = 0;
- return true;
+ return 0;
}
VLOG(10) << __FUNCTION__ << " called at position " << bytes_already_sent_
<< " in the stream.";
- ReadStatus status = FillBufferHelper(buf, buf_size, bytes_read);
+ int bytes_read = 0;
+ ReadStatus status = FillBufferHelper(buf, buf_size, &bytes_read);
switch (status) {
case BUFFER_FILLED:
- return true;
+ case REQUEST_COMPLETE:
+ return bytes_read;
case REQUEST_BLOCKED:
buffer_ = buf;
buffer_size_ = buf_size;
- SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));
base::ThreadTaskRunnerHandle::Get()->PostDelayedTask(
FROM_HERE, base::Bind(&URLRequestSlowDownloadJob::CheckDoneStatus,
weak_factory_.GetWeakPtr()),
base::TimeDelta::FromMilliseconds(100));
- return false;
- case REQUEST_COMPLETE:
- *bytes_read = 0;
- return true;
+ return ERR_IO_PENDING;
}
NOTREACHED();
- return true;
+ return OK;
}
void URLRequestSlowDownloadJob::CheckDoneStatus() {
@@ -223,12 +218,10 @@ void URLRequestSlowDownloadJob::CheckDoneStatus() {
FillBufferHelper(buffer_.get(), buffer_size_, &bytes_written);
DCHECK_EQ(BUFFER_FILLED, status);
buffer_ = NULL; // Release the reference.
- SetStatus(URLRequestStatus());
- NotifyReadComplete(bytes_written);
+ ReadRawDataComplete(bytes_written);
} else if (should_error_download_) {
VLOG(10) << __FUNCTION__ << " called w/ should_error_download_ set.";
- NotifyDone(
- URLRequestStatus(URLRequestStatus::FAILED, ERR_CONNECTION_RESET));
+ ReadRawDataComplete(ERR_CONNECTION_RESET);
} else {
base::ThreadTaskRunnerHandle::Get()->PostDelayedTask(
FROM_HERE, base::Bind(&URLRequestSlowDownloadJob::CheckDoneStatus,
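
FillBufferHelper's three outcomes now map directly onto the new return convention: BUFFER_FILLED and REQUEST_COMPLETE both yield a byte count (zero at completion), while REQUEST_BLOCKED stashes the buffer and reports ERR_IO_PENDING until CheckDoneStatus fires. A compact sketch of that mapping, using local types and illustrative error values:

    enum ReadStatus { BUFFER_FILLED, REQUEST_BLOCKED, REQUEST_COMPLETE };
    enum Error { OK = 0, ERR_IO_PENDING = -1 };

    // Mirrors the switch in URLRequestSlowDownloadJob::ReadRawData: a filled
    // buffer or a completed request returns the byte count (0 == EOF); a
    // blocked request defers to a later ReadRawDataComplete call.
    int MapFillResult(ReadStatus status, int bytes_read) {
      switch (status) {
        case BUFFER_FILLED:
        case REQUEST_COMPLETE:
          return bytes_read;
        case REQUEST_BLOCKED:
          return ERR_IO_PENDING;
      }
      return OK;  // Unreachable.
    }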
diff --git a/net/test/url_request/url_request_slow_download_job.h b/net/test/url_request/url_request_slow_download_job.h
index 115a6ac..fcd7f661 100644
--- a/net/test/url_request/url_request_slow_download_job.h
+++ b/net/test/url_request/url_request_slow_download_job.h
@@ -37,7 +37,7 @@ class URLRequestSlowDownloadJob : public URLRequestJob {
void Start() override;
bool GetMimeType(std::string* mime_type) const override;
void GetResponseInfo(HttpResponseInfo* info) override;
- bool ReadRawData(IOBuffer* buf, int buf_size, int* bytes_read) override;
+ int ReadRawData(IOBuffer* buf, int buf_size) override;
// Returns the current number of URLRequestSlowDownloadJobs that have
// not yet completed.
diff --git a/net/url_request/url_request_file_dir_job.cc b/net/url_request/url_request_file_dir_job.cc
index 64597d5..1dafbf2 100644
--- a/net/url_request/url_request_file_dir_job.cc
+++ b/net/url_request/url_request_file_dir_job.cc
@@ -13,7 +13,6 @@
#include "base/thread_task_runner_handle.h"
#include "base/time/time.h"
#include "net/base/io_buffer.h"
-#include "net/base/net_errors.h"
#include "net/base/net_util.h"
#include "net/url_request/url_request_status.h"
#include "url/gurl.h"
@@ -66,23 +65,19 @@ void URLRequestFileDirJob::Kill() {
weak_factory_.InvalidateWeakPtrs();
}
-bool URLRequestFileDirJob::ReadRawData(IOBuffer* buf, int buf_size,
- int* bytes_read) {
- DCHECK(bytes_read);
- *bytes_read = 0;
-
+int URLRequestFileDirJob::ReadRawData(IOBuffer* buf, int buf_size) {
if (is_done())
- return true;
+ return 0;
- if (FillReadBuffer(buf->data(), buf_size, bytes_read))
- return true;
+ int bytes_read = 0;
+ if (FillReadBuffer(buf->data(), buf_size, &bytes_read))
+ return bytes_read;
// We are waiting for more data
read_pending_ = true;
read_buffer_ = buf;
read_buffer_length_ = buf_size;
- SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));
- return false;
+ return ERR_IO_PENDING;
}
bool URLRequestFileDirJob::GetMimeType(std::string* mime_type) const {
@@ -131,40 +126,45 @@ void URLRequestFileDirJob::OnListFile(
data.info.GetLastModifiedTime()));
// TODO(darin): coalesce more?
- CompleteRead();
+ CompleteRead(OK);
}
void URLRequestFileDirJob::OnListDone(int error) {
DCHECK(!canceled_);
- if (error != OK) {
- read_pending_ = false;
- NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, error));
- } else {
+ DCHECK_LE(error, OK);
+ if (error == OK)
list_complete_ = true;
- CompleteRead();
- }
+ CompleteRead(static_cast<Error>(error));
}
URLRequestFileDirJob::~URLRequestFileDirJob() {}
-void URLRequestFileDirJob::CompleteRead() {
- if (read_pending_) {
- int bytes_read;
+void URLRequestFileDirJob::CompleteRead(Error status) {
+ DCHECK_LE(status, OK);
+ DCHECK_NE(status, ERR_IO_PENDING);
+
+ // Do nothing if there is no read pending.
+ if (!read_pending_)
+ return;
+
+ int result = status;
+ if (status == OK) {
+ int filled_bytes = 0;
if (FillReadBuffer(read_buffer_->data(), read_buffer_length_,
- &bytes_read)) {
+ &filled_bytes)) {
+ result = filled_bytes;
// We completed the read, so reset the read buffer.
- read_pending_ = false;
read_buffer_ = NULL;
read_buffer_length_ = 0;
-
- SetStatus(URLRequestStatus());
- NotifyReadComplete(bytes_read);
} else {
NOTREACHED();
// TODO: Better error code.
- NotifyDone(URLRequestStatus::FromError(ERR_FAILED));
+ result = ERR_FAILED;
}
}
+
+ read_pending_ = false;
+ ReadRawDataComplete(result);
}
bool URLRequestFileDirJob::FillReadBuffer(char* buf, int buf_size,
diff --git a/net/url_request/url_request_file_dir_job.h b/net/url_request/url_request_file_dir_job.h
index dfb2d74..f7a7b45 100644
--- a/net/url_request/url_request_file_dir_job.h
+++ b/net/url_request/url_request_file_dir_job.h
@@ -10,6 +10,7 @@
#include "base/files/file_path.h"
#include "base/memory/weak_ptr.h"
#include "net/base/directory_lister.h"
+#include "net/base/net_errors.h"
#include "net/url_request/url_request_job.h"
namespace net {
@@ -29,7 +30,7 @@ class URLRequestFileDirJob
// Overridden from URLRequestJob:
void Start() override;
void Kill() override;
- bool ReadRawData(IOBuffer* buf, int buf_size, int* bytes_read) override;
+ int ReadRawData(IOBuffer* buf, int buf_size) override;
bool GetMimeType(std::string* mime_type) const override;
bool GetCharset(std::string* charset) override;
@@ -45,10 +46,10 @@ class URLRequestFileDirJob
// When we have data and a read has been pending, this function
// will fill the response buffer and notify the request
// appropriately.
- void CompleteRead();
+ void CompleteRead(Error error);
// Fills a buffer with the output.
- bool FillReadBuffer(char *buf, int buf_size, int *bytes_read);
+ bool FillReadBuffer(char* buf, int buf_size, int* bytes_read);
DirectoryLister lister_;
base::FilePath dir_path_;
@@ -67,6 +68,7 @@ class URLRequestFileDirJob
bool read_pending_;
scoped_refptr<IOBuffer> read_buffer_;
int read_buffer_length_;
+
base::WeakPtrFactory<URLRequestFileDirJob> weak_factory_;
DISALLOW_COPY_AND_ASSIGN(URLRequestFileDirJob);
diff --git a/net/url_request/url_request_file_job.cc b/net/url_request/url_request_file_job.cc
index b035c0d..2c5cf6a 100644
--- a/net/url_request/url_request_file_job.cc
+++ b/net/url_request/url_request_file_job.cc
@@ -33,7 +33,6 @@
#include "net/base/io_buffer.h"
#include "net/base/load_flags.h"
#include "net/base/mime_util.h"
-#include "net/base/net_errors.h"
#include "net/filter/filter.h"
#include "net/http/http_util.h"
#include "net/url_request/url_request_error_job.h"
@@ -63,6 +62,7 @@ URLRequestFileJob::URLRequestFileJob(
stream_(new FileStream(file_task_runner)),
file_task_runner_(file_task_runner),
remaining_bytes_(0),
+ range_parse_result_(OK),
weak_ptr_factory_(this) {}
void URLRequestFileJob::Start() {
@@ -83,22 +83,17 @@ void URLRequestFileJob::Kill() {
URLRequestJob::Kill();
}
-bool URLRequestFileJob::ReadRawData(IOBuffer* dest,
- int dest_size,
- int* bytes_read) {
+int URLRequestFileJob::ReadRawData(IOBuffer* dest, int dest_size) {
DCHECK_NE(dest_size, 0);
- DCHECK(bytes_read);
DCHECK_GE(remaining_bytes_, 0);
if (remaining_bytes_ < dest_size)
- dest_size = static_cast<int>(remaining_bytes_);
+ dest_size = remaining_bytes_;
// If we should copy zero bytes because |remaining_bytes_| is zero, short
// circuit here.
- if (!dest_size) {
- *bytes_read = 0;
- return true;
- }
+ if (!dest_size)
+ return 0;
int rv = stream_->Read(dest,
dest_size,
@@ -106,20 +101,11 @@ bool URLRequestFileJob::ReadRawData(IOBuffer* dest,
weak_ptr_factory_.GetWeakPtr(),
make_scoped_refptr(dest)));
if (rv >= 0) {
- // Data is immediately available.
- *bytes_read = rv;
remaining_bytes_ -= rv;
DCHECK_GE(remaining_bytes_, 0);
- return true;
}
- // Otherwise, a read error occured. We may just need to wait...
- if (rv == ERR_IO_PENDING) {
- SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));
- } else {
- NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, rv));
- }
- return false;
+ return rv;
}
bool URLRequestFileJob::IsRedirectResponse(GURL* location,
@@ -179,7 +165,10 @@ void URLRequestFileJob::SetExtraRequestHeaders(
const HttpRequestHeaders& headers) {
std::string range_header;
if (headers.GetHeader(HttpRequestHeaders::kRange, &range_header)) {
- // We only care about "Range" header here.
+ // This job only cares about the Range header. This method stashes the value
+ // for later use in DidOpen(), which is responsible for some of the range
+ // validation as well. NotifyStartError is not legal to call here since
+ // the job has not started.
std::vector<HttpByteRange> ranges;
if (HttpUtil::ParseRangeHeader(range_header, &ranges)) {
if (ranges.size() == 1) {
@@ -189,8 +178,7 @@ void URLRequestFileJob::SetExtraRequestHeaders(
// because we need to do multipart encoding here.
// TODO(hclam): decide whether we want to support multiple range
// requests.
- NotifyDone(URLRequestStatus(URLRequestStatus::FAILED,
- ERR_REQUEST_RANGE_NOT_SATISFIABLE));
+ range_parse_result_ = net::ERR_REQUEST_RANGE_NOT_SATISFIABLE;
}
}
}
@@ -251,13 +239,19 @@ void URLRequestFileJob::DidFetchMetaInfo(const FileMetaInfo* meta_info) {
void URLRequestFileJob::DidOpen(int result) {
if (result != OK) {
- NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, result));
+ NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED, result));
+ return;
+ }
+
+ if (range_parse_result_ != net::OK) {
+ NotifyStartError(
+ URLRequestStatus(URLRequestStatus::FAILED, range_parse_result_));
return;
}
if (!byte_range_.ComputeBounds(meta_info_.file_size)) {
- NotifyDone(URLRequestStatus(URLRequestStatus::FAILED,
- ERR_REQUEST_RANGE_NOT_SATISFIABLE));
+ NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED,
+ net::ERR_REQUEST_RANGE_NOT_SATISFIABLE));
return;
}
@@ -285,8 +279,8 @@ void URLRequestFileJob::DidOpen(int result) {
void URLRequestFileJob::DidSeek(int64 result) {
OnSeekComplete(result);
if (result != byte_range_.first_byte_position()) {
- NotifyDone(URLRequestStatus(URLRequestStatus::FAILED,
- ERR_REQUEST_RANGE_NOT_SATISFIABLE));
+ NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED,
+ ERR_REQUEST_RANGE_NOT_SATISFIABLE));
return;
}
@@ -295,8 +289,7 @@ void URLRequestFileJob::DidSeek(int64 result) {
}
void URLRequestFileJob::DidRead(scoped_refptr<IOBuffer> buf, int result) {
- if (result > 0) {
- SetStatus(URLRequestStatus()); // Clear the IO_PENDING status
+ if (result >= 0) {
remaining_bytes_ -= result;
DCHECK_GE(remaining_bytes_, 0);
}
@@ -304,13 +297,7 @@ void URLRequestFileJob::DidRead(scoped_refptr<IOBuffer> buf, int result) {
OnReadComplete(buf.get(), result);
buf = NULL;
- if (result == 0) {
- NotifyDone(URLRequestStatus());
- } else if (result < 0) {
- NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, result));
- }
-
- NotifyReadComplete(result);
+ ReadRawDataComplete(result);
}
} // namespace net
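
SetExtraRequestHeaders above can no longer fail the request on the spot, because NotifyStartError is only legal once the job has started; the parse failure is stashed in range_parse_result_ and surfaced from DidOpen. Here is a minimal model of that deferral — the names are hypothetical and the placeholder error value merely stands in for net::ERR_REQUEST_RANGE_NOT_SATISFIABLE.

    enum Error {
      OK = 0,
      ERR_REQUEST_RANGE_NOT_SATISFIABLE = -2,  // Placeholder value for this sketch.
    };

    struct FileJobModel {
      Error range_parse_result_ = OK;

      // Called before the job starts; may only record the failure.
      void SetExtraRequestHeaders(bool range_header_parsed_ok) {
        if (!range_header_parsed_ok)
          range_parse_result_ = ERR_REQUEST_RANGE_NOT_SATISFIABLE;
      }

      // Called once the file is open: the first point where reporting a start
      // error is legal, so the stashed result is checked here.
      void DidOpen(Error open_result) {
        if (open_result != OK) {
          NotifyStartError(open_result);
          return;
        }
        if (range_parse_result_ != OK) {
          NotifyStartError(range_parse_result_);
          return;
        }
        // ... compute bounds, seek, and start reading ...
      }

      void NotifyStartError(Error error) { /* Report |error| to the request. */ }
    };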
diff --git a/net/url_request/url_request_file_job.h b/net/url_request/url_request_file_job.h
index 0436dac..97c6c21 100644
--- a/net/url_request/url_request_file_job.h
+++ b/net/url_request/url_request_file_job.h
@@ -11,6 +11,7 @@
#include "base/files/file_path.h"
#include "base/memory/ref_counted.h"
#include "base/memory/weak_ptr.h"
+#include "net/base/net_errors.h"
#include "net/base/net_export.h"
#include "net/http/http_byte_range.h"
#include "net/url_request/url_request.h"
@@ -38,7 +39,7 @@ class NET_EXPORT URLRequestFileJob : public URLRequestJob {
// URLRequestJob:
void Start() override;
void Kill() override;
- bool ReadRawData(IOBuffer* buf, int buf_size, int* bytes_read) override;
+ int ReadRawData(IOBuffer* buf, int buf_size) override;
bool IsRedirectResponse(GURL* location, int* http_status_code) override;
Filter* SetupFilter() const override;
bool GetMimeType(std::string* mime_type) const override;
@@ -97,9 +98,12 @@ class NET_EXPORT URLRequestFileJob : public URLRequestJob {
FileMetaInfo meta_info_;
const scoped_refptr<base::TaskRunner> file_task_runner_;
+ std::vector<HttpByteRange> byte_ranges_;
HttpByteRange byte_range_;
int64 remaining_bytes_;
+ Error range_parse_result_;
+
base::WeakPtrFactory<URLRequestFileJob> weak_ptr_factory_;
DISALLOW_COPY_AND_ASSIGN(URLRequestFileJob);
diff --git a/net/url_request/url_request_ftp_job.cc b/net/url_request/url_request_ftp_job.cc
index 0d3eb25..e422d03 100644
--- a/net/url_request/url_request_ftp_job.cc
+++ b/net/url_request/url_request_ftp_job.cc
@@ -239,7 +239,7 @@ void URLRequestFtpJob::OnStartCompleted(int result) {
HandleAuthNeededResponse();
return;
} else {
- NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, result));
+ NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED, result));
}
}
@@ -251,15 +251,7 @@ void URLRequestFtpJob::OnStartCompletedAsync(int result) {
void URLRequestFtpJob::OnReadCompleted(int result) {
read_in_progress_ = false;
- if (result == 0) {
- NotifyDone(URLRequestStatus());
- } else if (result < 0) {
- NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, result));
- } else {
- // Clear the IO_PENDING status
- SetStatus(URLRequestStatus());
- }
- NotifyReadComplete(result);
+ ReadRawDataComplete(result);
}
void URLRequestFtpJob::RestartTransactionWithAuth() {
@@ -352,14 +344,12 @@ UploadProgress URLRequestFtpJob::GetUploadProgress() const {
return UploadProgress();
}
-bool URLRequestFtpJob::ReadRawData(IOBuffer* buf,
- int buf_size,
- int *bytes_read) {
+int URLRequestFtpJob::ReadRawData(IOBuffer* buf, int buf_size) {
DCHECK_NE(buf_size, 0);
- DCHECK(bytes_read);
DCHECK(!read_in_progress_);
int rv;
+
if (proxy_info_.is_direct()) {
rv = ftp_transaction_->Read(buf, buf_size,
base::Bind(&URLRequestFtpJob::OnReadCompleted,
@@ -370,18 +360,9 @@ bool URLRequestFtpJob::ReadRawData(IOBuffer* buf,
base::Unretained(this)));
}
- if (rv >= 0) {
- *bytes_read = rv;
- return true;
- }
-
- if (rv == ERR_IO_PENDING) {
+ if (rv == ERR_IO_PENDING)
read_in_progress_ = true;
- SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));
- } else {
- NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, rv));
- }
- return false;
+ return rv;
}
void URLRequestFtpJob::HandleAuthNeededResponse() {
diff --git a/net/url_request/url_request_ftp_job.h b/net/url_request/url_request_ftp_job.h
index 74caf72..6315d8f 100644
--- a/net/url_request/url_request_ftp_job.h
+++ b/net/url_request/url_request_ftp_job.h
@@ -71,7 +71,7 @@ class NET_EXPORT_PRIVATE URLRequestFtpJob : public URLRequestJob {
// TODO(ibrar): Yet to give another look at this function.
UploadProgress GetUploadProgress() const override;
- bool ReadRawData(IOBuffer* buf, int buf_size, int* bytes_read) override;
+ int ReadRawData(IOBuffer* buf, int buf_size) override;
void HandleAuthNeededResponse();
diff --git a/net/url_request/url_request_http_job.cc b/net/url_request/url_request_http_job.cc
index b34f825..13365df 100644
--- a/net/url_request/url_request_http_job.cc
+++ b/net/url_request/url_request_http_job.cc
@@ -414,11 +414,6 @@ void URLRequestHttpJob::NotifyHeadersComplete() {
URLRequestJob::NotifyHeadersComplete();
}
-void URLRequestHttpJob::NotifyDone(const URLRequestStatus& status) {
- DoneWithRequest(FINISHED);
- URLRequestJob::NotifyDone(status);
-}
-
void URLRequestHttpJob::DestroyTransaction() {
DCHECK(transaction_.get());
@@ -998,19 +993,16 @@ void URLRequestHttpJob::OnHeadersReceivedCallback(int result) {
void URLRequestHttpJob::OnReadCompleted(int result) {
read_in_progress_ = false;
+ DCHECK_NE(ERR_IO_PENDING, result);
+
if (ShouldFixMismatchedContentLength(result))
result = OK;
- if (result == OK) {
- NotifyDone(URLRequestStatus());
- } else if (result < 0) {
- NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, result));
- } else {
- // Clear the IO_PENDING status
- SetStatus(URLRequestStatus());
- }
+ // EOF or error, done with this job.
+ if (result <= 0)
+ DoneWithRequest(FINISHED);
- NotifyReadComplete(result);
+ ReadRawDataComplete(result);
}
void URLRequestHttpJob::RestartTransactionWithAuth(
@@ -1334,10 +1326,8 @@ bool URLRequestHttpJob::ShouldFixMismatchedContentLength(int rv) const {
return false;
}
-bool URLRequestHttpJob::ReadRawData(IOBuffer* buf, int buf_size,
- int* bytes_read) {
+int URLRequestHttpJob::ReadRawData(IOBuffer* buf, int buf_size) {
DCHECK_NE(buf_size, 0);
- DCHECK(bytes_read);
DCHECK(!read_in_progress_);
int rv = transaction_->Read(
@@ -1345,23 +1335,15 @@ bool URLRequestHttpJob::ReadRawData(IOBuffer* buf, int buf_size,
base::Bind(&URLRequestHttpJob::OnReadCompleted, base::Unretained(this)));
if (ShouldFixMismatchedContentLength(rv))
- rv = 0;
+ rv = OK;
- if (rv >= 0) {
- *bytes_read = rv;
- if (!rv)
- DoneWithRequest(FINISHED);
- return true;
- }
+ if (rv == 0 || (rv < 0 && rv != ERR_IO_PENDING))
+ DoneWithRequest(FINISHED);
- if (rv == ERR_IO_PENDING) {
+ if (rv == ERR_IO_PENDING)
read_in_progress_ = true;
- SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));
- } else {
- NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, rv));
- }
- return false;
+ return rv;
}
void URLRequestHttpJob::StopCaching() {
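
With the NotifyDone shadow gone from the HTTP job, the FINISHED bookkeeping hangs off the read result itself: synchronous EOF or error finishes the request, while a pending read does not. A one-function sketch of that rule, with illustrative error values:

    enum Error { OK = 0, ERR_IO_PENDING = -1 };

    // Mirrors the logic in URLRequestHttpJob::ReadRawData: mark the request
    // finished on synchronous EOF (0) or a synchronous error, but not while a
    // read is still pending.
    int AccountForReadResult(int rv, bool* finished) {
      *finished = (rv != ERR_IO_PENDING && rv <= 0);
      return rv;  // Passed through unchanged to the caller.
    }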
diff --git a/net/url_request/url_request_http_job.h b/net/url_request/url_request_http_job.h
index 4ee4440..f9ce401 100644
--- a/net/url_request/url_request_http_job.h
+++ b/net/url_request/url_request_http_job.h
@@ -78,9 +78,6 @@ class NET_EXPORT_PRIVATE URLRequestHttpJob : public URLRequestJob {
// Shadows URLRequestJob's version of this method so we can grab cookies.
void NotifyHeadersComplete();
- // Shadows URLRequestJob's method so we can record histograms.
- void NotifyDone(const URLRequestStatus& status);
-
void DestroyTransaction();
void AddExtraHeaders();
@@ -131,7 +128,7 @@ class NET_EXPORT_PRIVATE URLRequestHttpJob : public URLRequestJob {
void ContinueWithCertificate(X509Certificate* client_cert) override;
void ContinueDespiteLastError() override;
void ResumeNetworkStart() override;
- bool ReadRawData(IOBuffer* buf, int buf_size, int* bytes_read) override;
+ int ReadRawData(IOBuffer* buf, int buf_size) override;
void StopCaching() override;
bool GetFullRequestHeaders(HttpRequestHeaders* headers) const override;
int64 GetTotalReceivedBytes() const override;
diff --git a/net/url_request/url_request_job.cc b/net/url_request/url_request_job.cc
index 641c07b..14ae8e5 100644
--- a/net/url_request/url_request_job.cc
+++ b/net/url_request/url_request_job.cc
@@ -101,45 +101,46 @@ void URLRequestJob::DetachRequest() {
request_ = NULL;
}
-// This function calls ReadData to get stream data. If a filter exists, passes
-// the data to the attached filter. Then returns the output from filter back to
-// the caller.
+// This function calls ReadRawData to get stream data. If a filter exists, it
+// passes the data to the attached filter. It then returns the output from
+// the filter back to the caller.
bool URLRequestJob::Read(IOBuffer* buf, int buf_size, int *bytes_read) {
- bool rv = false;
-
DCHECK_LT(buf_size, 1000000); // Sanity check.
DCHECK(buf);
DCHECK(bytes_read);
DCHECK(filtered_read_buffer_.get() == NULL);
DCHECK_EQ(0, filtered_read_buffer_len_);
+ Error error = OK;
*bytes_read = 0;
// Skip Filter if not present.
- if (!filter_.get()) {
- rv = ReadRawDataHelper(buf, buf_size, bytes_read);
+ if (!filter_) {
+ error = ReadRawDataHelper(buf, buf_size, bytes_read);
} else {
// Save the caller's buffers while we do IO
// in the filter's buffers.
filtered_read_buffer_ = buf;
filtered_read_buffer_len_ = buf_size;
- if (ReadFilteredData(bytes_read)) {
- rv = true; // We have data to return.
+ error = ReadFilteredData(bytes_read);
- // It is fine to call DoneReading even if ReadFilteredData receives 0
- // bytes from the net, but we avoid making that call if we know for
- // sure that's the case (ReadRawDataHelper path).
- if (*bytes_read == 0)
- DoneReading();
- } else {
- rv = false; // Error, or a new IO is pending.
- }
+ // Synchronous EOF from the filter.
+ if (error == OK && *bytes_read == 0)
+ DoneReading();
}
- if (rv && *bytes_read == 0)
- NotifyDone(URLRequestStatus());
- return rv;
+ if (error == OK) {
+ // If URLRequestJob read zero bytes, the job is at EOF.
+ if (*bytes_read == 0)
+ NotifyDone(URLRequestStatus());
+ } else if (error == ERR_IO_PENDING) {
+ SetStatus(URLRequestStatus::FromError(ERR_IO_PENDING));
+ } else {
+ NotifyDone(URLRequestStatus::FromError(error));
+ *bytes_read = -1;
+ }
+ return error == OK;
}
void URLRequestJob::StopCaching() {
@@ -480,11 +481,25 @@ void URLRequestJob::NotifyHeadersComplete() {
request_->NotifyResponseStarted();
}
-void URLRequestJob::NotifyReadComplete(int bytes_read) {
+void URLRequestJob::ConvertResultToError(int result, Error* error, int* count) {
+ if (result >= 0) {
+ *error = OK;
+ *count = result;
+ } else {
+ *error = static_cast<Error>(result);
+ *count = 0;
+ }
+}
+
+void URLRequestJob::ReadRawDataComplete(int result) {
// TODO(cbentzel): Remove ScopedTracker below once crbug.com/475755 is fixed.
tracked_objects::ScopedTracker tracking_profile(
FROM_HERE_WITH_EXPLICIT_FUNCTION(
- "475755 URLRequestJob::NotifyReadComplete"));
+ "475755 URLRequestJob::RawReadCompleted"));
+
+ Error error;
+ int bytes_read;
+ ConvertResultToError(result, &error, &bytes_read);
if (!request_ || !request_->has_delegate())
return; // The request was destroyed, so there is no more work to do.
@@ -497,11 +512,32 @@ void URLRequestJob::NotifyReadComplete(int bytes_read) {
// The headers should be complete before reads complete
DCHECK(has_handled_response_);
- OnRawReadComplete(bytes_read);
+ GatherRawReadStats(error, bytes_read);
- // Don't notify if we had an error.
- if (!request_->status().is_success())
- return;
+ if (filter_.get() && error == OK) {
+ int filter_bytes_read = 0;
+ // Tell the filter that it has more data.
+ PushInputToFilter(bytes_read);
+
+ // Filter the data.
+ error = ReadFilteredData(&filter_bytes_read);
+
+ if (!filter_bytes_read)
+ DoneReading();
+
+ DVLOG(1) << __FUNCTION__ << "() "
+ << "\"" << (request_ ? request_->url().spec() : "???") << "\""
+ << " pre bytes read = " << bytes_read
+ << " pre total = " << prefilter_bytes_read_
+ << " post total = " << postfilter_bytes_read_;
+ bytes_read = filter_bytes_read;
+ } else {
+ DVLOG(1) << __FUNCTION__ << "() "
+ << "\"" << (request_ ? request_->url().spec() : "???") << "\""
+ << " pre bytes read = " << bytes_read
+ << " pre total = " << prefilter_bytes_read_
+ << " post total = " << postfilter_bytes_read_;
+ }
// When notifying the delegate, the delegate can release the request
// (and thus release 'this'). After calling to the delegate, we must
@@ -510,25 +546,24 @@ void URLRequestJob::NotifyReadComplete(int bytes_read) {
// survival until we can get out of this method.
scoped_refptr<URLRequestJob> self_preservation(this);
- if (filter_.get()) {
- // Tell the filter that it has more data
- FilteredDataRead(bytes_read);
-
- // Filter the data.
- int filter_bytes_read = 0;
- if (ReadFilteredData(&filter_bytes_read)) {
- if (!filter_bytes_read)
- DoneReading();
- request_->NotifyReadCompleted(filter_bytes_read);
- }
+ // Synchronize the URLRequest state machine with the URLRequestJob state
+ // machine. If this read succeeded, either the request is at EOF and the
+ // URLRequest state machine goes to 'finished', or it is not and the
+ // URLRequest state machine goes to 'success'. If the read failed, the
+ // URLRequest state machine goes directly to 'finished'.
+ //
+ // Update the URLRequest's status first, so that NotifyReadCompleted has an
+ // accurate view of the request.
+ if (error == OK && bytes_read > 0) {
+ SetStatus(URLRequestStatus());
} else {
- request_->NotifyReadCompleted(bytes_read);
+ NotifyDone(URLRequestStatus::FromError(error));
}
- DVLOG(1) << __FUNCTION__ << "() "
- << "\"" << (request_ ? request_->url().spec() : "???") << "\""
- << " pre bytes read = " << bytes_read
- << " pre total = " << prefilter_bytes_read_
- << " post total = " << postfilter_bytes_read_;
+
+ // NotifyReadCompleted should be called after SetStatus or NotifyDone updates
+ // the status.
+ if (error == OK)
+ request_->NotifyReadCompleted(bytes_read);
}
void URLRequestJob::NotifyStartError(const URLRequestStatus &status) {
@@ -555,7 +590,7 @@ void URLRequestJob::NotifyDone(const URLRequestStatus &status) {
// the response before getting here.
DCHECK(has_handled_response_ || !status.is_success());
- // As with NotifyReadComplete, we need to take care to notice if we were
+  // As with ReadRawDataComplete, we need to take care to notice if we were
// destroyed during a delegate callback.
if (request_) {
request_->set_is_pending(false);
@@ -638,11 +673,8 @@ void URLRequestJob::OnCallToDelegateComplete() {
request_->OnCallToDelegateComplete();
}
-bool URLRequestJob::ReadRawData(IOBuffer* buf, int buf_size,
- int *bytes_read) {
- DCHECK(bytes_read);
- *bytes_read = 0;
- return true;
+int URLRequestJob::ReadRawData(IOBuffer* buf, int buf_size) {
+ return 0;
}
void URLRequestJob::DoneReading() {
@@ -652,38 +684,34 @@ void URLRequestJob::DoneReading() {
void URLRequestJob::DoneReadingRedirectResponse() {
}
-void URLRequestJob::FilteredDataRead(int bytes_read) {
+void URLRequestJob::PushInputToFilter(int bytes_read) {
DCHECK(filter_);
filter_->FlushStreamBuffer(bytes_read);
}
-bool URLRequestJob::ReadFilteredData(int* bytes_read) {
+Error URLRequestJob::ReadFilteredData(int* bytes_read) {
DCHECK(filter_);
DCHECK(filtered_read_buffer_.get());
DCHECK_GT(filtered_read_buffer_len_, 0);
DCHECK_LT(filtered_read_buffer_len_, 1000000); // Sanity check.
- DCHECK(!raw_read_buffer_.get());
+ DCHECK(!raw_read_buffer_);
*bytes_read = 0;
- bool rv = false;
+ Error error = ERR_FAILED;
for (;;) {
if (is_done())
- return true;
+ return OK;
if (!filter_needs_more_output_space_ && !filter_->stream_data_len()) {
// We don't have any raw data to work with, so read from the transaction.
int filtered_data_read;
- if (ReadRawDataForFilter(&filtered_data_read)) {
- if (filtered_data_read > 0) {
- // Give data to filter.
- filter_->FlushStreamBuffer(filtered_data_read);
- } else {
- return true; // EOF.
- }
- } else {
- return false; // IO Pending (or error).
- }
+ error = ReadRawDataForFilter(&filtered_data_read);
+ // If ReadRawDataForFilter returned some data, fall through to the case
+ // below; otherwise, return early.
+ if (error != OK || filtered_data_read == 0)
+ return error;
+ filter_->FlushStreamBuffer(filtered_data_read);
}
if ((filter_->stream_data_len() || filter_needs_more_output_space_) &&
@@ -709,7 +737,7 @@ bool URLRequestJob::ReadFilteredData(int* bytes_read) {
filter_needs_more_output_space_ = false;
*bytes_read = filtered_data_len;
postfilter_bytes_read_ += filtered_data_len;
- rv = true;
+ error = OK;
break;
}
case Filter::FILTER_NEED_MORE_DATA: {
@@ -722,7 +750,7 @@ bool URLRequestJob::ReadFilteredData(int* bytes_read) {
if (filtered_data_len > 0) {
*bytes_read = filtered_data_len;
postfilter_bytes_read_ += filtered_data_len;
- rv = true;
+ error = OK;
} else {
// Read again since we haven't received enough data yet (e.g., we
// may not have a complete gzip header yet).
@@ -733,7 +761,7 @@ bool URLRequestJob::ReadFilteredData(int* bytes_read) {
case Filter::FILTER_OK: {
*bytes_read = filtered_data_len;
postfilter_bytes_read_ += filtered_data_len;
- rv = true;
+ error = OK;
break;
}
case Filter::FILTER_ERROR: {
@@ -741,21 +769,19 @@ bool URLRequestJob::ReadFilteredData(int* bytes_read) {
<< "\"" << (request_ ? request_->url().spec() : "???")
<< "\"" << " Filter Error";
filter_needs_more_output_space_ = false;
- NotifyDone(URLRequestStatus(URLRequestStatus::FAILED,
- ERR_CONTENT_DECODING_FAILED));
- rv = false;
+ error = ERR_CONTENT_DECODING_FAILED;
break;
}
default: {
NOTREACHED();
filter_needs_more_output_space_ = false;
- rv = false;
+ error = ERR_FAILED;
break;
}
}
// If logging all bytes is enabled, log the filtered bytes read.
- if (rv && request() && filtered_data_len > 0 &&
+ if (error == OK && request() && filtered_data_len > 0 &&
request()->net_log().IsCapturing()) {
request()->net_log().AddByteTransferEvent(
NetLog::TYPE_URL_REQUEST_JOB_FILTERED_BYTES_READ, filtered_data_len,
@@ -763,18 +789,18 @@ bool URLRequestJob::ReadFilteredData(int* bytes_read) {
}
} else {
// we are done, or there is no data left.
- rv = true;
+ error = OK;
}
break;
}
- if (rv) {
+ if (error == OK) {
// When we successfully finished a read, we no longer need to save the
// caller's buffers. Release our reference.
filtered_read_buffer_ = NULL;
filtered_read_buffer_len_ = 0;
}
- return rv;
+ return error;
}
void URLRequestJob::DestroyFilters() {
@@ -807,9 +833,8 @@ void URLRequestJob::SetProxyServer(const HostPortPair& proxy_server) {
request_->proxy_server_ = proxy_server;
}
-bool URLRequestJob::ReadRawDataForFilter(int* bytes_read) {
- bool rv = false;
-
+Error URLRequestJob::ReadRawDataForFilter(int* bytes_read) {
+ Error error = ERR_FAILED;
DCHECK(bytes_read);
DCHECK(filter_.get());
@@ -821,29 +846,28 @@ bool URLRequestJob::ReadRawDataForFilter(int* bytes_read) {
if (!filter_->stream_data_len() && !is_done()) {
IOBuffer* stream_buffer = filter_->stream_buffer();
int stream_buffer_size = filter_->stream_buffer_size();
- rv = ReadRawDataHelper(stream_buffer, stream_buffer_size, bytes_read);
+ error = ReadRawDataHelper(stream_buffer, stream_buffer_size, bytes_read);
}
- return rv;
+ return error;
}
-bool URLRequestJob::ReadRawDataHelper(IOBuffer* buf, int buf_size,
- int* bytes_read) {
- DCHECK(!request_->status().is_io_pending());
- DCHECK(raw_read_buffer_.get() == NULL);
+Error URLRequestJob::ReadRawDataHelper(IOBuffer* buf,
+ int buf_size,
+ int* bytes_read) {
+ DCHECK(!raw_read_buffer_);
- // Keep a pointer to the read buffer, so we have access to it in the
- // OnRawReadComplete() callback in the event that the read completes
- // asynchronously.
+ // Keep a pointer to the read buffer, so we have access to it in
+ // GatherRawReadStats() in the event that the read completes asynchronously.
raw_read_buffer_ = buf;
- bool rv = ReadRawData(buf, buf_size, bytes_read);
+ Error error;
+ ConvertResultToError(ReadRawData(buf, buf_size), &error, bytes_read);
- if (!request_->status().is_io_pending()) {
- // If the read completes synchronously, either success or failure,
- // invoke the OnRawReadComplete callback so we can account for the
- // completed read.
- OnRawReadComplete(*bytes_read);
+ if (error != ERR_IO_PENDING) {
+ // If the read completes synchronously, either success or failure, invoke
+ // GatherRawReadStats so we can account for the completed read.
+ GatherRawReadStats(error, *bytes_read);
}
- return rv;
+ return error;
}
void URLRequestJob::FollowRedirect(const RedirectInfo& redirect_info) {
@@ -852,20 +876,27 @@ void URLRequestJob::FollowRedirect(const RedirectInfo& redirect_info) {
NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, rv));
}
-void URLRequestJob::OnRawReadComplete(int bytes_read) {
- DCHECK(raw_read_buffer_.get());
- // If |filter_| is non-NULL, bytes will be logged after it is applied instead.
+void URLRequestJob::GatherRawReadStats(Error error, int bytes_read) {
+ DCHECK(raw_read_buffer_ || bytes_read == 0);
+ DCHECK_NE(ERR_IO_PENDING, error);
+
+ if (error != OK) {
+ raw_read_buffer_ = nullptr;
+ return;
+ }
+ // If |filter_| is non-NULL, bytes will be logged after it is applied
+ // instead.
if (!filter_.get() && request() && bytes_read > 0 &&
request()->net_log().IsCapturing()) {
request()->net_log().AddByteTransferEvent(
- NetLog::TYPE_URL_REQUEST_JOB_BYTES_READ,
- bytes_read, raw_read_buffer_->data());
+ NetLog::TYPE_URL_REQUEST_JOB_BYTES_READ, bytes_read,
+ raw_read_buffer_->data());
}
if (bytes_read > 0) {
RecordBytesRead(bytes_read);
}
- raw_read_buffer_ = NULL;
+ raw_read_buffer_ = nullptr;
}
void URLRequestJob::RecordBytesRead(int bytes_read) {
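
The whole refactor leans on one packing rule, made explicit by ConvertResultToError above: a single int carries either a byte count or a net error, never both. A standalone model with a quick check of both directions (error values illustrative; this mirrors, rather than reproduces, the Chromium function):

    #include <cassert>

    enum Error { OK = 0, ERR_IO_PENDING = -1, ERR_FAILED = -2 };

    // Mirrors URLRequestJob::ConvertResultToError: non-negative results are
    // byte counts with error OK; negative results are errors with a count of 0.
    void ConvertResultToError(int result, Error* error, int* count) {
      if (result >= 0) {
        *error = OK;
        *count = result;
      } else {
        *error = static_cast<Error>(result);
        *count = 0;
      }
    }

    int main() {
      Error error;
      int count;
      ConvertResultToError(512, &error, &count);
      assert(error == OK && count == 512);
      ConvertResultToError(ERR_FAILED, &error, &count);
      assert(error == ERR_FAILED && count == 0);
    }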
diff --git a/net/url_request/url_request_job.h b/net/url_request/url_request_job.h
index 82b1363..002dc62 100644
--- a/net/url_request/url_request_job.h
+++ b/net/url_request/url_request_job.h
@@ -17,6 +17,7 @@
#include "base/power_monitor/power_observer.h"
#include "net/base/host_port_pair.h"
#include "net/base/load_states.h"
+#include "net/base/net_errors.h"
#include "net/base/net_export.h"
#include "net/base/request_priority.h"
#include "net/base/upload_progress.h"
@@ -276,23 +277,9 @@ class NET_EXPORT URLRequestJob
// Notifies the job that headers have been received.
void NotifyHeadersComplete();
- // Notifies the request that the job has completed a Read operation.
- void NotifyReadComplete(int bytes_read);
-
// Notifies the request that a start error has occurred.
void NotifyStartError(const URLRequestStatus& status);
- // NotifyDone marks when we are done with a request. It is really
- // a glorified set_status, but also does internal state checking and
- // job tracking. It should be called once per request, when the job is
- // finished doing all IO.
- void NotifyDone(const URLRequestStatus& status);
-
- // Some work performed by NotifyDone must be completed on a separate task
- // so as to avoid re-entering the delegate. This method exists to perform
- // that work.
- void CompleteNotifyDone();
-
// Used as an asynchronous callback for Kill to notify the URLRequest
// that we were canceled.
void NotifyCanceled();
@@ -305,17 +292,19 @@ class NET_EXPORT URLRequestJob
void OnCallToDelegate();
void OnCallToDelegateComplete();
- // Called to read raw (pre-filtered) data from this Job.
- // If returning true, data was read from the job. buf will contain
- // the data, and bytes_read will receive the number of bytes read.
- // If returning true, and bytes_read is returned as 0, there is no
- // additional data to be read.
- // If returning false, an error occurred or an async IO is now pending.
- // If async IO is pending, the status of the request will be
- // URLRequestStatus::IO_PENDING, and buf must remain available until the
- // operation is completed. See comments on URLRequest::Read for more
- // info.
- virtual bool ReadRawData(IOBuffer* buf, int buf_size, int *bytes_read);
+ // Called to read raw (pre-filtered) data from this Job. Reads at most
+ // |buf_size| bytes into |buf|.
+ // Possible return values:
+ // >= 0: Read completed synchronously. Return value is the number of bytes
+  //          read. 0 means EOF.
+ // ERR_IO_PENDING: Read pending asynchronously.
+ // When the read completes, |ReadRawDataComplete| should be
+ // called.
+ // Any other negative number: Read failed synchronously. Return value is a
+ // network error code.
+ // This method might hold onto a reference to |buf| (by incrementing the
+  // refcount) until the read completes or is canceled.
+ virtual int ReadRawData(IOBuffer* buf, int buf_size);
// Called to tell the job that a filter has successfully reached the end of
// the stream.
@@ -326,14 +315,14 @@ class NET_EXPORT URLRequestJob
// bodies are never read.
virtual void DoneReadingRedirectResponse();
- // Informs the filter that data has been read into its buffer
- void FilteredDataRead(int bytes_read);
-
- // Reads filtered data from the request. Returns true if successful,
- // false otherwise. Note, if there is not enough data received to
- // return data, this call can issue a new async IO request under
- // the hood.
- bool ReadFilteredData(int *bytes_read);
+ // Reads filtered data from the request. Returns OK if a read completed
+ // synchronously, ERR_IO_PENDING if the read could not complete
+ // synchronously, and some other error code if the read failed
+ // synchronously. Note that this function can issue new asynchronous
+ // requests if needed, in which case it returns ERR_IO_PENDING. On
+ // synchronous completion, |*bytes_read| is the number of bytes output by
+ // the filter chain when OK is returned, and zero when an error is returned.
+ Error ReadFilteredData(int* bytes_read);
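A caller inside URLRequestJob then interprets the (Error, bytes_read) pair
along these lines (a sketch of the stated semantics, not a quote of the
implementation):

  int bytes_read = 0;
  Error error = ReadFilteredData(&bytes_read);
  if (error == OK) {
    // Synchronous success; |bytes_read| is the filter output, 0 at EOF.
  } else if (error == ERR_IO_PENDING) {
    // A raw read is in flight; ReadRawDataComplete() will resume the job.
  } else {
    // Synchronous failure; |bytes_read| is 0.
  }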
// Whether the response is being filtered in this job.
// Only valid after NotifyHeadersComplete() has been called.
@@ -365,19 +354,33 @@ class NET_EXPORT URLRequestJob
// reflects bytes read even when there is no filter.
int64 postfilter_bytes_read() const { return postfilter_bytes_read_; }
+ // Turns an integer result code into an Error and a count of bytes read.
+ // The semantics are:
+ // |result| >= 0: |*error| == OK, |*count| == |result|
+ // |result| < 0: |*error| == |result|, |*count| == 0
+ static void ConvertResultToError(int result, Error* error, int* count);
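The stated semantics pin the helper down completely; one conforming body
(assumed here for illustration, the real definition lives in
url_request_job.cc) is:

  // static
  void URLRequestJob::ConvertResultToError(int result, Error* error,
                                           int* count) {
    if (result >= 0) {
      *error = OK;
      *count = result;
    } else {
      *error = static_cast<Error>(result);
      *count = 0;
    }
  }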
+
+ // Completion callback for raw reads. See |ReadRawData| for details.
+ // |bytes_read| is either >= 0 to indicate a successful read and count of
+ // bytes read, or < 0 to indicate an error.
+ void ReadRawDataComplete(int bytes_read);
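An asynchronous job pairs an ERR_IO_PENDING return with a later call to
ReadRawDataComplete(), mirroring the FileSystemURLRequestJob change further
down in this patch (hypothetical MyJob; |reader_| and DidRead() are
illustrative names):

  int MyJob::ReadRawData(IOBuffer* buf, int buf_size) {
    // |reader_| reports bytes read, ERR_IO_PENDING, or a net error, which
    // matches the new ReadRawData contract, so it can be forwarded as-is.
    return reader_->Read(buf, buf_size,
                         base::Bind(&MyJob::DidRead,
                                    weak_factory_.GetWeakPtr()));
  }

  void MyJob::DidRead(int result) {
    // |result| is >= 0 (bytes read) or a net error; hand it to the base
    // class. This replaces the old SetStatus()/NotifyReadComplete() pair.
    ReadRawDataComplete(result);
  }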
+
// The request that initiated this job. This value MAY BE NULL if the
// request was released by DetachRequest().
URLRequest* request_;
private:
// When data filtering is enabled, this function is used to read data
- // for the filter. Returns true if raw data was read. Returns false if
- // an error occurred (or we are waiting for IO to complete).
- bool ReadRawDataForFilter(int *bytes_read);
+ // for the filter. Returns a net error code indicating whether raw data
+ // was read successfully, an error occurred, or the read is pending.
+ Error ReadRawDataForFilter(int* bytes_read);
+
+ // Informs the filter chain that data has been read into its buffer.
+ void PushInputToFilter(int bytes_read);
// Invokes ReadRawData and records bytes read if the read completes
// synchronously.
- bool ReadRawDataHelper(IOBuffer* buf, int buf_size, int* bytes_read);
+ Error ReadRawDataHelper(IOBuffer* buf, int buf_size, int* bytes_read);
// Called in response to a redirect that was not canceled to follow the
// redirect. The current job will be replaced with a new job loading the
@@ -386,9 +389,9 @@ class NET_EXPORT URLRequestJob
// Called after every raw read. If |bytes_read| is > 0, this indicates
// a successful read of |bytes_read| unfiltered bytes. If |bytes_read|
- // is 0, this indicates that there is no additional data to read. If
- // |bytes_read| is < 0, an error occurred and no bytes were read.
- void OnRawReadComplete(int bytes_read);
+ // is 0, this indicates that there is no additional data to read. If
+ // |error| is not OK, the read failed and no bytes were read.
+ void GatherRawReadStats(Error error, int bytes_read);
// Updates the profiling info and notifies observers that an additional
// |bytes_read| unfiltered bytes have been read for this job.
@@ -398,6 +401,16 @@ class NET_EXPORT URLRequestJob
// out.
bool FilterHasData();
+ // NotifyDone marks that the request is done. It is really a glorified
+ // set_status, but also does internal state checking and job tracking. It
+ // should be called once per request, when the job is finished doing all IO.
+ void NotifyDone(const URLRequestStatus& status);
+
+ // Some work performed by NotifyDone must be completed asynchronously so
+ // as to avoid re-entering URLRequest::Delegate. This method performs that
+ // work.
+ void CompleteNotifyDone();
+
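A sketch of that deferral, assuming the usual Chromium pattern of posting a
task bound through |weak_factory_| (the actual scheduling in
url_request_job.cc may differ):

  base::MessageLoop::current()->PostTask(
      FROM_HERE, base::Bind(&URLRequestJob::CompleteNotifyDone,
                            weak_factory_.GetWeakPtr()));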
// Subclasses may implement this method to record packet arrival times.
// The default implementation does nothing. Only invoked when bytes have been
// read since the last invocation.
diff --git a/net/url_request/url_request_simple_job.cc b/net/url_request/url_request_simple_job.cc
index 2fdbef4..c12555d 100644
--- a/net/url_request/url_request_simple_job.cc
+++ b/net/url_request/url_request_simple_job.cc
@@ -65,32 +65,20 @@ bool URLRequestSimpleJob::GetCharset(std::string* charset) {
URLRequestSimpleJob::~URLRequestSimpleJob() {}
-bool URLRequestSimpleJob::ReadRawData(IOBuffer* buf, int buf_size,
- int* bytes_read) {
- DCHECK(bytes_read);
- buf_size = static_cast<int>(
- std::min(static_cast<int64>(buf_size),
- byte_range_.last_byte_position() - next_data_offset_ + 1));
- DCHECK_GE(buf_size, 0);
- if (buf_size == 0) {
- *bytes_read = 0;
- return true;
- }
+int URLRequestSimpleJob::ReadRawData(IOBuffer* buf, int buf_size) {
+ buf_size = static_cast<int>(
+ std::min(static_cast<int64>(buf_size),
+ byte_range_.last_byte_position() - next_data_offset_ + 1));
+ if (buf_size == 0)
+ return 0;
// Do memory copy on a background thread. See crbug.com/422489.
GetTaskRunner()->PostTaskAndReply(
FROM_HERE, base::Bind(&CopyData, make_scoped_refptr(buf), buf_size, data_,
next_data_offset_),
- base::Bind(&URLRequestSimpleJob::OnReadCompleted,
+ base::Bind(&URLRequestSimpleJob::ReadRawDataComplete,
weak_factory_.GetWeakPtr(), buf_size));
next_data_offset_ += buf_size;
- SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));
- return false;
-}
-
-void URLRequestSimpleJob::OnReadCompleted(int bytes_read) {
- SetStatus(URLRequestStatus());
- NotifyReadComplete(bytes_read);
+ return ERR_IO_PENDING;
}
base::TaskRunner* URLRequestSimpleJob::GetTaskRunner() const {
@@ -121,8 +109,8 @@ void URLRequestSimpleJob::StartAsync() {
return;
if (ranges().size() > 1) {
- NotifyDone(URLRequestStatus(URLRequestStatus::FAILED,
- ERR_REQUEST_RANGE_NOT_SATISFIABLE));
+ NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED,
+ ERR_REQUEST_RANGE_NOT_SATISFIABLE));
return;
}
@@ -142,8 +130,8 @@ void URLRequestSimpleJob::OnGetDataCompleted(int result) {
if (result == OK) {
// Notify that the headers are complete
if (!byte_range_.ComputeBounds(data_->size())) {
- NotifyDone(URLRequestStatus(URLRequestStatus::FAILED,
- ERR_REQUEST_RANGE_NOT_SATISFIABLE));
+ NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED,
+ ERR_REQUEST_RANGE_NOT_SATISFIABLE));
return;
}
diff --git a/net/url_request/url_request_simple_job.h b/net/url_request/url_request_simple_job.h
index 6c9d5e8..06f718e 100644
--- a/net/url_request/url_request_simple_job.h
+++ b/net/url_request/url_request_simple_job.h
@@ -28,7 +28,7 @@ class NET_EXPORT URLRequestSimpleJob : public URLRangeRequestJob {
void Start() override;
void Kill() override;
- bool ReadRawData(IOBuffer* buf, int buf_size, int* bytes_read) override;
+ int ReadRawData(IOBuffer* buf, int buf_size) override;
bool GetMimeType(std::string* mime_type) const override;
bool GetCharset(std::string* charset) override;
@@ -66,7 +66,6 @@ class NET_EXPORT URLRequestSimpleJob : public URLRangeRequestJob {
private:
void OnGetDataCompleted(int result);
- void OnReadCompleted(int bytes_read);
HttpByteRange byte_range_;
std::string mime_type_;
diff --git a/net/url_request/url_request_status.cc b/net/url_request/url_request_status.cc
index 2a6a2fa..7207df2 100644
--- a/net/url_request/url_request_status.cc
+++ b/net/url_request/url_request_status.cc
@@ -46,4 +46,19 @@ URLRequestStatus URLRequestStatus::FromError(int error) {
}
}
+Error URLRequestStatus::ToNetError() const {
+ switch (status_) {
+ case SUCCESS:
+ return OK;
+ case IO_PENDING:
+ return ERR_IO_PENDING;
+ case CANCELED:
+ return ERR_ABORTED;
+ case FAILED:
+ return static_cast<Error>(error_);
+ }
+ NOTREACHED();
+ return ERR_FAILED;
+}
+
} // namespace net
diff --git a/net/url_request/url_request_status.h b/net/url_request/url_request_status.h
index 44a5d22..694c510 100644
--- a/net/url_request/url_request_status.h
+++ b/net/url_request/url_request_status.h
@@ -5,6 +5,7 @@
#ifndef NET_URL_REQUEST_URL_REQUEST_STATUS_H_
#define NET_URL_REQUEST_URL_REQUEST_STATUS_H_
+#include "net/base/net_errors.h"
#include "net/base/net_export.h"
namespace net {
@@ -41,6 +42,13 @@ class NET_EXPORT URLRequestStatus {
// deprecated. See https://crbug.com/490311.
static URLRequestStatus FromError(int error);
+ // Returns an Error corresponding to |status_|:
+ // OK for SUCCESS
+ // ERR_IO_PENDING for IO_PENDING
+ // ERR_ABORTED for CANCELED
+ // the stored Error for FAILED
+ Error ToNetError() const;
+
Status status() const { return status_; }
int error() const { return error_; }
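Assuming FromError() applies the inverse mapping (OK to SUCCESS,
ERR_IO_PENDING to IO_PENDING, ERR_ABORTED to CANCELED, anything else to
FAILED), the two conversions round-trip; a quick sanity sketch:

  URLRequestStatus status = URLRequestStatus::FromError(ERR_ABORTED);
  DCHECK_EQ(URLRequestStatus::CANCELED, status.status());
  DCHECK_EQ(ERR_ABORTED, status.ToNetError());

  status = URLRequestStatus::FromError(ERR_FILE_NOT_FOUND);
  DCHECK_EQ(URLRequestStatus::FAILED, status.status());
  DCHECK_EQ(ERR_FILE_NOT_FOUND, status.ToNetError());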
diff --git a/net/url_request/url_request_test_job.cc b/net/url_request/url_request_test_job.cc
index c808d0b..bd1c905 100644
--- a/net/url_request/url_request_test_job.cc
+++ b/net/url_request/url_request_test_job.cc
@@ -210,8 +210,8 @@ void URLRequestTestJob::StartAsync() {
// unexpected url, return error
// FIXME(brettw) we may want to use WININET errors or have some more types
// of errors
- NotifyDone(URLRequestStatus(URLRequestStatus::FAILED,
- ERR_INVALID_URL));
+ NotifyStartError(
+ URLRequestStatus(URLRequestStatus::FAILED, ERR_INVALID_URL));
// FIXME(brettw): this should emulate a network error, and not just fail
// initiating a connection
return;
@@ -223,21 +223,15 @@ void URLRequestTestJob::StartAsync() {
this->NotifyHeadersComplete();
}
-bool URLRequestTestJob::ReadRawData(IOBuffer* buf, int buf_size,
- int *bytes_read) {
+int URLRequestTestJob::ReadRawData(IOBuffer* buf, int buf_size) {
if (stage_ == WAITING) {
async_buf_ = buf;
async_buf_size_ = buf_size;
- SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));
- return false;
+ return ERR_IO_PENDING;
}
- DCHECK(bytes_read);
- *bytes_read = 0;
-
- if (offset_ >= static_cast<int>(response_data_.length())) {
- return true; // done reading
- }
+ if (offset_ >= static_cast<int>(response_data_.length()))
+ return 0; // done reading
int to_read = buf_size;
if (to_read + offset_ > static_cast<int>(response_data_.length()))
@@ -246,8 +240,7 @@ bool URLRequestTestJob::ReadRawData(IOBuffer* buf, int buf_size,
memcpy(buf->data(), &response_data_.c_str()[offset_], to_read);
offset_ += to_read;
- *bytes_read = to_read;
- return true;
+ return to_read;
}
void URLRequestTestJob::GetResponseInfo(HttpResponseInfo* info) {
@@ -305,16 +298,15 @@ void URLRequestTestJob::ProcessNextOperation() {
stage_ = DATA_AVAILABLE;
// OK if ReadRawData wasn't called yet.
if (async_buf_) {
- int bytes_read;
- if (!ReadRawData(async_buf_, async_buf_size_, &bytes_read))
- NOTREACHED() << "This should not return false in DATA_AVAILABLE.";
- SetStatus(URLRequestStatus()); // clear the io pending flag
+ int result = ReadRawData(async_buf_, async_buf_size_);
+ if (result < 0)
+ NOTREACHED() << "Reads should not fail in DATA_AVAILABLE.";
if (NextReadAsync()) {
// Make all future reads return io pending until the next
// ProcessNextOperation().
stage_ = WAITING;
}
- NotifyReadComplete(bytes_read);
+ ReadRawDataComplete(result);
}
break;
case DATA_AVAILABLE:
diff --git a/net/url_request/url_request_test_job.h b/net/url_request/url_request_test_job.h
index 09b31a4..eb1db01 100644
--- a/net/url_request/url_request_test_job.h
+++ b/net/url_request/url_request_test_job.h
@@ -111,7 +111,7 @@ class NET_EXPORT_PRIVATE URLRequestTestJob : public URLRequestJob {
// Job functions
void SetPriority(RequestPriority priority) override;
void Start() override;
- bool ReadRawData(IOBuffer* buf, int buf_size, int* bytes_read) override;
+ int ReadRawData(IOBuffer* buf, int buf_size) override;
void Kill() override;
bool GetMimeType(std::string* mime_type) const override;
void GetResponseInfo(HttpResponseInfo* info) override;
diff --git a/storage/browser/blob/blob_url_request_job.cc b/storage/browser/blob/blob_url_request_job.cc
index 6ec2fe5..deea300 100644
--- a/storage/browser/blob/blob_url_request_job.cc
+++ b/storage/browser/blob/blob_url_request_job.cc
@@ -75,41 +75,37 @@ void BlobURLRequestJob::Kill() {
weak_factory_.InvalidateWeakPtrs();
}
-bool BlobURLRequestJob::ReadRawData(net::IOBuffer* dest,
- int dest_size,
- int* bytes_read) {
+int BlobURLRequestJob::ReadRawData(net::IOBuffer* dest, int dest_size) {
TRACE_EVENT_ASYNC_BEGIN1("Blob", "BlobRequest::ReadRawData", this, "uuid",
blob_handle_ ? blob_handle_->uuid() : "NotFound");
DCHECK_NE(dest_size, 0);
- DCHECK(bytes_read);
- // Bail out immediately if we encounter an error.
- if (error_) {
- *bytes_read = 0;
- return true;
- }
+ // Bail out immediately if we encounter an error. This happens if a
+ // previous ReadRawData signaled an error to its caller but the caller
+ // called ReadRawData again anyway.
+ if (error_)
+ return 0;
+ int bytes_read = 0;
BlobReader::Status read_status =
- blob_reader_->Read(dest, dest_size, bytes_read,
+ blob_reader_->Read(dest, dest_size, &bytes_read,
base::Bind(&BlobURLRequestJob::DidReadRawData,
weak_factory_.GetWeakPtr()));
switch (read_status) {
case BlobReader::Status::NET_ERROR:
- NotifyFailure(blob_reader_->net_error());
TRACE_EVENT_ASYNC_END1("Blob", "BlobRequest::ReadRawData", this, "uuid",
blob_handle_ ? blob_handle_->uuid() : "NotFound");
- return false;
+ return blob_reader_->net_error();
case BlobReader::Status::IO_PENDING:
- SetStatus(net::URLRequestStatus(net::URLRequestStatus::IO_PENDING, 0));
- return false;
+ return net::ERR_IO_PENDING;
case BlobReader::Status::DONE:
TRACE_EVENT_ASYNC_END1("Blob", "BlobRequest::ReadRawData", this, "uuid",
blob_handle_ ? blob_handle_->uuid() : "NotFound");
- return true;
+ return bytes_read;
}
NOTREACHED();
- return true;
+ return 0;
}
bool BlobURLRequestJob::GetMimeType(std::string* mime_type) const {
@@ -222,13 +218,7 @@ void BlobURLRequestJob::DidCalculateSize(int result) {
void BlobURLRequestJob::DidReadRawData(int result) {
TRACE_EVENT_ASYNC_END1("Blob", "BlobRequest::ReadRawData", this, "uuid",
blob_handle_ ? blob_handle_->uuid() : "NotFound");
- if (result < 0) {
- NotifyFailure(result);
- return;
- }
- // Clear the IO_PENDING status
- SetStatus(net::URLRequestStatus());
- NotifyReadComplete(result);
+ ReadRawDataComplete(result);
}
void BlobURLRequestJob::NotifyFailure(int error_code) {
@@ -236,11 +226,7 @@ void BlobURLRequestJob::NotifyFailure(int error_code) {
// If we already return the headers on success, we can't change the headers
// now. Instead, we just error out.
- if (response_info_) {
- NotifyDone(
- net::URLRequestStatus(net::URLRequestStatus::FAILED, error_code));
- return;
- }
+ DCHECK(!response_info_) << "Cannot NotifyFailure after headers.";
net::HttpStatusCode status_code = net::HTTP_INTERNAL_SERVER_ERROR;
switch (error_code) {
diff --git a/storage/browser/blob/blob_url_request_job.h b/storage/browser/blob/blob_url_request_job.h
index 21baa2c..1f0b9fb 100644
--- a/storage/browser/blob/blob_url_request_job.h
+++ b/storage/browser/blob/blob_url_request_job.h
@@ -44,7 +44,7 @@ class STORAGE_EXPORT BlobURLRequestJob
// net::URLRequestJob methods.
void Start() override;
void Kill() override;
- bool ReadRawData(net::IOBuffer* buf, int buf_size, int* bytes_read) override;
+ int ReadRawData(net::IOBuffer* buf, int buf_size) override;
bool GetMimeType(std::string* mime_type) const override;
void GetResponseInfo(net::HttpResponseInfo* info) override;
int GetResponseCode() const override;
diff --git a/storage/browser/fileapi/file_system_dir_url_request_job.cc b/storage/browser/fileapi/file_system_dir_url_request_job.cc
index 35b6c1d..2a0984b7 100644
--- a/storage/browser/fileapi/file_system_dir_url_request_job.cc
+++ b/storage/browser/fileapi/file_system_dir_url_request_job.cc
@@ -14,7 +14,6 @@
#include "base/time/time.h"
#include "build/build_config.h"
#include "net/base/io_buffer.h"
-#include "net/base/net_errors.h"
#include "net/base/net_util.h"
#include "net/url_request/url_request.h"
#include "storage/browser/fileapi/file_system_context.h"
@@ -44,15 +43,14 @@ FileSystemDirURLRequestJob::FileSystemDirURLRequestJob(
FileSystemDirURLRequestJob::~FileSystemDirURLRequestJob() {
}
-bool FileSystemDirURLRequestJob::ReadRawData(net::IOBuffer* dest, int dest_size,
- int *bytes_read) {
- int count = std::min(dest_size, static_cast<int>(data_.size()));
+int FileSystemDirURLRequestJob::ReadRawData(net::IOBuffer* dest,
+ int dest_size) {
+ int count = std::min(dest_size, base::checked_cast<int>(data_.size()));
if (count > 0) {
memcpy(dest->data(), data_.data(), count);
data_.erase(0, count);
}
- *bytes_read = count;
- return true;
+ return count;
}
void FileSystemDirURLRequestJob::Start() {
@@ -98,8 +96,7 @@ void FileSystemDirURLRequestJob::StartAsync() {
false);
return;
}
- NotifyDone(URLRequestStatus(URLRequestStatus::FAILED,
- net::ERR_FILE_NOT_FOUND));
+ NotifyStartError(URLRequestStatus::FromError(net::ERR_FILE_NOT_FOUND));
return;
}
file_system_context_->operation_runner()->ReadDirectory(
@@ -112,8 +109,7 @@ void FileSystemDirURLRequestJob::DidAttemptAutoMount(base::File::Error result) {
file_system_context_->CrackURL(request_->url()).is_valid()) {
StartAsync();
} else {
- NotifyDone(URLRequestStatus(URLRequestStatus::FAILED,
- net::ERR_FILE_NOT_FOUND));
+ NotifyStartError(URLRequestStatus::FromError(net::ERR_FILE_NOT_FOUND));
}
}
@@ -125,7 +121,7 @@ void FileSystemDirURLRequestJob::DidReadDirectory(
int rv = net::ERR_FILE_NOT_FOUND;
if (result == base::File::FILE_ERROR_INVALID_URL)
rv = net::ERR_INVALID_URL;
- NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, rv));
+ NotifyStartError(URLRequestStatus::FromError(rv));
return;
}
diff --git a/storage/browser/fileapi/file_system_dir_url_request_job.h b/storage/browser/fileapi/file_system_dir_url_request_job.h
index 8715c96..56b13ff 100644
--- a/storage/browser/fileapi/file_system_dir_url_request_job.h
+++ b/storage/browser/fileapi/file_system_dir_url_request_job.h
@@ -33,7 +33,7 @@ class STORAGE_EXPORT_PRIVATE FileSystemDirURLRequestJob
// URLRequestJob methods:
void Start() override;
void Kill() override;
- bool ReadRawData(net::IOBuffer* buf, int buf_size, int* bytes_read) override;
+ int ReadRawData(net::IOBuffer* buf, int buf_size) override;
bool GetCharset(std::string* charset) override;
// FilterContext methods (via URLRequestJob):
diff --git a/storage/browser/fileapi/file_system_url_request_job.cc b/storage/browser/fileapi/file_system_url_request_job.cc
index 9572721..6c33fc8 100644
--- a/storage/browser/fileapi/file_system_url_request_job.cc
+++ b/storage/browser/fileapi/file_system_url_request_job.cc
@@ -62,8 +62,8 @@ FileSystemURLRequestJob::FileSystemURLRequestJob(
file_system_context_(file_system_context),
is_directory_(false),
remaining_bytes_(0),
- weak_factory_(this) {
-}
+ range_parse_result_(net::OK),
+ weak_factory_(this) {}
FileSystemURLRequestJob::~FileSystemURLRequestJob() {}
@@ -80,39 +80,28 @@ void FileSystemURLRequestJob::Kill() {
weak_factory_.InvalidateWeakPtrs();
}
-bool FileSystemURLRequestJob::ReadRawData(net::IOBuffer* dest,
- int dest_size,
- int* bytes_read) {
+int FileSystemURLRequestJob::ReadRawData(net::IOBuffer* dest, int dest_size) {
DCHECK_NE(dest_size, 0);
- DCHECK(bytes_read);
DCHECK_GE(remaining_bytes_, 0);
if (reader_.get() == NULL)
- return false;
+ return net::ERR_FAILED;
if (remaining_bytes_ < dest_size)
- dest_size = static_cast<int>(remaining_bytes_);
+ dest_size = remaining_bytes_;
- if (!dest_size) {
- *bytes_read = 0;
- return true;
- }
+ if (!dest_size)
+ return 0;
const int rv = reader_->Read(dest, dest_size,
base::Bind(&FileSystemURLRequestJob::DidRead,
weak_factory_.GetWeakPtr()));
if (rv >= 0) {
- // Data is immediately available.
- *bytes_read = rv;
remaining_bytes_ -= rv;
DCHECK_GE(remaining_bytes_, 0);
- return true;
}
- if (rv == net::ERR_IO_PENDING)
- SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));
- else
- NotifyFailed(rv);
- return false;
+
+ return rv;
}
bool FileSystemURLRequestJob::GetMimeType(std::string* mime_type) const {
@@ -127,8 +116,12 @@ bool FileSystemURLRequestJob::GetMimeType(std::string* mime_type) const {
void FileSystemURLRequestJob::SetExtraRequestHeaders(
const net::HttpRequestHeaders& headers) {
std::string range_header;
+ // Currently this job only cares about the Range header. Note that
+ // validation is deferred to DidGetMetadata(), because it is not legal to
+ // call NotifyStartError() before the job has started.
if (headers.GetHeader(net::HttpRequestHeaders::kRange, &range_header)) {
std::vector<net::HttpByteRange> ranges;
+
if (net::HttpUtil::ParseRangeHeader(range_header, &ranges)) {
if (ranges.size() == 1) {
byte_range_ = ranges[0];
@@ -136,7 +129,7 @@ void FileSystemURLRequestJob::SetExtraRequestHeaders(
// We don't support multiple range requests in one single URL request.
// TODO(adamk): decide whether we want to support multiple range
// requests.
- NotifyFailed(net::ERR_REQUEST_RANGE_NOT_SATISFIABLE);
+ range_parse_result_ = net::ERR_REQUEST_RANGE_NOT_SATISFIABLE;
}
}
}
@@ -168,7 +161,7 @@ void FileSystemURLRequestJob::StartAsync() {
}
if (!file_system_context_->CanServeURLRequest(url_)) {
// In incognito mode the API is not usable and there should be no data.
- NotifyFailed(net::ERR_FILE_NOT_FOUND);
+ NotifyStartError(URLRequestStatus::FromError(net::ERR_FILE_NOT_FOUND));
return;
}
file_system_context_->operation_runner()->GetMetadata(
@@ -182,7 +175,7 @@ void FileSystemURLRequestJob::DidAttemptAutoMount(base::File::Error result) {
file_system_context_->CrackURL(request_->url()).is_valid()) {
StartAsync();
} else {
- NotifyFailed(net::ERR_FILE_NOT_FOUND);
+ NotifyStartError(URLRequestStatus::FromError(net::ERR_FILE_NOT_FOUND));
}
}
@@ -190,8 +183,10 @@ void FileSystemURLRequestJob::DidGetMetadata(
base::File::Error error_code,
const base::File::Info& file_info) {
if (error_code != base::File::FILE_OK) {
- NotifyFailed(error_code == base::File::FILE_ERROR_INVALID_URL ?
- net::ERR_INVALID_URL : net::ERR_FILE_NOT_FOUND);
+ NotifyStartError(URLRequestStatus::FromError(
+ error_code == base::File::FILE_ERROR_INVALID_URL
+ ? net::ERR_INVALID_URL
+ : net::ERR_FILE_NOT_FOUND));
return;
}
@@ -201,8 +196,14 @@ void FileSystemURLRequestJob::DidGetMetadata(
is_directory_ = file_info.is_directory;
+ if (range_parse_result_ != net::OK) {
+ NotifyStartError(URLRequestStatus::FromError(range_parse_result_));
+ return;
+ }
+
if (!byte_range_.ComputeBounds(file_info.size)) {
- NotifyFailed(net::ERR_REQUEST_RANGE_NOT_SATISFIABLE);
+ NotifyStartError(
+ URLRequestStatus::FromError(net::ERR_REQUEST_RANGE_NOT_SATISFIABLE));
return;
}
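Taken together, the SetExtraRequestHeaders() and DidGetMetadata() hunks above
implement a defer-then-report pattern, condensed here from the code in this
file (simplified, not a verbatim quote):

  // In SetExtraRequestHeaders(): too early for NotifyStartError(), so only
  // record the failure.
  if (net::HttpUtil::ParseRangeHeader(range_header, &ranges)) {
    if (ranges.size() == 1)
      byte_range_ = ranges[0];
    else
      range_parse_result_ = net::ERR_REQUEST_RANGE_NOT_SATISFIABLE;
  }

  // In DidGetMetadata(): the job has started, so the saved error can now be
  // surfaced as a start error.
  if (range_parse_result_ != net::OK) {
    NotifyStartError(URLRequestStatus::FromError(range_parse_result_));
    return;
  }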
@@ -226,17 +227,12 @@ void FileSystemURLRequestJob::DidGetMetadata(
}
void FileSystemURLRequestJob::DidRead(int result) {
- if (result > 0)
- SetStatus(URLRequestStatus()); // Clear the IO_PENDING status
- else if (result == 0)
- NotifyDone(URLRequestStatus());
- else
- NotifyFailed(result);
-
- remaining_bytes_ -= result;
- DCHECK_GE(remaining_bytes_, 0);
+ if (result >= 0) {
+ remaining_bytes_ -= result;
+ DCHECK_GE(remaining_bytes_, 0);
+ }
- NotifyReadComplete(result);
+ ReadRawDataComplete(result);
}
bool FileSystemURLRequestJob::IsRedirectResponse(GURL* location,
@@ -256,8 +252,4 @@ bool FileSystemURLRequestJob::IsRedirectResponse(GURL* location,
return false;
}
-void FileSystemURLRequestJob::NotifyFailed(int rv) {
- NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, rv));
-}
-
} // namespace storage
diff --git a/storage/browser/fileapi/file_system_url_request_job.h b/storage/browser/fileapi/file_system_url_request_job.h
index 0021210..09271f5 100644
--- a/storage/browser/fileapi/file_system_url_request_job.h
+++ b/storage/browser/fileapi/file_system_url_request_job.h
@@ -11,6 +11,7 @@
#include "base/memory/ref_counted.h"
#include "base/memory/scoped_ptr.h"
#include "base/memory/weak_ptr.h"
+#include "net/base/net_errors.h"
#include "net/http/http_byte_range.h"
#include "net/url_request/url_request_job.h"
#include "storage/browser/fileapi/file_system_url.h"
@@ -42,7 +43,7 @@ class STORAGE_EXPORT_PRIVATE FileSystemURLRequestJob
// URLRequestJob methods:
void Start() override;
void Kill() override;
- bool ReadRawData(net::IOBuffer* buf, int buf_size, int* bytes_read) override;
+ int ReadRawData(net::IOBuffer* buf, int buf_size) override;
bool IsRedirectResponse(GURL* location, int* http_status_code) override;
void SetExtraRequestHeaders(const net::HttpRequestHeaders& headers) override;
void GetResponseInfo(net::HttpResponseInfo* info) override;
@@ -61,7 +62,6 @@ class STORAGE_EXPORT_PRIVATE FileSystemURLRequestJob
void DidGetMetadata(base::File::Error error_code,
const base::File::Info& file_info);
void DidRead(int result);
- void NotifyFailed(int rv);
const std::string storage_domain_;
FileSystemContext* file_system_context_;
@@ -70,6 +70,7 @@ class STORAGE_EXPORT_PRIVATE FileSystemURLRequestJob
bool is_directory_;
scoped_ptr<net::HttpResponseInfo> response_info_;
int64 remaining_bytes_;
+ net::Error range_parse_result_;
net::HttpByteRange byte_range_;
base::WeakPtrFactory<FileSystemURLRequestJob> weak_factory_;