-rw-r--r--  content/browser/fileapi/blob_reader_unittest.cc | 1111
-rw-r--r--  content/browser/fileapi/blob_url_request_job_unittest.cc | 31
-rw-r--r--  content/browser/loader/upload_data_stream_builder.cc | 107
-rw-r--r--  content/browser/loader/upload_data_stream_builder.h | 4
-rw-r--r--  content/browser/loader/upload_data_stream_builder_unittest.cc | 392
-rw-r--r--  content/content_tests.gypi | 1
-rw-r--r--  net/base/upload_disk_cache_entry_element_reader.cc | 90
-rw-r--r--  net/base/upload_disk_cache_entry_element_reader.h | 72
-rw-r--r--  net/base/upload_disk_cache_entry_element_reader_unittest.cc | 331
-rw-r--r--  net/base/upload_element_reader.cc | 5
-rw-r--r--  net/base/upload_element_reader.h | 6
-rw-r--r--  net/net.gypi | 3
-rw-r--r--  storage/browser/BUILD.gn | 4
-rw-r--r--  storage/browser/blob/blob_data_handle.cc | 90
-rw-r--r--  storage/browser/blob/blob_data_handle.h | 31
-rw-r--r--  storage/browser/blob/blob_data_snapshot.h | 1
-rw-r--r--  storage/browser/blob/blob_reader.cc | 568
-rw-r--r--  storage/browser/blob/blob_reader.h | 190
-rw-r--r--  storage/browser/blob/blob_storage_context.cc | 6
-rw-r--r--  storage/browser/blob/blob_url_request_job.cc | 540
-rw-r--r--  storage/browser/blob/blob_url_request_job.h | 63
-rw-r--r--  storage/browser/blob/blob_url_request_job_factory.cc | 29
-rw-r--r--  storage/browser/blob/blob_url_request_job_factory.h | 6
-rw-r--r--  storage/browser/blob/upload_blob_element_reader.cc | 67
-rw-r--r--  storage/browser/blob/upload_blob_element_reader.h | 54
-rw-r--r--  storage/storage_browser.gyp | 5
26 files changed, 2311 insertions, 1496 deletions
diff --git a/content/browser/fileapi/blob_reader_unittest.cc b/content/browser/fileapi/blob_reader_unittest.cc
new file mode 100644
index 0000000..a7e095c
--- /dev/null
+++ b/content/browser/fileapi/blob_reader_unittest.cc
@@ -0,0 +1,1111 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "storage/browser/blob/blob_reader.h"
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/callback.h"
+#include "base/files/file_path.h"
+#include "base/location.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/message_loop/message_loop.h"
+#include "base/run_loop.h"
+#include "base/strings/stringprintf.h"
+#include "base/task_runner.h"
+#include "base/time/time.h"
+#include "content/public/test/async_file_test_helper.h"
+#include "content/public/test/test_file_system_context.h"
+#include "net/base/completion_callback.h"
+#include "net/base/io_buffer.h"
+#include "net/base/net_errors.h"
+#include "net/base/test_completion_callback.h"
+#include "net/disk_cache/disk_cache.h"
+#include "storage/browser/blob/blob_data_builder.h"
+#include "storage/browser/blob/blob_data_handle.h"
+#include "storage/browser/blob/blob_storage_context.h"
+#include "storage/browser/fileapi/file_stream_reader.h"
+#include "storage/browser/fileapi/file_system_context.h"
+#include "storage/browser/fileapi/file_system_file_util.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "url/gurl.h"
+
+using base::FilePath;
+using content::AsyncFileTestHelper;
+using net::DrainableIOBuffer;
+using net::IOBuffer;
+
+namespace storage {
+namespace {
+
+const int kTestDiskCacheStreamIndex = 0;
+
+// Our disk cache tests don't need a real data handle since the tests themselves
+// scope the disk cache and entries.
+class EmptyDataHandle : public storage::BlobDataBuilder::DataHandle {
+ private:
+ ~EmptyDataHandle() override {}
+};
+
+// A disk_cache::Entry that arbitrarily delays the completion of a read
+// operation to allow testing some races without flake. This is particularly
+// relevant in this unit test, which uses the always-synchronous MEMORY_CACHE.
+class DelayedReadEntry : public disk_cache::Entry {
+ public:
+ explicit DelayedReadEntry(disk_cache::ScopedEntryPtr entry)
+ : entry_(entry.Pass()) {}
+ ~DelayedReadEntry() override { EXPECT_FALSE(HasPendingReadCallbacks()); }
+
+ bool HasPendingReadCallbacks() { return !pending_read_callbacks_.empty(); }
+
+ void RunPendingReadCallbacks() {
+ std::vector<base::Callback<void(void)>> callbacks;
+ pending_read_callbacks_.swap(callbacks);
+ for (const auto& callback : callbacks)
+ callback.Run();
+ }
+
+ // From disk_cache::Entry:
+ void Doom() override { entry_->Doom(); }
+
+ void Close() override { delete this; } // Note this is required by the API.
+
+ std::string GetKey() const override { return entry_->GetKey(); }
+
+ base::Time GetLastUsed() const override { return entry_->GetLastUsed(); }
+
+ base::Time GetLastModified() const override {
+ return entry_->GetLastModified();
+ }
+
+ int32 GetDataSize(int index) const override {
+ return entry_->GetDataSize(index);
+ }
+
+ int ReadData(int index,
+ int offset,
+ IOBuffer* buf,
+ int buf_len,
+ const CompletionCallback& original_callback) override {
+ net::TestCompletionCallback callback;
+ int rv = entry_->ReadData(index, offset, buf, buf_len, callback.callback());
+ DCHECK_NE(rv, net::ERR_IO_PENDING)
+ << "Test expects to use a MEMORY_CACHE instance, which is synchronous.";
+ pending_read_callbacks_.push_back(base::Bind(original_callback, rv));
+ return net::ERR_IO_PENDING;
+ }
+
+ int WriteData(int index,
+ int offset,
+ IOBuffer* buf,
+ int buf_len,
+ const CompletionCallback& callback,
+ bool truncate) override {
+ return entry_->WriteData(index, offset, buf, buf_len, callback, truncate);
+ }
+
+ int ReadSparseData(int64 offset,
+ IOBuffer* buf,
+ int buf_len,
+ const CompletionCallback& callback) override {
+ return entry_->ReadSparseData(offset, buf, buf_len, callback);
+ }
+
+ int WriteSparseData(int64 offset,
+ IOBuffer* buf,
+ int buf_len,
+ const CompletionCallback& callback) override {
+ return entry_->WriteSparseData(offset, buf, buf_len, callback);
+ }
+
+ int GetAvailableRange(int64 offset,
+ int len,
+ int64* start,
+ const CompletionCallback& callback) override {
+ return entry_->GetAvailableRange(offset, len, start, callback);
+ }
+
+ bool CouldBeSparse() const override { return entry_->CouldBeSparse(); }
+
+ void CancelSparseIO() override { entry_->CancelSparseIO(); }
+
+ int ReadyForSparseIO(const CompletionCallback& callback) override {
+ return entry_->ReadyForSparseIO(callback);
+ }
+
+ private:
+ disk_cache::ScopedEntryPtr entry_;
+ std::vector<base::Callback<void(void)>> pending_read_callbacks_;
+};
+
+scoped_ptr<disk_cache::Backend> CreateInMemoryDiskCache(
+ const scoped_refptr<base::SingleThreadTaskRunner>& thread) {
+ scoped_ptr<disk_cache::Backend> cache;
+ net::TestCompletionCallback callback;
+ int rv = disk_cache::CreateCacheBackend(
+ net::MEMORY_CACHE, net::CACHE_BACKEND_DEFAULT, FilePath(), 0, false,
+ thread, nullptr, &cache, callback.callback());
+ EXPECT_EQ(net::OK, callback.GetResult(rv));
+
+ return cache.Pass();
+}
+
+disk_cache::ScopedEntryPtr CreateDiskCacheEntry(disk_cache::Backend* cache,
+ const char* key,
+ const std::string& data) {
+ disk_cache::Entry* temp_entry = nullptr;
+ net::TestCompletionCallback callback;
+ int rv = cache->CreateEntry(key, &temp_entry, callback.callback());
+ if (callback.GetResult(rv) != net::OK)
+ return nullptr;
+ disk_cache::ScopedEntryPtr entry(temp_entry);
+
+ scoped_refptr<net::StringIOBuffer> iobuffer = new net::StringIOBuffer(data);
+ rv = entry->WriteData(kTestDiskCacheStreamIndex, 0, iobuffer.get(),
+ iobuffer->size(), callback.callback(), false);
+ EXPECT_EQ(static_cast<int>(data.size()), callback.GetResult(rv));
+ return entry.Pass();
+}
+
+template <typename T>
+void SetValue(T* address, T value) {
+ *address = value;
+}
+
+class FakeFileStreamReader : public FileStreamReader {
+ public:
+ explicit FakeFileStreamReader(const std::string& contents)
+ : buffer_(new DrainableIOBuffer(
+ new net::StringIOBuffer(
+ scoped_ptr<std::string>(new std::string(contents))),
+ contents.size())),
+ net_error_(net::OK),
+ size_(contents.size()) {}
+ FakeFileStreamReader(const std::string& contents, uint64_t size)
+ : buffer_(new DrainableIOBuffer(
+ new net::StringIOBuffer(
+ scoped_ptr<std::string>(new std::string(contents))),
+ contents.size())),
+ net_error_(net::OK),
+ size_(size) {}
+
+ ~FakeFileStreamReader() override {}
+
+ void SetReturnError(int net_error) { net_error_ = net_error; }
+
+ void SetAsyncRunner(base::SingleThreadTaskRunner* runner) {
+ async_task_runner_ = runner;
+ }
+
+ int Read(net::IOBuffer* buf,
+ int buf_length,
+ const net::CompletionCallback& done) override {
+ DCHECK(buf);
+ // When async_task_runner_ is not set, return synchronously.
+ if (!async_task_runner_.get()) {
+ if (net_error_ == net::OK) {
+ return ReadImpl(buf, buf_length, net::CompletionCallback());
+ } else {
+ return net_error_;
+ }
+ }
+
+ // Otherwise always return asynchronously.
+ if (net_error_ == net::OK) {
+ async_task_runner_->PostTask(
+ FROM_HERE,
+ base::Bind(base::IgnoreResult(&FakeFileStreamReader::ReadImpl),
+ base::Unretained(this), make_scoped_refptr(buf),
+ buf_length, done));
+ } else {
+ async_task_runner_->PostTask(FROM_HERE, base::Bind(done, net_error_));
+ }
+ return net::ERR_IO_PENDING;
+ }
+
+ int64 GetLength(const net::Int64CompletionCallback& size_callback) override {
+ // When async_task_runner_ is not set, return synchronously.
+ if (!async_task_runner_.get()) {
+ if (net_error_ == net::OK) {
+ return size_;
+ } else {
+ return net_error_;
+ }
+ }
+ if (net_error_ == net::OK) {
+ async_task_runner_->PostTask(FROM_HERE, base::Bind(size_callback, size_));
+ } else {
+ async_task_runner_->PostTask(
+ FROM_HERE,
+ base::Bind(size_callback, static_cast<int64_t>(net_error_)));
+ }
+ return net::ERR_IO_PENDING;
+ }
+
+ private:
+ int ReadImpl(scoped_refptr<net::IOBuffer> buf,
+ int buf_length,
+ const net::CompletionCallback& done) {
+ CHECK_GE(buf_length, 0);
+ int length = std::min(buf_length, buffer_->BytesRemaining());
+ memcpy(buf->data(), buffer_->data(), length);
+ buffer_->DidConsume(length);
+ if (done.is_null()) {
+ return length;
+ }
+ done.Run(length);
+ return net::ERR_IO_PENDING;
+ }
+
+ scoped_refptr<net::DrainableIOBuffer> buffer_;
+ scoped_refptr<base::SingleThreadTaskRunner> async_task_runner_;
+ int net_error_;
+ uint64_t size_;
+
+ DISALLOW_COPY_AND_ASSIGN(FakeFileStreamReader);
+};
+
+class MockFileStreamReaderProvider
+ : public BlobReader::FileStreamReaderProvider {
+ public:
+ ~MockFileStreamReaderProvider() override {}
+
+ MOCK_METHOD4(CreateForLocalFileMock,
+ FileStreamReader*(base::TaskRunner* task_runner,
+ const FilePath& file_path,
+ int64_t initial_offset,
+ const base::Time& expected_modification_time));
+ MOCK_METHOD4(CreateFileStreamReaderMock,
+ FileStreamReader*(const GURL& filesystem_url,
+ int64_t offset,
+ int64_t max_bytes_to_read,
+ const base::Time& expected_modification_time));
+ // Since we're returning a move-only type, we have to do some delegation for
+ // gmock.
+ scoped_ptr<FileStreamReader> CreateForLocalFile(
+ base::TaskRunner* task_runner,
+ const base::FilePath& file_path,
+ int64_t initial_offset,
+ const base::Time& expected_modification_time) override {
+ return make_scoped_ptr(CreateForLocalFileMock(
+ task_runner, file_path, initial_offset, expected_modification_time));
+ }
+
+ scoped_ptr<FileStreamReader> CreateFileStreamReader(
+ const GURL& filesystem_url,
+ int64_t offset,
+ int64_t max_bytes_to_read,
+ const base::Time& expected_modification_time) override {
+ return make_scoped_ptr(CreateFileStreamReaderMock(
+ filesystem_url, offset, max_bytes_to_read, expected_modification_time));
+ }
+};
+
+} // namespace
+
+class BlobReaderTest : public ::testing::Test {
+ public:
+ BlobReaderTest() {}
+ ~BlobReaderTest() override {}
+
+ void TearDown() override {
+ reader_.reset();
+ blob_handle_.reset();
+ message_loop_.RunUntilIdle();
+ base::RunLoop().RunUntilIdle();
+ }
+
+ protected:
+ void InitializeReader(BlobDataBuilder* builder) {
+ blob_handle_ = builder ? context_.AddFinishedBlob(builder).Pass() : nullptr;
+ provider_ = new MockFileStreamReaderProvider();
+ scoped_ptr<BlobReader::FileStreamReaderProvider> temp_ptr(provider_);
+ reader_.reset(new BlobReader(blob_handle_.get(), temp_ptr.Pass(),
+ message_loop_.task_runner().get()));
+ }
+
+ // Takes ownership of the file reader (the blob reader takes ownership).
+ void ExpectLocalFileCall(const FilePath& file_path,
+ base::Time modification_time,
+ uint64_t initial_offset,
+ FakeFileStreamReader* reader) {
+ EXPECT_CALL(*provider_, CreateForLocalFileMock(
+ message_loop_.task_runner().get(), file_path,
+ initial_offset, modification_time))
+ .WillOnce(testing::Return(reader));
+ }
+
+ // Takes ownership of the file reader (the blob reader takes ownership).
+ void ExpectFileSystemCall(const GURL& filesystem_url,
+ int64_t offset,
+ int64_t max_bytes_to_read,
+ base::Time expected_modification_time,
+ FakeFileStreamReader* reader) {
+ EXPECT_CALL(*provider_, CreateFileStreamReaderMock(
+ filesystem_url, offset, max_bytes_to_read,
+ expected_modification_time))
+ .WillOnce(testing::Return(reader));
+ }
+
+ void CheckSizeCalculatedSynchronously(size_t expected_size, int async_size) {
+ EXPECT_EQ(-1, async_size);
+ EXPECT_EQ(net::OK, reader_->net_error());
+ EXPECT_EQ(expected_size, reader_->total_size());
+ EXPECT_TRUE(reader_->total_size_calculated());
+ }
+
+ void CheckSizeNotCalculatedYet(int async_size) {
+ EXPECT_EQ(-1, async_size);
+ EXPECT_EQ(net::OK, reader_->net_error());
+ EXPECT_FALSE(reader_->total_size_calculated());
+ }
+
+ void CheckSizeCalculatedAsynchronously(size_t expected_size,
+ int async_result) {
+ EXPECT_EQ(net::OK, async_result);
+ EXPECT_EQ(net::OK, reader_->net_error());
+ EXPECT_EQ(expected_size, reader_->total_size());
+ EXPECT_TRUE(reader_->total_size_calculated());
+ }
+
+ scoped_refptr<net::IOBuffer> CreateBuffer(uint64_t size) {
+ return scoped_refptr<net::IOBuffer>(
+ new net::IOBuffer(static_cast<size_t>(size)));
+ }
+
+ bool IsReaderTotalSizeCalculated() {
+ return reader_->total_size_calculated();
+ }
+
+ BlobStorageContext context_;
+ scoped_ptr<BlobDataHandle> blob_handle_;
+ MockFileStreamReaderProvider* provider_ = nullptr;
+ base::MessageLoop message_loop_;
+ scoped_ptr<BlobReader> reader_;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(BlobReaderTest);
+};
+
+namespace {
+
+TEST_F(BlobReaderTest, BasicMemory) {
+ BlobDataBuilder b("uuid");
+ const std::string kData("Hello!!!");
+ const size_t kDataSize = 8ul;
+ b.AppendData(kData);
+ this->InitializeReader(&b);
+
+ int size_result = -1;
+ EXPECT_FALSE(IsReaderTotalSizeCalculated());
+ EXPECT_EQ(BlobReader::Status::DONE,
+ reader_->CalculateSize(base::Bind(&SetValue<int>, &size_result)));
+ CheckSizeCalculatedSynchronously(kDataSize, size_result);
+
+ scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kDataSize));
+
+ int bytes_read = 0;
+ int async_bytes_read = 0;
+ EXPECT_EQ(BlobReader::Status::DONE,
+ reader_->Read(buffer.get(), kDataSize, &bytes_read,
+ base::Bind(&SetValue<int>, &async_bytes_read)));
+ EXPECT_EQ(net::OK, reader_->net_error());
+ EXPECT_EQ(kDataSize, static_cast<size_t>(bytes_read));
+ EXPECT_EQ(0, async_bytes_read);
+ EXPECT_EQ(0, memcmp(buffer->data(), "Hello!!!", kDataSize));
+}
+
+TEST_F(BlobReaderTest, BasicFile) {
+ BlobDataBuilder b("uuid");
+ const FilePath kPath = FilePath::FromUTF8Unsafe("/fake/file.txt");
+ const std::string kData = "FileData!!!";
+ const base::Time kTime = base::Time::Now();
+ b.AppendFile(kPath, 0, kData.size(), kTime);
+ this->InitializeReader(&b);
+
+ // Non-async reader.
+ ExpectLocalFileCall(kPath, kTime, 0, new FakeFileStreamReader(kData));
+
+ int size_result = -1;
+ EXPECT_FALSE(IsReaderTotalSizeCalculated());
+ EXPECT_EQ(BlobReader::Status::DONE,
+ reader_->CalculateSize(base::Bind(&SetValue<int>, &size_result)));
+ CheckSizeCalculatedSynchronously(kData.size(), size_result);
+
+ scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kData.size()));
+
+ int bytes_read = 0;
+ int async_bytes_read = 0;
+ EXPECT_EQ(BlobReader::Status::DONE,
+ reader_->Read(buffer.get(), kData.size(), &bytes_read,
+ base::Bind(&SetValue<int>, &async_bytes_read)));
+ EXPECT_EQ(net::OK, reader_->net_error());
+ EXPECT_EQ(kData.size(), static_cast<size_t>(bytes_read));
+ EXPECT_EQ(0, async_bytes_read);
+ EXPECT_EQ(0, memcmp(buffer->data(), "FileData!!!", kData.size()));
+}
+
+TEST_F(BlobReaderTest, BasicFileSystem) {
+ BlobDataBuilder b("uuid");
+ const GURL kURL("file://test_file/here.txt");
+ const std::string kData = "FileData!!!";
+ const base::Time kTime = base::Time::Now();
+ b.AppendFileSystemFile(kURL, 0, kData.size(), kTime);
+ this->InitializeReader(&b);
+
+ // Non-async reader.
+ ExpectFileSystemCall(kURL, 0, kData.size(), kTime,
+ new FakeFileStreamReader(kData));
+
+ int size_result = -1;
+ EXPECT_FALSE(IsReaderTotalSizeCalculated());
+ EXPECT_EQ(BlobReader::Status::DONE,
+ reader_->CalculateSize(base::Bind(&SetValue<int>, &size_result)));
+ CheckSizeCalculatedSynchronously(kData.size(), size_result);
+
+ scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kData.size()));
+
+ int bytes_read = 0;
+ int async_bytes_read = 0;
+ EXPECT_EQ(BlobReader::Status::DONE,
+ reader_->Read(buffer.get(), kData.size(), &bytes_read,
+ base::Bind(&SetValue<int>, &async_bytes_read)));
+ EXPECT_EQ(net::OK, reader_->net_error());
+ EXPECT_EQ(kData.size(), static_cast<size_t>(bytes_read));
+ EXPECT_EQ(0, async_bytes_read);
+ EXPECT_EQ(0, memcmp(buffer->data(), "FileData!!!", kData.size()));
+}
+
+TEST_F(BlobReaderTest, BasicDiskCache) {
+ scoped_ptr<disk_cache::Backend> cache =
+ CreateInMemoryDiskCache(message_loop_.task_runner());
+ ASSERT_TRUE(cache);
+
+ BlobDataBuilder b("uuid");
+ const std::string kData = "Test Blob Data";
+ scoped_refptr<BlobDataBuilder::DataHandle> data_handle =
+ new EmptyDataHandle();
+ disk_cache::ScopedEntryPtr entry =
+ CreateDiskCacheEntry(cache.get(), "test entry", kData);
+ b.AppendDiskCacheEntry(data_handle, entry.get(), kTestDiskCacheStreamIndex);
+ this->InitializeReader(&b);
+
+ int size_result = -1;
+ EXPECT_FALSE(IsReaderTotalSizeCalculated());
+ EXPECT_EQ(BlobReader::Status::DONE,
+ reader_->CalculateSize(base::Bind(&SetValue<int>, &size_result)));
+ CheckSizeCalculatedSynchronously(kData.size(), size_result);
+
+ scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kData.size()));
+
+ int bytes_read = 0;
+ int async_bytes_read = 0;
+ EXPECT_EQ(BlobReader::Status::DONE,
+ reader_->Read(buffer.get(), kData.size(), &bytes_read,
+ base::Bind(&SetValue<int>, &async_bytes_read)));
+ EXPECT_EQ(net::OK, reader_->net_error());
+ EXPECT_EQ(kData.size(), static_cast<size_t>(bytes_read));
+ EXPECT_EQ(0, async_bytes_read);
+ EXPECT_EQ(0, memcmp(buffer->data(), "Test Blob Data", kData.size()));
+}
+
+TEST_F(BlobReaderTest, BufferLargerThanMemory) {
+ BlobDataBuilder b("uuid");
+ const std::string kData("Hello!!!");
+ const size_t kDataSize = 8ul;
+ const size_t kBufferSize = 10ul;
+ b.AppendData(kData);
+ this->InitializeReader(&b);
+
+ int size_result = -1;
+ EXPECT_FALSE(IsReaderTotalSizeCalculated());
+ EXPECT_EQ(BlobReader::Status::DONE,
+ reader_->CalculateSize(base::Bind(&SetValue<int>, &size_result)));
+ CheckSizeCalculatedSynchronously(kData.size(), size_result);
+
+ scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kBufferSize));
+
+ int bytes_read = 0;
+ int async_bytes_read = 0;
+ EXPECT_EQ(BlobReader::Status::DONE,
+ reader_->Read(buffer.get(), kBufferSize, &bytes_read,
+ base::Bind(&SetValue<int>, &async_bytes_read)));
+ EXPECT_EQ(net::OK, reader_->net_error());
+ EXPECT_EQ(kDataSize, static_cast<size_t>(bytes_read));
+ EXPECT_EQ(0, async_bytes_read);
+ EXPECT_EQ(0, memcmp(buffer->data(), "Hello!!!", kDataSize));
+}
+
+TEST_F(BlobReaderTest, MemoryRange) {
+ BlobDataBuilder b("uuid");
+ const std::string kData("Hello!!!");
+ const size_t kDataSize = 8ul;
+ const size_t kSeekOffset = 2ul;
+ const uint64_t kReadLength = 4ull;
+ b.AppendData(kData);
+ this->InitializeReader(&b);
+
+ int size_result = -1;
+ EXPECT_FALSE(IsReaderTotalSizeCalculated());
+ EXPECT_EQ(BlobReader::Status::DONE,
+ reader_->CalculateSize(base::Bind(&SetValue<int>, &size_result)));
+ CheckSizeCalculatedSynchronously(kData.size(), size_result);
+
+ scoped_refptr<net::IOBuffer> buffer = CreateBuffer(kReadLength);
+
+ reader_->SetReadRange(kSeekOffset, kReadLength);
+ int bytes_read = 0;
+ int async_bytes_read = 0;
+ EXPECT_EQ(BlobReader::Status::DONE,
+ reader_->Read(buffer.get(), kDataSize - kSeekOffset, &bytes_read,
+ base::Bind(&SetValue<int>, &async_bytes_read)));
+ EXPECT_EQ(net::OK, reader_->net_error());
+ EXPECT_EQ(kReadLength, static_cast<size_t>(bytes_read));
+ EXPECT_EQ(0, async_bytes_read);
+ EXPECT_EQ(0, memcmp(buffer->data(), "llo!", kReadLength));
+}
+
+TEST_F(BlobReaderTest, BufferSmallerThanMemory) {
+ BlobDataBuilder b("uuid");
+ const std::string kData("Hello!!!");
+ const size_t kBufferSize = 4ul;
+ b.AppendData(kData);
+ this->InitializeReader(&b);
+
+ int size_result = -1;
+ EXPECT_FALSE(IsReaderTotalSizeCalculated());
+ EXPECT_EQ(BlobReader::Status::DONE,
+ reader_->CalculateSize(base::Bind(&SetValue<int>, &size_result)));
+ CheckSizeCalculatedSynchronously(kData.size(), size_result);
+
+ scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kBufferSize));
+
+ int bytes_read = 0;
+ int async_bytes_read = 0;
+ EXPECT_EQ(BlobReader::Status::DONE,
+ reader_->Read(buffer.get(), kBufferSize, &bytes_read,
+ base::Bind(&SetValue<int>, &async_bytes_read)));
+ EXPECT_EQ(net::OK, reader_->net_error());
+ EXPECT_EQ(kBufferSize, static_cast<size_t>(bytes_read));
+ EXPECT_EQ(0, async_bytes_read);
+ EXPECT_EQ(0, memcmp(buffer->data(), "Hell", kBufferSize));
+
+ bytes_read = 0;
+ EXPECT_EQ(BlobReader::Status::DONE,
+ reader_->Read(buffer.get(), kBufferSize, &bytes_read,
+ base::Bind(&SetValue<int>, &async_bytes_read)));
+ EXPECT_EQ(net::OK, reader_->net_error());
+ EXPECT_EQ(kBufferSize, static_cast<size_t>(bytes_read));
+ EXPECT_EQ(0, async_bytes_read);
+ EXPECT_EQ(0, memcmp(buffer->data(), "o!!!", kBufferSize));
+}
+
+TEST_F(BlobReaderTest, SegmentedBufferAndMemory) {
+ BlobDataBuilder b("uuid");
+ const size_t kNumItems = 10;
+ const size_t kItemSize = 6;
+ const size_t kBufferSize = 10;
+ const size_t kTotalSize = kNumItems * kItemSize;
+ char current_value = 0;
+ for (size_t i = 0; i < kNumItems; i++) {
+ char buf[kItemSize];
+ for (size_t j = 0; j < kItemSize; j++) {
+ buf[j] = current_value++;
+ }
+ b.AppendData(buf, kItemSize);
+ }
+ this->InitializeReader(&b);
+
+ int size_result = -1;
+ EXPECT_FALSE(IsReaderTotalSizeCalculated());
+ EXPECT_EQ(BlobReader::Status::DONE,
+ reader_->CalculateSize(base::Bind(&SetValue<int>, &size_result)));
+ CheckSizeCalculatedSynchronously(kTotalSize, size_result);
+
+ scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kBufferSize));
+
+ current_value = 0;
+ for (size_t i = 0; i < kTotalSize / kBufferSize; i++) {
+ int bytes_read = 0;
+ int async_bytes_read = 0;
+ EXPECT_EQ(BlobReader::Status::DONE,
+ reader_->Read(buffer.get(), kBufferSize, &bytes_read,
+ base::Bind(&SetValue<int>, &async_bytes_read)));
+ EXPECT_EQ(net::OK, reader_->net_error());
+ EXPECT_EQ(kBufferSize, static_cast<size_t>(bytes_read));
+ EXPECT_EQ(0, async_bytes_read);
+ for (size_t j = 0; j < kBufferSize; j++) {
+ EXPECT_EQ(current_value, buffer->data()[j]);
+ current_value++;
+ }
+ }
+}
+
+TEST_F(BlobReaderTest, FileAsync) {
+ BlobDataBuilder b("uuid");
+ const FilePath kPath = FilePath::FromUTF8Unsafe("/fake/file.txt");
+ const std::string kData = "FileData!!!";
+ const base::Time kTime = base::Time::Now();
+ b.AppendFile(kPath, 0, kData.size(), kTime);
+ this->InitializeReader(&b);
+
+ scoped_ptr<FakeFileStreamReader> reader(new FakeFileStreamReader(kData));
+ reader->SetAsyncRunner(message_loop_.task_runner().get());
+
+ ExpectLocalFileCall(kPath, kTime, 0, reader.release());
+
+ int size_result = -1;
+ EXPECT_FALSE(IsReaderTotalSizeCalculated());
+ EXPECT_EQ(BlobReader::Status::IO_PENDING,
+ reader_->CalculateSize(base::Bind(&SetValue<int>, &size_result)));
+ CheckSizeNotCalculatedYet(size_result);
+ message_loop_.RunUntilIdle();
+ CheckSizeCalculatedAsynchronously(kData.size(), size_result);
+
+ scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kData.size()));
+
+ int bytes_read = 0;
+ int async_bytes_read = 0;
+ EXPECT_EQ(BlobReader::Status::IO_PENDING,
+ reader_->Read(buffer.get(), kData.size(), &bytes_read,
+ base::Bind(&SetValue<int>, &async_bytes_read)));
+ message_loop_.RunUntilIdle();
+ EXPECT_EQ(net::OK, reader_->net_error());
+ EXPECT_EQ(kData.size(), static_cast<size_t>(async_bytes_read));
+ EXPECT_EQ(0, bytes_read);
+ EXPECT_EQ(0, memcmp(buffer->data(), "FileData!!!", kData.size()));
+}
+
+TEST_F(BlobReaderTest, FileSystemAsync) {
+ BlobDataBuilder b("uuid");
+ const GURL kURL("file://test_file/here.txt");
+ const std::string kData = "FileData!!!";
+ const base::Time kTime = base::Time::Now();
+ b.AppendFileSystemFile(kURL, 0, kData.size(), kTime);
+ this->InitializeReader(&b);
+
+ scoped_ptr<FakeFileStreamReader> reader(new FakeFileStreamReader(kData));
+ reader->SetAsyncRunner(message_loop_.task_runner().get());
+
+ ExpectFileSystemCall(kURL, 0, kData.size(), kTime, reader.release());
+
+ int size_result = -1;
+ EXPECT_FALSE(IsReaderTotalSizeCalculated());
+ EXPECT_EQ(BlobReader::Status::IO_PENDING,
+ reader_->CalculateSize(base::Bind(&SetValue<int>, &size_result)));
+ CheckSizeNotCalculatedYet(size_result);
+ message_loop_.RunUntilIdle();
+ CheckSizeCalculatedAsynchronously(kData.size(), size_result);
+
+ scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kData.size()));
+
+ int bytes_read = 0;
+ int async_bytes_read = 0;
+ EXPECT_EQ(BlobReader::Status::IO_PENDING,
+ reader_->Read(buffer.get(), kData.size(), &bytes_read,
+ base::Bind(&SetValue<int>, &async_bytes_read)));
+ message_loop_.RunUntilIdle();
+ EXPECT_EQ(net::OK, reader_->net_error());
+ EXPECT_EQ(kData.size(), static_cast<size_t>(async_bytes_read));
+ EXPECT_EQ(0, bytes_read);
+ EXPECT_EQ(0, memcmp(buffer->data(), "FileData!!!", kData.size()));
+}
+
+TEST_F(BlobReaderTest, DiskCacheAsync) {
+ scoped_ptr<disk_cache::Backend> cache =
+ CreateInMemoryDiskCache(message_loop_.task_runner());
+ ASSERT_TRUE(cache);
+
+ BlobDataBuilder b("uuid");
+ const std::string kData = "Test Blob Data";
+ scoped_refptr<BlobDataBuilder::DataHandle> data_handle =
+ new EmptyDataHandle();
+ scoped_ptr<DelayedReadEntry> delayed_read_entry(new DelayedReadEntry(
+ CreateDiskCacheEntry(cache.get(), "test entry", kData).Pass()));
+ b.AppendDiskCacheEntry(data_handle, delayed_read_entry.get(),
+ kTestDiskCacheStreamIndex);
+ this->InitializeReader(&b);
+
+ int size_result = -1;
+ EXPECT_FALSE(IsReaderTotalSizeCalculated());
+ EXPECT_EQ(BlobReader::Status::DONE,
+ reader_->CalculateSize(base::Bind(&SetValue<int>, &size_result)));
+ CheckSizeCalculatedSynchronously(kData.size(), size_result);
+
+ scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kData.size()));
+
+ int bytes_read = 0;
+ int async_bytes_read = 0;
+ EXPECT_EQ(BlobReader::Status::IO_PENDING,
+ reader_->Read(buffer.get(), kData.size(), &bytes_read,
+ base::Bind(&SetValue<int>, &async_bytes_read)));
+ EXPECT_TRUE(delayed_read_entry->HasPendingReadCallbacks());
+ delayed_read_entry->RunPendingReadCallbacks();
+ EXPECT_EQ(net::OK, reader_->net_error());
+ EXPECT_EQ(0, bytes_read);
+ EXPECT_EQ(kData.size(), static_cast<size_t>(async_bytes_read));
+ EXPECT_EQ(0, memcmp(buffer->data(), "Test Blob Data", kData.size()));
+}
+
+TEST_F(BlobReaderTest, FileRange) {
+ BlobDataBuilder b("uuid");
+ const FilePath kPath = FilePath::FromUTF8Unsafe("/fake/file.txt");
+ // We check the offset in the ExpectLocalFileCall mock.
+ const std::string kRangeData = "leD";
+ const std::string kData = "FileData!!!";
+ const uint64_t kOffset = 2;
+ const uint64_t kReadLength = 3;
+ const base::Time kTime = base::Time::Now();
+ b.AppendFile(kPath, 0, kData.size(), kTime);
+ this->InitializeReader(&b);
+
+ scoped_ptr<FakeFileStreamReader> reader(new FakeFileStreamReader(kData));
+ reader->SetAsyncRunner(message_loop_.task_runner().get());
+ ExpectLocalFileCall(kPath, kTime, 0, reader.release());
+
+ // We create the reader again with the offset after the seek.
+ reader.reset(new FakeFileStreamReader(kRangeData));
+ reader->SetAsyncRunner(message_loop_.task_runner().get());
+ ExpectLocalFileCall(kPath, kTime, kOffset, reader.release());
+
+ int size_result = -1;
+ EXPECT_EQ(BlobReader::Status::IO_PENDING,
+ reader_->CalculateSize(base::Bind(&SetValue<int>, &size_result)));
+ message_loop_.RunUntilIdle();
+
+ scoped_refptr<net::IOBuffer> buffer = CreateBuffer(kReadLength);
+ EXPECT_EQ(BlobReader::Status::DONE,
+ reader_->SetReadRange(kOffset, kReadLength));
+
+ int bytes_read = 0;
+ int async_bytes_read = 0;
+ EXPECT_EQ(BlobReader::Status::IO_PENDING,
+ reader_->Read(buffer.get(), kReadLength, &bytes_read,
+ base::Bind(&SetValue<int>, &async_bytes_read)));
+ message_loop_.RunUntilIdle();
+ EXPECT_EQ(net::OK, reader_->net_error());
+ EXPECT_EQ(kReadLength, static_cast<size_t>(async_bytes_read));
+ EXPECT_EQ(0, bytes_read);
+ EXPECT_EQ(0, memcmp(buffer->data(), "leD", kReadLength));
+}
+
+TEST_F(BlobReaderTest, DiskCacheRange) {
+ scoped_ptr<disk_cache::Backend> cache =
+ CreateInMemoryDiskCache(message_loop_.task_runner());
+ ASSERT_TRUE(cache);
+
+ BlobDataBuilder b("uuid");
+ const std::string kData = "Test Blob Data";
+ const uint64_t kOffset = 2;
+ const uint64_t kReadLength = 3;
+ scoped_refptr<BlobDataBuilder::DataHandle> data_handle =
+ new EmptyDataHandle();
+ disk_cache::ScopedEntryPtr entry =
+ CreateDiskCacheEntry(cache.get(), "test entry", kData);
+ b.AppendDiskCacheEntry(data_handle, entry.get(), kTestDiskCacheStreamIndex);
+ this->InitializeReader(&b);
+
+ int size_result = -1;
+ EXPECT_EQ(BlobReader::Status::DONE,
+ reader_->CalculateSize(base::Bind(&SetValue<int>, &size_result)));
+
+ scoped_refptr<net::IOBuffer> buffer = CreateBuffer(kReadLength);
+ EXPECT_EQ(BlobReader::Status::DONE,
+ reader_->SetReadRange(kOffset, kReadLength));
+
+ int bytes_read = 0;
+ int async_bytes_read = 0;
+ EXPECT_EQ(BlobReader::Status::DONE,
+ reader_->Read(buffer.get(), kReadLength, &bytes_read,
+ base::Bind(&SetValue<int>, &async_bytes_read)));
+ EXPECT_EQ(net::OK, reader_->net_error());
+ EXPECT_EQ(kReadLength, static_cast<size_t>(bytes_read));
+ EXPECT_EQ(0, async_bytes_read);
+ EXPECT_EQ(0, memcmp(buffer->data(), "st ", kReadLength));
+}
+
+TEST_F(BlobReaderTest, FileSomeAsyncSegmentedOffsetsUnknownSizes) {
+ // This test includes:
+ // * Unknown file sizes (item length of uint64::max) for every other item.
+ // * Offsets for every 3rd file item.
+ // * Non-async reader for every 4th file item.
+ BlobDataBuilder b("uuid");
+ const FilePath kPathBase = FilePath::FromUTF8Unsafe("/fake/file.txt");
+ const base::Time kTime = base::Time::Now();
+ const size_t kNumItems = 10;
+ const size_t kItemSize = 6;
+ const size_t kBufferSize = 10;
+ const size_t kTotalSize = kNumItems * kItemSize;
+ char current_value = 0;
+ // Create blob and reader.
+ for (size_t i = 0; i < kNumItems; i++) {
+ current_value += kItemSize;
+ FilePath path = kPathBase.Append(
+ FilePath::FromUTF8Unsafe(base::StringPrintf("%d", current_value)));
+ uint64_t offset = i % 3 == 0 ? 1 : 0;
+ uint64_t size =
+ i % 2 == 0 ? kItemSize : std::numeric_limits<uint64_t>::max();
+ b.AppendFile(path, offset, size, kTime);
+ }
+ this->InitializeReader(&b);
+
+ // Set expectations.
+ current_value = 0;
+ for (size_t i = 0; i < kNumItems; i++) {
+ uint64_t offset = i % 3 == 0 ? 1 : 0;
+ scoped_ptr<char[]> buf(new char[kItemSize + offset]);
+ if (offset > 0) {
+ memset(buf.get(), 7, offset);
+ }
+ for (size_t j = 0; j < kItemSize; j++) {
+ buf.get()[j + offset] = current_value++;
+ }
+ scoped_ptr<FakeFileStreamReader> reader(new FakeFileStreamReader(
+ std::string(buf.get() + offset, kItemSize), kItemSize + offset));
+ if (i % 4 != 0) {
+ reader->SetAsyncRunner(message_loop_.task_runner().get());
+ }
+ FilePath path = kPathBase.Append(
+ FilePath::FromUTF8Unsafe(base::StringPrintf("%d", current_value)));
+ ExpectLocalFileCall(path, kTime, offset, reader.release());
+ }
+
+ int size_result = -1;
+ EXPECT_FALSE(IsReaderTotalSizeCalculated());
+ EXPECT_EQ(BlobReader::Status::IO_PENDING,
+ reader_->CalculateSize(base::Bind(&SetValue<int>, &size_result)));
+ CheckSizeNotCalculatedYet(size_result);
+ message_loop_.RunUntilIdle();
+ CheckSizeCalculatedAsynchronously(kTotalSize, size_result);
+
+ scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kBufferSize));
+
+ current_value = 0;
+ for (size_t i = 0; i < kTotalSize / kBufferSize; i++) {
+ int bytes_read = 0;
+ int async_bytes_read = 0;
+ EXPECT_EQ(BlobReader::Status::IO_PENDING,
+ reader_->Read(buffer.get(), kBufferSize, &bytes_read,
+ base::Bind(&SetValue<int>, &async_bytes_read)));
+ message_loop_.RunUntilIdle();
+ EXPECT_EQ(net::OK, reader_->net_error());
+ EXPECT_EQ(0, bytes_read);
+ EXPECT_EQ(kBufferSize, static_cast<size_t>(async_bytes_read));
+ for (size_t j = 0; j < kBufferSize; j++) {
+ EXPECT_EQ(current_value, buffer->data()[j]);
+ current_value++;
+ }
+ }
+}
+
+TEST_F(BlobReaderTest, MixedContent) {
+ // Includes data, a file, and a disk cache entry.
+ scoped_ptr<disk_cache::Backend> cache =
+ CreateInMemoryDiskCache(message_loop_.task_runner());
+ ASSERT_TRUE(cache);
+
+ BlobDataBuilder b("uuid");
+ const std::string kData1("Hello ");
+ const std::string kData2("there. ");
+ const std::string kData3("This ");
+ const std::string kData4("is multi-content.");
+ const uint64_t kDataSize = 35;
+
+ const base::Time kTime = base::Time::Now();
+ const FilePath kData1Path = FilePath::FromUTF8Unsafe("/fake/file.txt");
+
+ disk_cache::ScopedEntryPtr entry3 =
+ CreateDiskCacheEntry(cache.get(), "test entry", kData3);
+
+ b.AppendFile(kData1Path, 0, kData1.size(), kTime);
+ b.AppendData(kData2);
+ b.AppendDiskCacheEntry(
+ scoped_refptr<BlobDataBuilder::DataHandle>(new EmptyDataHandle()),
+ entry3.get(), kTestDiskCacheStreamIndex);
+ b.AppendData(kData4);
+
+ this->InitializeReader(&b);
+
+ scoped_ptr<FakeFileStreamReader> reader(new FakeFileStreamReader(kData1));
+ reader->SetAsyncRunner(message_loop_.task_runner().get());
+ ExpectLocalFileCall(kData1Path, kTime, 0, reader.release());
+
+ int size_result = -1;
+ EXPECT_FALSE(IsReaderTotalSizeCalculated());
+ EXPECT_EQ(BlobReader::Status::IO_PENDING,
+ reader_->CalculateSize(base::Bind(&SetValue<int>, &size_result)));
+ CheckSizeNotCalculatedYet(size_result);
+ message_loop_.RunUntilIdle();
+ CheckSizeCalculatedAsynchronously(kDataSize, size_result);
+
+ scoped_refptr<net::IOBuffer> buffer = CreateBuffer(kDataSize);
+
+ int bytes_read = 0;
+ int async_bytes_read = 0;
+ EXPECT_EQ(BlobReader::Status::IO_PENDING,
+ reader_->Read(buffer.get(), kDataSize, &bytes_read,
+ base::Bind(&SetValue<int>, &async_bytes_read)));
+ EXPECT_EQ(0, async_bytes_read);
+ message_loop_.RunUntilIdle();
+ EXPECT_EQ(net::OK, reader_->net_error());
+ EXPECT_EQ(0, bytes_read);
+ EXPECT_EQ(kDataSize, static_cast<size_t>(async_bytes_read));
+ EXPECT_EQ(0, memcmp(buffer->data(), "Hello there. This is multi-content.",
+ kDataSize));
+}
+
+TEST_F(BlobReaderTest, StateErrors) {
+ // Test common variables
+ int bytes_read = -1;
+ int async_bytes_read = -1;
+ int size_result = -1;
+ const std::string kData("Hello!!!");
+
+ // Case: Blob handle is a nullptr.
+ InitializeReader(nullptr);
+ EXPECT_EQ(BlobReader::Status::NET_ERROR,
+ reader_->CalculateSize(base::Bind(&SetValue<int>, &size_result)));
+ EXPECT_EQ(net::ERR_FILE_NOT_FOUND, reader_->net_error());
+ EXPECT_EQ(BlobReader::Status::NET_ERROR, reader_->SetReadRange(0, 10));
+ EXPECT_EQ(net::ERR_FILE_NOT_FOUND, reader_->net_error());
+ scoped_refptr<net::IOBuffer> buffer = CreateBuffer(10);
+ EXPECT_EQ(BlobReader::Status::NET_ERROR,
+ reader_->Read(buffer.get(), 10, &bytes_read,
+ base::Bind(&SetValue<int>, &async_bytes_read)));
+ EXPECT_EQ(net::ERR_FILE_NOT_FOUND, reader_->net_error());
+
+ // Case: Not calling CalculateSize before SetReadRange.
+ BlobDataBuilder builder1("uuid1");
+ builder1.AppendData(kData);
+ InitializeReader(&builder1);
+ EXPECT_EQ(BlobReader::Status::NET_ERROR, reader_->SetReadRange(0, 10));
+ EXPECT_EQ(net::ERR_FAILED, reader_->net_error());
+ EXPECT_EQ(BlobReader::Status::NET_ERROR,
+ reader_->Read(buffer.get(), 10, &bytes_read,
+ base::Bind(&SetValue<int>, &async_bytes_read)));
+
+ // Case: Not calling CalculateSize before Read.
+ BlobDataBuilder builder2("uuid2");
+ builder2.AppendData(kData);
+ InitializeReader(&builder2);
+ EXPECT_EQ(BlobReader::Status::NET_ERROR,
+ reader_->Read(buffer.get(), 10, &bytes_read,
+ base::Bind(&SetValue<int>, &async_bytes_read)));
+}
+
+TEST_F(BlobReaderTest, FileErrorsSync) {
+ int size_result = -1;
+ const FilePath kPath = FilePath::FromUTF8Unsafe("/fake/file.txt");
+ const std::string kData = "FileData!!!";
+ const base::Time kTime = base::Time::Now();
+
+ // Case: Error on length query.
+ BlobDataBuilder builder1("uuid1");
+ builder1.AppendFile(kPath, 0, kData.size(), kTime);
+ this->InitializeReader(&builder1);
+ FakeFileStreamReader* reader = new FakeFileStreamReader(kData);
+ reader->SetReturnError(net::ERR_FILE_NOT_FOUND);
+ ExpectLocalFileCall(kPath, kTime, 0, reader);
+
+ EXPECT_EQ(BlobReader::Status::NET_ERROR,
+ reader_->CalculateSize(base::Bind(&SetValue<int>, &size_result)));
+ EXPECT_EQ(net::ERR_FILE_NOT_FOUND, reader_->net_error());
+
+ // Case: Error on read.
+ BlobDataBuilder builder2("uuid2");
+ builder2.AppendFile(kPath, 0, kData.size(), kTime);
+ this->InitializeReader(&builder2);
+ reader = new FakeFileStreamReader(kData);
+ ExpectLocalFileCall(kPath, kTime, 0, reader);
+ EXPECT_EQ(BlobReader::Status::DONE,
+ reader_->CalculateSize(base::Bind(&SetValue<int>, &size_result)));
+ reader->SetReturnError(net::ERR_FILE_NOT_FOUND);
+
+ scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kData.size()));
+ int bytes_read = 0;
+ int async_bytes_read = 0;
+ EXPECT_EQ(BlobReader::Status::NET_ERROR,
+ reader_->Read(buffer.get(), kData.size(), &bytes_read,
+ base::Bind(&SetValue<int>, &async_bytes_read)));
+ EXPECT_EQ(net::ERR_FILE_NOT_FOUND, reader_->net_error());
+}
+
+TEST_F(BlobReaderTest, FileErrorsAsync) {
+ int size_result = -1;
+ const FilePath kPath = FilePath::FromUTF8Unsafe("/fake/file.txt");
+ const std::string kData = "FileData!!!";
+ const base::Time kTime = base::Time::Now();
+
+ // Case: Error on length query.
+ BlobDataBuilder builder1("uuid1");
+ builder1.AppendFile(kPath, 0, kData.size(), kTime);
+ this->InitializeReader(&builder1);
+ FakeFileStreamReader* reader = new FakeFileStreamReader(kData);
+ reader->SetAsyncRunner(message_loop_.task_runner().get());
+ reader->SetReturnError(net::ERR_FILE_NOT_FOUND);
+ ExpectLocalFileCall(kPath, kTime, 0, reader);
+
+ EXPECT_EQ(BlobReader::Status::IO_PENDING,
+ reader_->CalculateSize(base::Bind(&SetValue<int>, &size_result)));
+ EXPECT_EQ(net::OK, reader_->net_error());
+ message_loop_.RunUntilIdle();
+ EXPECT_EQ(net::ERR_FILE_NOT_FOUND, size_result);
+ EXPECT_EQ(net::ERR_FILE_NOT_FOUND, reader_->net_error());
+
+ // Case: Error on read.
+ BlobDataBuilder builder2("uuid2");
+ builder2.AppendFile(kPath, 0, kData.size(), kTime);
+ this->InitializeReader(&builder2);
+ reader = new FakeFileStreamReader(kData);
+ ExpectLocalFileCall(kPath, kTime, 0, reader);
+ EXPECT_EQ(BlobReader::Status::DONE,
+ reader_->CalculateSize(base::Bind(&SetValue<int>, &size_result)));
+ reader->SetReturnError(net::ERR_FILE_NOT_FOUND);
+ reader->SetAsyncRunner(message_loop_.task_runner().get());
+
+ scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kData.size()));
+ int bytes_read = 0;
+ int async_bytes_read = 0;
+ EXPECT_EQ(BlobReader::Status::IO_PENDING,
+ reader_->Read(buffer.get(), kData.size(), &bytes_read,
+ base::Bind(&SetValue<int>, &async_bytes_read)));
+ EXPECT_EQ(net::OK, reader_->net_error());
+ message_loop_.RunUntilIdle();
+ EXPECT_EQ(net::ERR_FILE_NOT_FOUND, async_bytes_read);
+ EXPECT_EQ(net::ERR_FILE_NOT_FOUND, reader_->net_error());
+}
+
+TEST_F(BlobReaderTest, RangeError) {
+ const std::string kData("Hello!!!");
+ const size_t kDataSize = 8ul;
+ const uint64_t kReadLength = 4ull;
+
+ // Case: offset too high.
+ BlobDataBuilder b("uuid1");
+ b.AppendData(kData);
+ this->InitializeReader(&b);
+ int size_result = -1;
+ EXPECT_EQ(BlobReader::Status::DONE,
+ reader_->CalculateSize(base::Bind(&SetValue<int>, &size_result)));
+ scoped_refptr<net::IOBuffer> buffer = CreateBuffer(kDataSize);
+ EXPECT_EQ(BlobReader::Status::NET_ERROR,
+ reader_->SetReadRange(kDataSize + 1, kReadLength));
+ EXPECT_EQ(net::ERR_FILE_NOT_FOUND, reader_->net_error());
+
+ // Case: length too long.
+ BlobDataBuilder b2("uuid2");
+ b2.AppendData(kData);
+ this->InitializeReader(&b2);
+ size_result = -1;
+ EXPECT_EQ(BlobReader::Status::DONE,
+ reader_->CalculateSize(base::Bind(&SetValue<int>, &size_result)));
+ buffer = CreateBuffer(kDataSize + 1);
+ EXPECT_EQ(BlobReader::Status::NET_ERROR,
+ reader_->SetReadRange(0, kDataSize + 1));
+ EXPECT_EQ(net::ERR_FILE_NOT_FOUND, reader_->net_error());
+}
+
+} // namespace
+} // namespace storage
diff --git a/content/browser/fileapi/blob_url_request_job_unittest.cc b/content/browser/fileapi/blob_url_request_job_unittest.cc
index 9debd4d..96becf0 100644
--- a/content/browser/fileapi/blob_url_request_job_unittest.cc
+++ b/content/browser/fileapi/blob_url_request_job_unittest.cc
@@ -115,7 +115,7 @@ class BlobURLRequestJobTest : public testing::Test {
net::URLRequest* request,
net::NetworkDelegate* network_delegate) const override {
return new BlobURLRequestJob(request, network_delegate,
- test_->GetSnapshotFromBuilder(),
+ test_->GetHandleFromBuilder(),
test_->file_system_context_.get(),
base::ThreadTaskRunnerHandle::Get().get());
}
@@ -157,6 +157,7 @@ class BlobURLRequestJobTest : public testing::Test {
void TearDown() override {
blob_handle_.reset();
+ request_.reset();
// Clean up for ASAN
base::RunLoop run_loop;
run_loop.RunUntilIdle();
@@ -282,18 +283,19 @@ class BlobURLRequestJobTest : public testing::Test {
*expected_result += std::string(kTestFileSystemFileData2 + 6, 7);
}
- scoped_ptr<BlobDataSnapshot> GetSnapshotFromBuilder() {
+ storage::BlobDataHandle* GetHandleFromBuilder() {
if (!blob_handle_) {
blob_handle_ = blob_context_.AddFinishedBlob(blob_data_.get()).Pass();
}
- return blob_handle_->CreateSnapshot().Pass();
+ return blob_handle_.get();
}
// This only works if all the Blob items have a definite pre-computed length.
// Otherwise, this will fail a CHECK.
int64 GetTotalBlobLength() {
int64 total = 0;
- scoped_ptr<BlobDataSnapshot> data = GetSnapshotFromBuilder();
+ scoped_ptr<BlobDataSnapshot> data =
+ GetHandleFromBuilder()->CreateSnapshot();
const auto& items = data->items();
for (const auto& item : items) {
int64 length = base::checked_cast<int64>(item->length());
@@ -491,6 +493,27 @@ TEST_F(BlobURLRequestJobTest, TestGetRangeRequest2) {
EXPECT_EQ(total, length);
}
+TEST_F(BlobURLRequestJobTest, TestGetRangeRequest3) {
+ SetUpFileSystem();
+ std::string result;
+ BuildComplicatedData(&result);
+ net::HttpRequestHeaders extra_headers;
+ extra_headers.SetHeader(net::HttpRequestHeaders::kRange,
+ net::HttpByteRange::Bounded(0, 2).GetHeaderValue());
+ expected_status_code_ = 206;
+ expected_response_ = result.substr(0, 3);
+ TestRequest("GET", extra_headers);
+
+ EXPECT_EQ(3, request_->response_headers()->GetContentLength());
+
+ int64 first = 0, last = 0, length = 0;
+ EXPECT_TRUE(
+ request_->response_headers()->GetContentRange(&first, &last, &length));
+ EXPECT_EQ(0, first);
+ EXPECT_EQ(2, last);
+ EXPECT_EQ(GetTotalBlobLength(), length);
+}
+
TEST_F(BlobURLRequestJobTest, TestExtraHeaders) {
blob_data_->set_content_type(kTestContentType);
blob_data_->set_content_disposition(kTestContentDisposition);
diff --git a/content/browser/loader/upload_data_stream_builder.cc b/content/browser/loader/upload_data_stream_builder.cc
index cab3a10..12b4b80 100644
--- a/content/browser/loader/upload_data_stream_builder.cc
+++ b/content/browser/loader/upload_data_stream_builder.cc
@@ -4,6 +4,7 @@
#include "content/browser/loader/upload_data_stream_builder.h"
+#include <limits>
#include <utility>
#include <vector>
@@ -13,11 +14,11 @@
#include "content/common/resource_request_body.h"
#include "net/base/elements_upload_data_stream.h"
#include "net/base/upload_bytes_element_reader.h"
-#include "net/base/upload_disk_cache_entry_element_reader.h"
#include "net/base/upload_file_element_reader.h"
#include "storage/browser/blob/blob_data_handle.h"
-#include "storage/browser/blob/blob_data_snapshot.h"
+#include "storage/browser/blob/blob_reader.h"
#include "storage/browser/blob/blob_storage_context.h"
+#include "storage/browser/blob/upload_blob_element_reader.h"
namespace disk_cache {
class Entry;
@@ -69,89 +70,15 @@ class FileElementReader : public net::UploadFileElementReader {
DISALLOW_COPY_AND_ASSIGN(FileElementReader);
};
-// This owns the provided ResourceRequestBody. This is necessary to ensure the
-// BlobData and open disk cache entries survive until upload completion.
-class DiskCacheElementReader : public net::UploadDiskCacheEntryElementReader {
- public:
- DiskCacheElementReader(ResourceRequestBody* resource_request_body,
- disk_cache::Entry* disk_cache_entry,
- int disk_cache_stream_index,
- const ResourceRequestBody::Element& element)
- : net::UploadDiskCacheEntryElementReader(disk_cache_entry,
- disk_cache_stream_index,
- element.offset(),
- element.length()),
- resource_request_body_(resource_request_body) {
- DCHECK_EQ(ResourceRequestBody::Element::TYPE_DISK_CACHE_ENTRY,
- element.type());
- }
-
- ~DiskCacheElementReader() override {}
-
- private:
- scoped_refptr<ResourceRequestBody> resource_request_body_;
-
- DISALLOW_COPY_AND_ASSIGN(DiskCacheElementReader);
-};
-
-void ResolveBlobReference(
- ResourceRequestBody* body,
- storage::BlobStorageContext* blob_context,
- const ResourceRequestBody::Element& element,
- std::vector<std::pair<const ResourceRequestBody::Element*,
- const storage::BlobDataItem*>>* resolved_elements) {
- DCHECK(blob_context);
- scoped_ptr<storage::BlobDataHandle> handle =
- blob_context->GetBlobDataFromUUID(element.blob_uuid());
- DCHECK(handle);
- if (!handle)
- return;
-
- // TODO(dmurph): Create a reader for blobs instead of decomposing the blob
- // and storing the snapshot on the request to keep the resources around.
- // Currently a handle is attached to the request in the resource dispatcher
- // host, so we know the blob won't go away, but it's not very clear or useful.
- scoped_ptr<storage::BlobDataSnapshot> snapshot = handle->CreateSnapshot();
- // If there is no element in the referred blob data, just return.
- if (snapshot->items().empty())
- return;
-
- // Append the elements in the referenced blob data.
- for (const auto& item : snapshot->items()) {
- DCHECK_NE(storage::DataElement::TYPE_BLOB, item->type());
- resolved_elements->push_back(
- std::make_pair(item->data_element_ptr(), item.get()));
- }
- const void* key = snapshot.get();
- body->SetUserData(key, snapshot.release());
-}
-
} // namespace
scoped_ptr<net::UploadDataStream> UploadDataStreamBuilder::Build(
ResourceRequestBody* body,
storage::BlobStorageContext* blob_context,
storage::FileSystemContext* file_system_context,
- base::TaskRunner* file_task_runner) {
- // Resolve all blob elements.
- std::vector<std::pair<const ResourceRequestBody::Element*,
- const storage::BlobDataItem*>> resolved_elements;
- for (size_t i = 0; i < body->elements()->size(); ++i) {
- const ResourceRequestBody::Element& element = (*body->elements())[i];
- if (element.type() == ResourceRequestBody::Element::TYPE_BLOB) {
- ResolveBlobReference(body, blob_context, element, &resolved_elements);
- } else if (element.type() !=
- ResourceRequestBody::Element::TYPE_DISK_CACHE_ENTRY) {
- resolved_elements.push_back(std::make_pair(&element, nullptr));
- } else {
- NOTREACHED();
- }
- }
-
+ base::SingleThreadTaskRunner* file_task_runner) {
ScopedVector<net::UploadElementReader> element_readers;
- for (const auto& element_and_blob_item_pair : resolved_elements) {
- const ResourceRequestBody::Element& element =
- *element_and_blob_item_pair.first;
+ for (const auto& element : *body->elements()) {
switch (element.type()) {
case ResourceRequestBody::Element::TYPE_BYTES:
element_readers.push_back(new BytesElementReader(body, element));
@@ -172,22 +99,18 @@ scoped_ptr<net::UploadDataStream> UploadDataStreamBuilder::Build(
element.length(),
element.expected_modification_time()));
break;
- case ResourceRequestBody::Element::TYPE_BLOB:
- // Blob elements should be resolved beforehand.
- // TODO(dmurph): Create blob reader and store the snapshot in there.
- NOTREACHED();
- break;
- case ResourceRequestBody::Element::TYPE_DISK_CACHE_ENTRY: {
- // TODO(gavinp): If Build() is called with a DataElement of
- // TYPE_DISK_CACHE_ENTRY then this code won't work because we won't call
- // ResolveBlobReference() and so we won't find |item|. Is this OK?
- const storage::BlobDataItem* item = element_and_blob_item_pair.second;
- element_readers.push_back(
- new DiskCacheElementReader(body, item->disk_cache_entry(),
- item->disk_cache_stream_index(),
- element));
+ case ResourceRequestBody::Element::TYPE_BLOB: {
+ DCHECK_EQ(std::numeric_limits<uint64_t>::max(), element.length());
+ DCHECK_EQ(0ul, element.offset());
+ scoped_ptr<storage::BlobDataHandle> handle =
+ blob_context->GetBlobDataFromUUID(element.blob_uuid());
+ storage::BlobDataHandle* handle_ptr = handle.get();
+ element_readers.push_back(new storage::UploadBlobElementReader(
+ handle_ptr->CreateReader(file_system_context, file_task_runner),
+ handle.Pass()));
break;
}
+ case ResourceRequestBody::Element::TYPE_DISK_CACHE_ENTRY:
case ResourceRequestBody::Element::TYPE_UNKNOWN:
NOTREACHED();
break;
diff --git a/content/browser/loader/upload_data_stream_builder.h b/content/browser/loader/upload_data_stream_builder.h
index 228aade..abbcae8 100644
--- a/content/browser/loader/upload_data_stream_builder.h
+++ b/content/browser/loader/upload_data_stream_builder.h
@@ -9,7 +9,7 @@
#include "content/common/content_export.h"
namespace base {
-class TaskRunner;
+class SingleThreadTaskRunner;
}
namespace storage {
@@ -44,7 +44,7 @@ class CONTENT_EXPORT UploadDataStreamBuilder {
ResourceRequestBody* body,
storage::BlobStorageContext* blob_context,
storage::FileSystemContext* file_system_context,
- base::TaskRunner* file_task_runner);
+ base::SingleThreadTaskRunner* file_task_runner);
};
} // namespace content
diff --git a/content/browser/loader/upload_data_stream_builder_unittest.cc b/content/browser/loader/upload_data_stream_builder_unittest.cc
index b3edd5d..e323a4e 100644
--- a/content/browser/loader/upload_data_stream_builder_unittest.cc
+++ b/content/browser/loader/upload_data_stream_builder_unittest.cc
@@ -18,11 +18,11 @@
#include "net/base/test_completion_callback.h"
#include "net/base/upload_bytes_element_reader.h"
#include "net/base/upload_data_stream.h"
-#include "net/base/upload_disk_cache_entry_element_reader.h"
#include "net/base/upload_file_element_reader.h"
-#include "net/disk_cache/disk_cache.h"
#include "storage/browser/blob/blob_data_builder.h"
+#include "storage/browser/blob/blob_data_handle.h"
#include "storage/browser/blob/blob_storage_context.h"
+#include "storage/browser/blob/upload_blob_element_reader.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "url/gurl.h"
@@ -31,351 +31,59 @@ using storage::BlobDataHandle;
using storage::BlobStorageContext;
namespace content {
-namespace {
-const int kTestDiskCacheStreamIndex = 0;
-
-// Our disk cache tests don't need a real data handle since the tests themselves
-// scope the disk cache and entries.
-class EmptyDataHandle : public storage::BlobDataBuilder::DataHandle {
- private:
- ~EmptyDataHandle() override {}
-};
-
-scoped_ptr<disk_cache::Backend> CreateInMemoryDiskCache() {
- scoped_ptr<disk_cache::Backend> cache;
- net::TestCompletionCallback callback;
- int rv = disk_cache::CreateCacheBackend(net::MEMORY_CACHE,
- net::CACHE_BACKEND_DEFAULT,
- base::FilePath(), 0,
- false, nullptr, nullptr, &cache,
- callback.callback());
- EXPECT_EQ(net::OK, callback.GetResult(rv));
-
- return cache.Pass();
-}
-
-disk_cache::ScopedEntryPtr CreateDiskCacheEntry(disk_cache::Backend* cache,
- const char* key,
- const std::string& data) {
- disk_cache::Entry* temp_entry = nullptr;
- net::TestCompletionCallback callback;
- int rv = cache->CreateEntry(key, &temp_entry, callback.callback());
- if (callback.GetResult(rv) != net::OK)
- return nullptr;
- disk_cache::ScopedEntryPtr entry(temp_entry);
-
- scoped_refptr<net::StringIOBuffer> iobuffer = new net::StringIOBuffer(data);
- rv = entry->WriteData(kTestDiskCacheStreamIndex, 0, iobuffer.get(),
- iobuffer->size(), callback.callback(), false);
- EXPECT_EQ(static_cast<int>(data.size()), callback.GetResult(rv));
- return entry.Pass();
-}
-
-bool AreElementsEqual(const net::UploadElementReader& reader,
- const ResourceRequestBody::Element& element) {
- switch(element.type()) {
- case ResourceRequestBody::Element::TYPE_BYTES: {
- const net::UploadBytesElementReader* bytes_reader =
- reader.AsBytesReader();
- return bytes_reader &&
- element.length() == bytes_reader->length() &&
- std::equal(element.bytes(), element.bytes() + element.length(),
- bytes_reader->bytes());
- }
- case ResourceRequestBody::Element::TYPE_FILE: {
- const net::UploadFileElementReader* file_reader = reader.AsFileReader();
- return file_reader &&
- file_reader->path() == element.path() &&
- file_reader->range_offset() == element.offset() &&
- file_reader->range_length() == element.length() &&
- file_reader->expected_modification_time() ==
- element.expected_modification_time();
- break;
- }
- case ResourceRequestBody::Element::TYPE_DISK_CACHE_ENTRY: {
- // TODO(gavinp): Should we be comparing a higher level structure
- // such as the BlobDataItem so that we can do stronger equality
- // comparisons?
- const net::UploadDiskCacheEntryElementReader* disk_cache_entry_reader =
- reader.AsDiskCacheEntryReaderForTests();
- return disk_cache_entry_reader &&
- disk_cache_entry_reader->range_offset_for_tests() ==
- static_cast<int>(element.offset()) &&
- disk_cache_entry_reader->range_length_for_tests() ==
- static_cast<int>(element.length());
- break;
- }
- default:
- NOTREACHED();
- }
- return false;
-}
-
-} // namespace
-
-TEST(UploadDataStreamBuilderTest, CreateUploadDataStreamWithoutBlob) {
- base::MessageLoop message_loop;
- scoped_refptr<ResourceRequestBody> request_body = new ResourceRequestBody;
-
- const char kData[] = "123";
- const base::FilePath::StringType kFilePath = FILE_PATH_LITERAL("abc");
- const uint64 kFileOffset = 10U;
- const uint64 kFileLength = 100U;
- const base::Time kFileTime = base::Time::FromDoubleT(999);
- const int64 kIdentifier = 12345;
-
- request_body->AppendBytes(kData, arraysize(kData) - 1);
- request_body->AppendFileRange(base::FilePath(kFilePath),
- kFileOffset, kFileLength, kFileTime);
- request_body->set_identifier(kIdentifier);
-
- scoped_ptr<net::UploadDataStream> upload(UploadDataStreamBuilder::Build(
- request_body.get(), NULL, NULL,
- base::ThreadTaskRunnerHandle::Get().get()));
-
- EXPECT_EQ(kIdentifier, upload->identifier());
- ASSERT_TRUE(upload->GetElementReaders());
- ASSERT_EQ(request_body->elements()->size(),
- upload->GetElementReaders()->size());
-
- const net::UploadBytesElementReader* r1 =
- (*upload->GetElementReaders())[0]->AsBytesReader();
- ASSERT_TRUE(r1);
- EXPECT_EQ(kData, std::string(r1->bytes(), r1->length()));
-
- const net::UploadFileElementReader* r2 =
- (*upload->GetElementReaders())[1]->AsFileReader();
- ASSERT_TRUE(r2);
- EXPECT_EQ(kFilePath, r2->path().value());
- EXPECT_EQ(kFileOffset, r2->range_offset());
- EXPECT_EQ(kFileLength, r2->range_length());
- EXPECT_EQ(kFileTime, r2->expected_modification_time());
-}
-
-TEST(UploadDataStreamBuilderTest, ResolveBlobAndCreateUploadDataStream) {
+TEST(UploadDataStreamBuilderTest, CreateUploadDataStream) {
base::MessageLoop message_loop;
{
- // Setup blob data for testing.
- base::Time time1, time2;
- base::Time::FromString("Tue, 15 Nov 1994, 12:45:26 GMT", &time1);
- base::Time::FromString("Mon, 14 Nov 1994, 11:30:49 GMT", &time2);
-
- BlobStorageContext blob_storage_context;
-
- const std::string blob_id0("id-0");
- scoped_ptr<BlobDataBuilder> blob_data_builder(
- new BlobDataBuilder(blob_id0));
- scoped_ptr<BlobDataHandle> handle1 =
- blob_storage_context.AddFinishedBlob(blob_data_builder.get());
-
- const std::string blob_id1("id-1");
- const std::string kBlobData = "BlobData";
- blob_data_builder.reset(new BlobDataBuilder(blob_id1));
- blob_data_builder->AppendData(kBlobData);
- blob_data_builder->AppendFile(
- base::FilePath(FILE_PATH_LITERAL("BlobFile.txt")), 0, 20, time1);
- scoped_ptr<BlobDataHandle> handle2 =
- blob_storage_context.AddFinishedBlob(blob_data_builder.get());
-
- const std::string blob_id2("id-2");
- const std::string kDiskCacheData = "DiskCacheData";
- scoped_ptr<disk_cache::Backend> disk_cache_backend =
- CreateInMemoryDiskCache();
- ASSERT_TRUE(disk_cache_backend);
- disk_cache::ScopedEntryPtr disk_cache_entry =
- CreateDiskCacheEntry(disk_cache_backend.get(), "a key", kDiskCacheData);
- ASSERT_TRUE(disk_cache_entry);
- blob_data_builder.reset(new BlobDataBuilder(blob_id2));
- blob_data_builder->AppendDiskCacheEntry(
- new EmptyDataHandle(), disk_cache_entry.get(),
- kTestDiskCacheStreamIndex);
- scoped_ptr<BlobDataHandle> handle3 =
- blob_storage_context.AddFinishedBlob(blob_data_builder.get());
-
- // Setup upload data elements for comparison.
- ResourceRequestBody::Element blob_element1, blob_element2, blob_element3;
- blob_element1.SetToBytes(kBlobData.c_str(), kBlobData.size());
- blob_element2.SetToFilePathRange(
- base::FilePath(FILE_PATH_LITERAL("BlobFile.txt")), 0, 20, time1);
- blob_element3.SetToDiskCacheEntryRange(0, kDiskCacheData.size());
-
- ResourceRequestBody::Element upload_element1, upload_element2;
- upload_element1.SetToBytes("Hello", 5);
- upload_element2.SetToFilePathRange(
- base::FilePath(FILE_PATH_LITERAL("foo1.txt")), 0, 20, time2);
-
- // Test no blob reference.
- scoped_refptr<ResourceRequestBody> request_body(new ResourceRequestBody());
- request_body->AppendBytes(
- upload_element1.bytes(),
- upload_element1.length());
- request_body->AppendFileRange(
- upload_element2.path(),
- upload_element2.offset(),
- upload_element2.length(),
- upload_element2.expected_modification_time());
+ scoped_refptr<ResourceRequestBody> request_body = new ResourceRequestBody;
+
+ const std::string kBlob = "blobuuid";
+ const std::string kBlobData = "blobdata";
+ const char kData[] = "123";
+ const base::FilePath::StringType kFilePath = FILE_PATH_LITERAL("abc");
+ const uint64 kFileOffset = 10U;
+ const uint64 kFileLength = 100U;
+ const base::Time kFileTime = base::Time::FromDoubleT(999);
+ const int64 kIdentifier = 12345;
+
+ BlobStorageContext context;
+ BlobDataBuilder builder(kBlob);
+ builder.AppendData(kBlobData);
+ scoped_ptr<BlobDataHandle> handle = context.AddFinishedBlob(&builder);
+
+ request_body->AppendBytes(kData, arraysize(kData) - 1);
+ request_body->AppendFileRange(base::FilePath(kFilePath), kFileOffset,
+ kFileLength, kFileTime);
+ request_body->AppendBlob(kBlob);
+ request_body->set_identifier(kIdentifier);
scoped_ptr<net::UploadDataStream> upload(UploadDataStreamBuilder::Build(
- request_body.get(), &blob_storage_context, NULL,
+ request_body.get(), &context, NULL,
base::ThreadTaskRunnerHandle::Get().get()));
+ EXPECT_EQ(kIdentifier, upload->identifier());
ASSERT_TRUE(upload->GetElementReaders());
- ASSERT_EQ(2U, upload->GetElementReaders()->size());
- EXPECT_TRUE(AreElementsEqual(
- *(*upload->GetElementReaders())[0], upload_element1));
- EXPECT_TRUE(AreElementsEqual(
- *(*upload->GetElementReaders())[1], upload_element2));
-
- // Test having only one blob reference that refers to empty blob data.
- request_body = new ResourceRequestBody();
- request_body->AppendBlob(blob_id0);
-
- upload = UploadDataStreamBuilder::Build(
- request_body.get(), &blob_storage_context, NULL,
- base::ThreadTaskRunnerHandle::Get().get());
- ASSERT_TRUE(upload->GetElementReaders());
- ASSERT_EQ(0U, upload->GetElementReaders()->size());
-
- // Test having only one blob reference.
- request_body = new ResourceRequestBody();
- request_body->AppendBlob(blob_id1);
-
- upload = UploadDataStreamBuilder::Build(
- request_body.get(), &blob_storage_context, NULL,
- base::ThreadTaskRunnerHandle::Get().get());
- ASSERT_TRUE(upload->GetElementReaders());
- ASSERT_EQ(2U, upload->GetElementReaders()->size());
- EXPECT_TRUE(AreElementsEqual(
- *(*upload->GetElementReaders())[0], blob_element1));
- EXPECT_TRUE(AreElementsEqual(
- *(*upload->GetElementReaders())[1], blob_element2));
-
- // Test having one blob reference which refers to a disk cache entry.
- request_body = new ResourceRequestBody();
- request_body->AppendBlob(blob_id2);
-
- upload = UploadDataStreamBuilder::Build(
- request_body.get(), &blob_storage_context, nullptr,
- base::ThreadTaskRunnerHandle::Get().get());
- ASSERT_TRUE(upload->GetElementReaders());
- ASSERT_EQ(1U, upload->GetElementReaders()->size());
- EXPECT_TRUE(AreElementsEqual(
- *(*upload->GetElementReaders())[0], blob_element3));
-
- // Test having one blob reference at the beginning.
- request_body = new ResourceRequestBody();
- request_body->AppendBlob(blob_id1);
- request_body->AppendBytes(
- upload_element1.bytes(),
- upload_element1.length());
- request_body->AppendFileRange(
- upload_element2.path(),
- upload_element2.offset(),
- upload_element2.length(),
- upload_element2.expected_modification_time());
-
- upload = UploadDataStreamBuilder::Build(
- request_body.get(), &blob_storage_context, NULL,
- base::ThreadTaskRunnerHandle::Get().get());
- ASSERT_TRUE(upload->GetElementReaders());
- ASSERT_EQ(4U, upload->GetElementReaders()->size());
- EXPECT_TRUE(AreElementsEqual(
- *(*upload->GetElementReaders())[0], blob_element1));
- EXPECT_TRUE(AreElementsEqual(
- *(*upload->GetElementReaders())[1], blob_element2));
- EXPECT_TRUE(AreElementsEqual(
- *(*upload->GetElementReaders())[2], upload_element1));
- EXPECT_TRUE(AreElementsEqual(
- *(*upload->GetElementReaders())[3], upload_element2));
-
- // Test having one blob reference at the end.
- request_body = new ResourceRequestBody();
- request_body->AppendBytes(
- upload_element1.bytes(),
- upload_element1.length());
- request_body->AppendFileRange(
- upload_element2.path(),
- upload_element2.offset(),
- upload_element2.length(),
- upload_element2.expected_modification_time());
- request_body->AppendBlob(blob_id1);
-
- upload = UploadDataStreamBuilder::Build(
- request_body.get(), &blob_storage_context, NULL,
- base::ThreadTaskRunnerHandle::Get().get());
- ASSERT_TRUE(upload->GetElementReaders());
- ASSERT_EQ(4U, upload->GetElementReaders()->size());
- EXPECT_TRUE(AreElementsEqual(
- *(*upload->GetElementReaders())[0], upload_element1));
- EXPECT_TRUE(AreElementsEqual(
- *(*upload->GetElementReaders())[1], upload_element2));
- EXPECT_TRUE(AreElementsEqual(
- *(*upload->GetElementReaders())[2], blob_element1));
- EXPECT_TRUE(AreElementsEqual(
- *(*upload->GetElementReaders())[3], blob_element2));
-
- // Test having one blob reference in the middle.
- request_body = new ResourceRequestBody();
- request_body->AppendBytes(
- upload_element1.bytes(),
- upload_element1.length());
- request_body->AppendBlob(blob_id1);
- request_body->AppendFileRange(
- upload_element2.path(),
- upload_element2.offset(),
- upload_element2.length(),
- upload_element2.expected_modification_time());
-
- upload = UploadDataStreamBuilder::Build(
- request_body.get(), &blob_storage_context, NULL,
- base::ThreadTaskRunnerHandle::Get().get());
- ASSERT_TRUE(upload->GetElementReaders());
- ASSERT_EQ(4U, upload->GetElementReaders()->size());
- EXPECT_TRUE(AreElementsEqual(
- *(*upload->GetElementReaders())[0], upload_element1));
- EXPECT_TRUE(AreElementsEqual(
- *(*upload->GetElementReaders())[1], blob_element1));
- EXPECT_TRUE(AreElementsEqual(
- *(*upload->GetElementReaders())[2], blob_element2));
- EXPECT_TRUE(AreElementsEqual(
- *(*upload->GetElementReaders())[3], upload_element2));
-
- // Test having multiple blob references.
- request_body = new ResourceRequestBody();
- request_body->AppendBlob(blob_id1);
- request_body->AppendBytes(
- upload_element1.bytes(),
- upload_element1.length());
- request_body->AppendBlob(blob_id1);
- request_body->AppendBlob(blob_id1);
- request_body->AppendFileRange(
- upload_element2.path(),
- upload_element2.offset(),
- upload_element2.length(),
- upload_element2.expected_modification_time());
-
- upload = UploadDataStreamBuilder::Build(
- request_body.get(), &blob_storage_context, NULL,
- base::ThreadTaskRunnerHandle::Get().get());
- ASSERT_TRUE(upload->GetElementReaders());
- ASSERT_EQ(8U, upload->GetElementReaders()->size());
- EXPECT_TRUE(AreElementsEqual(
- *(*upload->GetElementReaders())[0], blob_element1));
- EXPECT_TRUE(AreElementsEqual(
- *(*upload->GetElementReaders())[1], blob_element2));
- EXPECT_TRUE(AreElementsEqual(
- *(*upload->GetElementReaders())[2], upload_element1));
- EXPECT_TRUE(AreElementsEqual(
- *(*upload->GetElementReaders())[3], blob_element1));
- EXPECT_TRUE(AreElementsEqual(
- *(*upload->GetElementReaders())[4], blob_element2));
- EXPECT_TRUE(AreElementsEqual(
- *(*upload->GetElementReaders())[5], blob_element1));
- EXPECT_TRUE(AreElementsEqual(
- *(*upload->GetElementReaders())[6], blob_element2));
- EXPECT_TRUE(AreElementsEqual(
- *(*upload->GetElementReaders())[7], upload_element2));
+ ASSERT_EQ(request_body->elements()->size(),
+ upload->GetElementReaders()->size());
+
+ const net::UploadBytesElementReader* r1 =
+ (*upload->GetElementReaders())[0]->AsBytesReader();
+ ASSERT_TRUE(r1);
+ EXPECT_EQ(kData, std::string(r1->bytes(), r1->length()));
+
+ const net::UploadFileElementReader* r2 =
+ (*upload->GetElementReaders())[1]->AsFileReader();
+ ASSERT_TRUE(r2);
+ EXPECT_EQ(kFilePath, r2->path().value());
+ EXPECT_EQ(kFileOffset, r2->range_offset());
+ EXPECT_EQ(kFileLength, r2->range_length());
+ EXPECT_EQ(kFileTime, r2->expected_modification_time());
+
+ const storage::UploadBlobElementReader* r3 =
+ static_cast<storage::UploadBlobElementReader*>(
+ (*upload->GetElementReaders())[2]);
+ ASSERT_TRUE(r3);
+ EXPECT_EQ("blobuuid", r3->uuid());
}
// Clean up for ASAN.
base::RunLoop().RunUntilIdle();
@@ -402,9 +110,6 @@ TEST(UploadDataStreamBuilderTest,
scoped_ptr<BlobDataHandle> handle =
blob_storage_context.AddFinishedBlob(blob_data_builder.get());
- ResourceRequestBody::Element blob_element;
- blob_element.SetToFilePathRange(test_blob_path, 0, kZeroLength, blob_time);
-
scoped_refptr<ResourceRequestBody> request_body(new ResourceRequestBody());
scoped_ptr<net::UploadDataStream> upload(UploadDataStreamBuilder::Build(
request_body.get(), &blob_storage_context, NULL,
@@ -421,9 +126,6 @@ TEST(UploadDataStreamBuilderTest,
ASSERT_TRUE(upload->GetElementReaders());
const auto& readers = *upload->GetElementReaders();
ASSERT_EQ(3U, readers.size());
- EXPECT_TRUE(AreElementsEqual(*readers[0], blob_element));
- EXPECT_TRUE(AreElementsEqual(*readers[1], blob_element));
- EXPECT_TRUE(AreElementsEqual(*readers[2], blob_element));
net::TestCompletionCallback init_callback;
ASSERT_EQ(net::ERR_IO_PENDING, upload->Init(init_callback.callback()));
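
For context only (an illustrative sketch, not code from this patch): a test could drain the stream returned by UploadDataStreamBuilder::Build() roughly as below, assuming the usual net::UploadDataStream Init/Read/IsEOF surface and a small scratch buffer.

  scoped_refptr<net::IOBufferWithSize> buf(new net::IOBufferWithSize(16));
  net::TestCompletionCallback init_cb;
  int rv = upload->Init(init_cb.callback());
  ASSERT_EQ(net::OK, init_cb.GetResult(rv));
  while (!upload->IsEOF()) {
    net::TestCompletionCallback read_cb;
    rv = upload->Read(buf.get(), buf->size(), read_cb.callback());
    ASSERT_GE(read_cb.GetResult(rv), 0);  // Bytes read, or a net error code.
  }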
diff --git a/content/content_tests.gypi b/content/content_tests.gypi
index af39c31..3ef69ee 100644
--- a/content/content_tests.gypi
+++ b/content/content_tests.gypi
@@ -385,6 +385,7 @@
'browser/download/file_metadata_unittest_linux.cc',
'browser/download/rate_estimator_unittest.cc',
'browser/download/save_package_unittest.cc',
+ 'browser/fileapi/blob_reader_unittest.cc',
'browser/fileapi/blob_storage_context_unittest.cc',
'browser/fileapi/blob_url_request_job_unittest.cc',
'browser/fileapi/copy_or_move_file_validator_unittest.cc',
diff --git a/net/base/upload_disk_cache_entry_element_reader.cc b/net/base/upload_disk_cache_entry_element_reader.cc
deleted file mode 100644
index 0635aeb..0000000
--- a/net/base/upload_disk_cache_entry_element_reader.cc
+++ /dev/null
@@ -1,90 +0,0 @@
-// Copyright 2015 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "net/base/upload_disk_cache_entry_element_reader.h"
-
-#include <algorithm>
-
-#include "base/bind.h"
-#include "base/logging.h"
-#include "net/base/io_buffer.h"
-#include "net/base/net_errors.h"
-#include "net/disk_cache/disk_cache.h"
-
-namespace net {
-
-UploadDiskCacheEntryElementReader::UploadDiskCacheEntryElementReader(
- disk_cache::Entry* disk_cache_entry,
- int disk_cache_stream_index,
- int range_offset,
- int range_length)
- : disk_cache_entry_(disk_cache_entry),
- disk_cache_stream_index_(disk_cache_stream_index),
- range_begin_offset_(range_offset),
- range_end_offset_(range_offset + range_length),
- current_read_offset_(range_offset),
- weak_factory_(this) {
- DCHECK_LE(0, range_offset);
- DCHECK_LT(0, range_length);
- DCHECK_LE(range_offset + range_length,
- disk_cache_entry_->GetDataSize(disk_cache_stream_index_));
-}
-
-UploadDiskCacheEntryElementReader::~UploadDiskCacheEntryElementReader() {
-}
-
-const UploadDiskCacheEntryElementReader*
-UploadDiskCacheEntryElementReader::AsDiskCacheEntryReaderForTests() const {
- return this;
-}
-
-int UploadDiskCacheEntryElementReader::Init(
- const CompletionCallback& callback) {
- weak_factory_.InvalidateWeakPtrs();
- current_read_offset_ = range_begin_offset_;
- return OK;
-}
-
-uint64_t UploadDiskCacheEntryElementReader::GetContentLength() const {
- return range_end_offset_ - range_begin_offset_;
-}
-
-uint64_t UploadDiskCacheEntryElementReader::BytesRemaining() const {
- return range_end_offset_ - current_read_offset_;
-}
-
-bool UploadDiskCacheEntryElementReader::IsInMemory() const {
- return false;
-}
-
-int UploadDiskCacheEntryElementReader::Read(
- IOBuffer* buf,
- int buf_length,
- const CompletionCallback& callback) {
- DCHECK(!callback.is_null());
- int bytes_to_read = std::min(buf_length, static_cast<int>(BytesRemaining()));
-
- CompletionCallback new_callback =
- base::Bind(&UploadDiskCacheEntryElementReader::OnReadCompleted,
- weak_factory_.GetWeakPtr(), callback);
-
- int result = disk_cache_entry_->ReadData(disk_cache_stream_index_,
- current_read_offset_, buf,
- bytes_to_read, new_callback);
- if (result == ERR_IO_PENDING)
- return ERR_IO_PENDING;
- if (result > 0)
- current_read_offset_ += result;
- return result;
-}
-
-void UploadDiskCacheEntryElementReader::OnReadCompleted(
- const CompletionCallback& callback,
- int result) {
- if (result > 0)
- current_read_offset_ += result;
- callback.Run(result);
-}
-
-} // namespace net
diff --git a/net/base/upload_disk_cache_entry_element_reader.h b/net/base/upload_disk_cache_entry_element_reader.h
deleted file mode 100644
index 1885b2e..0000000
--- a/net/base/upload_disk_cache_entry_element_reader.h
+++ /dev/null
@@ -1,72 +0,0 @@
-// Copyright 2015 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef NET_BASE_UPLOAD_DISK_CACHE_ENTRY_ELEMENT_READER_H_
-#define NET_BASE_UPLOAD_DISK_CACHE_ENTRY_ELEMENT_READER_H_
-
-#include "base/basictypes.h"
-#include "base/compiler_specific.h"
-#include "base/memory/weak_ptr.h"
-#include "net/base/completion_callback.h"
-#include "net/base/net_export.h"
-#include "net/base/upload_element_reader.h"
-
-namespace disk_cache {
-class Entry;
-}
-
-namespace net {
-
-// An UploadElementReader implementation for disk_cache::Entry objects. The
-// caller keeps ownership of |disk_cache_entry|, and is responsible for ensuring
-// it outlives the UploadDiskCacheEntryElementReader.
-class NET_EXPORT UploadDiskCacheEntryElementReader
- : public UploadElementReader {
- public:
- // Construct a new UploadDiskCacheEntryElementReader which reads from the disk
- // cache entry |disk_cache_entry| with stream index |disk_cache_stream_index|.
- // The new upload reader object will read |range_length| bytes, starting from
- // |range_offset|. To read an whole cache entry give a 0 as |range_offset| and
- // provide the length of the entry's stream as |range_length|.
- UploadDiskCacheEntryElementReader(disk_cache::Entry* disk_cache_entry,
- int disk_cache_stream_index,
- int range_offset,
- int range_length);
- ~UploadDiskCacheEntryElementReader() override;
-
- int range_offset_for_tests() const { return range_begin_offset_; }
- int range_length_for_tests() const {
- return range_end_offset_ - range_begin_offset_;
- }
-
- // UploadElementReader overrides:
- const UploadDiskCacheEntryElementReader* AsDiskCacheEntryReaderForTests()
- const override;
- int Init(const CompletionCallback& callback) override;
- uint64_t GetContentLength() const override;
- uint64_t BytesRemaining() const override;
- bool IsInMemory() const override;
- int Read(IOBuffer* buf,
- int buf_length,
- const CompletionCallback& callback) override;
-
- private:
- void OnReadCompleted(const CompletionCallback& callback, int result);
-
- disk_cache::Entry* const disk_cache_entry_;
- const int disk_cache_stream_index_;
-
- const int range_begin_offset_;
- const int range_end_offset_;
-
- int current_read_offset_;
-
- base::WeakPtrFactory<UploadDiskCacheEntryElementReader> weak_factory_;
-
- DISALLOW_COPY_AND_ASSIGN(UploadDiskCacheEntryElementReader);
-};
-
-} // namespace net
-
-#endif // NET_BASE_UPLOAD_DISK_CACHE_ENTRY_ELEMENT_READER_H_
diff --git a/net/base/upload_disk_cache_entry_element_reader_unittest.cc b/net/base/upload_disk_cache_entry_element_reader_unittest.cc
deleted file mode 100644
index 7321acc..0000000
--- a/net/base/upload_disk_cache_entry_element_reader_unittest.cc
+++ /dev/null
@@ -1,331 +0,0 @@
-// Copyright 2015 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "net/base/upload_disk_cache_entry_element_reader.h"
-
-#include <stdint.h>
-
-#include <algorithm>
-#include <string>
-#include <vector>
-
-#include "base/basictypes.h"
-#include "base/bind.h"
-#include "base/callback.h"
-#include "base/logging.h"
-#include "base/macros.h"
-#include "base/memory/ref_counted.h"
-#include "base/memory/scoped_ptr.h"
-#include "base/time/time.h"
-#include "net/base/io_buffer.h"
-#include "net/base/net_errors.h"
-#include "net/base/test_completion_callback.h"
-#include "net/disk_cache/disk_cache.h"
-#include "testing/gtest/include/gtest/gtest.h"
-#include "testing/platform_test.h"
-
-namespace net {
-namespace {
-
-const int kTestDiskCacheStreamIndex = 0;
-
-const char kDataKey[] = "a key";
-
-const char kData[] = "this is data in a disk cache entry";
-const size_t kDataSize = arraysize(kData) - 1;
-
-// A disk_cache::Entry that arbitrarily delays the completion of a read
-// operation to allow testing some races without flake. This is particularly
-// relevant in this unit test, which uses the always-synchronous MEMORY_CACHE.
-class DelayedReadEntry : public disk_cache::Entry {
- public:
- explicit DelayedReadEntry(disk_cache::ScopedEntryPtr entry)
- : entry_(entry.Pass()) {}
- ~DelayedReadEntry() override { EXPECT_FALSE(HasPendingReadCallbacks()); }
-
- bool HasPendingReadCallbacks() { return !pending_read_callbacks_.empty(); }
-
- void RunPendingReadCallbacks() {
- std::vector<base::Callback<void(void)>> callbacks;
- pending_read_callbacks_.swap(callbacks);
- for (const auto& callback : callbacks)
- callback.Run();
- }
-
- // From disk_cache::Entry:
- void Doom() override { entry_->Doom(); }
-
- void Close() override { delete this; } // Note this is required by the API.
-
- std::string GetKey() const override { return entry_->GetKey(); }
-
- base::Time GetLastUsed() const override { return entry_->GetLastUsed(); }
-
- base::Time GetLastModified() const override {
- return entry_->GetLastModified();
- }
-
- int32 GetDataSize(int index) const override {
- return entry_->GetDataSize(index);
- }
-
- int ReadData(int index,
- int offset,
- IOBuffer* buf,
- int buf_len,
- const CompletionCallback& original_callback) override {
- TestCompletionCallback callback;
- int rv = entry_->ReadData(index, offset, buf, buf_len, callback.callback());
- DCHECK_NE(rv, ERR_IO_PENDING)
- << "Test expects to use a MEMORY_CACHE instance, which is synchronous.";
- pending_read_callbacks_.push_back(base::Bind(original_callback, rv));
- return ERR_IO_PENDING;
- }
-
- int WriteData(int index,
- int offset,
- IOBuffer* buf,
- int buf_len,
- const CompletionCallback& callback,
- bool truncate) override {
- return entry_->WriteData(index, offset, buf, buf_len, callback, truncate);
- }
-
- int ReadSparseData(int64 offset,
- IOBuffer* buf,
- int buf_len,
- const CompletionCallback& callback) override {
- return entry_->ReadSparseData(offset, buf, buf_len, callback);
- }
-
- int WriteSparseData(int64 offset,
- IOBuffer* buf,
- int buf_len,
- const CompletionCallback& callback) override {
- return entry_->WriteSparseData(offset, buf, buf_len, callback);
- }
-
- int GetAvailableRange(int64 offset,
- int len,
- int64* start,
- const CompletionCallback& callback) override {
- return entry_->GetAvailableRange(offset, len, start, callback);
- }
-
- bool CouldBeSparse() const override { return entry_->CouldBeSparse(); }
-
- void CancelSparseIO() override { entry_->CancelSparseIO(); }
-
- int ReadyForSparseIO(const CompletionCallback& callback) override {
- return entry_->ReadyForSparseIO(callback);
- }
-
- private:
- disk_cache::ScopedEntryPtr entry_;
- std::vector<base::Callback<void(void)>> pending_read_callbacks_;
-};
-
-class UploadDiskCacheEntryElementReaderTest : public PlatformTest {
- public:
- UploadDiskCacheEntryElementReaderTest() {}
-
- ~UploadDiskCacheEntryElementReaderTest() override {}
-
- void SetUp() override {
- TestCompletionCallback callback;
- int rv = disk_cache::CreateCacheBackend(
- MEMORY_CACHE, CACHE_BACKEND_DEFAULT, base::FilePath(), 0, false,
- nullptr, nullptr, &cache_, callback.callback());
- ASSERT_EQ(OK, callback.GetResult(rv));
-
- disk_cache::Entry* tmp_entry = nullptr;
- rv = cache_->CreateEntry(kDataKey, &tmp_entry, callback.callback());
- ASSERT_EQ(OK, callback.GetResult(rv));
- entry_.reset(tmp_entry);
-
- scoped_refptr<IOBuffer> io_buffer = new WrappedIOBuffer(kData);
- rv = entry_->WriteData(kTestDiskCacheStreamIndex, 0, io_buffer.get(),
- kDataSize, callback.callback(), false);
- EXPECT_EQ(static_cast<int>(kDataSize), callback.GetResult(rv));
- }
-
- void set_entry(disk_cache::ScopedEntryPtr entry) { entry_.swap(entry); }
- disk_cache::Entry* entry() { return entry_.get(); }
- disk_cache::ScopedEntryPtr release_entry() { return entry_.Pass(); }
-
- private:
- scoped_ptr<disk_cache::Backend> cache_;
- disk_cache::ScopedEntryPtr entry_;
-};
-
-TEST_F(UploadDiskCacheEntryElementReaderTest, ReadAll) {
- UploadDiskCacheEntryElementReader reader(entry(), kTestDiskCacheStreamIndex,
- 0, kDataSize);
- EXPECT_EQ(static_cast<uint64_t>(kDataSize), reader.BytesRemaining());
-
- char read_buffer[kDataSize];
- std::fill(read_buffer, read_buffer + arraysize(read_buffer), '\0');
-
- scoped_refptr<IOBuffer> io_buffer = new WrappedIOBuffer(read_buffer);
- TestCompletionCallback callback;
- int rv = reader.Read(io_buffer.get(), kDataSize, callback.callback());
- EXPECT_EQ(static_cast<int>(kDataSize), callback.GetResult(rv));
- EXPECT_EQ(0U, reader.BytesRemaining())
- << "Expected a single read of |kDataSize| to retrieve entire entry.";
- EXPECT_EQ(std::string(kData, kDataSize), std::string(read_buffer, kDataSize));
-}
-
-TEST_F(UploadDiskCacheEntryElementReaderTest, ReadPartially) {
- UploadDiskCacheEntryElementReader reader(entry(), kTestDiskCacheStreamIndex,
- 0, kDataSize);
- EXPECT_EQ(static_cast<uint64_t>(kDataSize), reader.BytesRemaining());
-
- const size_t kReadBuffer1Size = kDataSize / 3;
- char read_buffer1[kReadBuffer1Size];
- std::fill(read_buffer1, read_buffer1 + arraysize(read_buffer1), '\0');
-
- scoped_refptr<IOBuffer> io_buffer1 = new WrappedIOBuffer(read_buffer1);
-
- const size_t kReadBuffer2Size = kDataSize - kReadBuffer1Size;
- char read_buffer2[kReadBuffer2Size];
- scoped_refptr<IOBuffer> io_buffer2 = new WrappedIOBuffer(read_buffer2);
-
- TestCompletionCallback callback;
- int rv = reader.Read(io_buffer1.get(), kReadBuffer1Size, callback.callback());
- EXPECT_EQ(static_cast<int>(kReadBuffer1Size), callback.GetResult(rv));
- EXPECT_EQ(static_cast<uint64_t>(kReadBuffer2Size), reader.BytesRemaining());
-
- rv = reader.Read(io_buffer2.get(), kReadBuffer2Size, callback.callback());
- EXPECT_EQ(static_cast<int>(kReadBuffer2Size), callback.GetResult(rv));
- EXPECT_EQ(0U, reader.BytesRemaining());
-
- EXPECT_EQ(std::string(kData, kDataSize),
- std::string(read_buffer1, kReadBuffer1Size) +
- std::string(read_buffer2, kReadBuffer2Size));
-}
-
-TEST_F(UploadDiskCacheEntryElementReaderTest, ReadTooMuch) {
- UploadDiskCacheEntryElementReader reader(entry(), kTestDiskCacheStreamIndex,
- 0, kDataSize);
- EXPECT_EQ(static_cast<uint64_t>(kDataSize), reader.BytesRemaining());
-
- const size_t kTooLargeSize = kDataSize + kDataSize / 2;
-
- char read_buffer[kTooLargeSize];
- std::fill(read_buffer, read_buffer + arraysize(read_buffer), '\0');
-
- scoped_refptr<IOBuffer> io_buffer = new WrappedIOBuffer(read_buffer);
- TestCompletionCallback callback;
- int rv = reader.Read(io_buffer.get(), kTooLargeSize, callback.callback());
- EXPECT_EQ(static_cast<int>(kDataSize), callback.GetResult(rv));
- EXPECT_EQ(0U, reader.BytesRemaining());
- EXPECT_EQ(std::string(kData, kDataSize), std::string(read_buffer, kDataSize));
-}
-
-TEST_F(UploadDiskCacheEntryElementReaderTest, ReadAsync) {
- DelayedReadEntry* delayed_read_entry = new DelayedReadEntry(release_entry());
- set_entry(disk_cache::ScopedEntryPtr(delayed_read_entry));
-
- UploadDiskCacheEntryElementReader reader(entry(), kTestDiskCacheStreamIndex,
- 0, kDataSize);
-
- char read_buffer[kDataSize];
- std::fill(read_buffer, read_buffer + arraysize(read_buffer), '\0');
-
- scoped_refptr<IOBuffer> io_buffer = new WrappedIOBuffer(read_buffer);
- TestCompletionCallback callback;
- int rv = reader.Read(io_buffer.get(), kDataSize, callback.callback());
- EXPECT_EQ(ERR_IO_PENDING, rv);
- EXPECT_TRUE(delayed_read_entry->HasPendingReadCallbacks());
- EXPECT_EQ(static_cast<uint64_t>(kDataSize), reader.BytesRemaining());
-
- delayed_read_entry->RunPendingReadCallbacks();
- EXPECT_EQ(static_cast<int>(kDataSize), callback.GetResult(rv));
- EXPECT_EQ(0U, reader.BytesRemaining())
- << "Expected a single read of |kDataSize| to retrieve entire entry.";
- EXPECT_EQ(std::string(kData, kDataSize), std::string(read_buffer, kDataSize));
-}
-
-TEST_F(UploadDiskCacheEntryElementReaderTest, MultipleInit) {
- UploadDiskCacheEntryElementReader reader(entry(), kTestDiskCacheStreamIndex,
- 0, kDataSize);
- char read_buffer[kDataSize];
- std::fill(read_buffer, read_buffer + arraysize(read_buffer), '\0');
-
- scoped_refptr<IOBuffer> io_buffer = new WrappedIOBuffer(read_buffer);
- TestCompletionCallback callback;
- int rv = reader.Read(io_buffer.get(), kDataSize, callback.callback());
- EXPECT_EQ(static_cast<int>(kDataSize), callback.GetResult(rv));
- EXPECT_EQ(std::string(kData, kDataSize), std::string(read_buffer, kDataSize));
-
- rv = reader.Init(callback.callback());
- EXPECT_EQ(OK, callback.GetResult(rv));
- EXPECT_EQ(static_cast<uint64_t>(kDataSize), reader.BytesRemaining());
- rv = reader.Read(io_buffer.get(), kDataSize, callback.callback());
- EXPECT_EQ(static_cast<int>(kDataSize), callback.GetResult(rv));
- EXPECT_EQ(std::string(kData, kDataSize), std::string(read_buffer, kDataSize));
-}
-
-TEST_F(UploadDiskCacheEntryElementReaderTest, InitDuringAsyncOperation) {
- DelayedReadEntry* delayed_read_entry = new DelayedReadEntry(release_entry());
- set_entry(disk_cache::ScopedEntryPtr(delayed_read_entry));
-
- UploadDiskCacheEntryElementReader reader(entry(), kTestDiskCacheStreamIndex,
- 0, kDataSize);
- char read_buffer[kDataSize];
- std::fill(read_buffer, read_buffer + arraysize(read_buffer), '\0');
-
- scoped_refptr<IOBuffer> io_buffer = new WrappedIOBuffer(read_buffer);
- TestCompletionCallback read_callback;
- int rv = reader.Read(io_buffer.get(), kDataSize, read_callback.callback());
- EXPECT_EQ(ERR_IO_PENDING, rv);
- EXPECT_TRUE(delayed_read_entry->HasPendingReadCallbacks());
- EXPECT_EQ(static_cast<uint64_t>(kDataSize), reader.BytesRemaining());
-
- TestCompletionCallback init_callback;
- rv = reader.Init(init_callback.callback());
- EXPECT_EQ(OK, init_callback.GetResult(rv));
-
- delayed_read_entry->RunPendingReadCallbacks();
- EXPECT_FALSE(delayed_read_entry->HasPendingReadCallbacks());
- EXPECT_EQ(static_cast<uint64_t>(kDataSize), reader.BytesRemaining());
-
- char read_buffer2[kDataSize];
- std::fill(read_buffer2, read_buffer2 + arraysize(read_buffer2), '\0');
- scoped_refptr<IOBuffer> io_buffer2 = new WrappedIOBuffer(read_buffer2);
- TestCompletionCallback read_callback2;
- rv = reader.Read(io_buffer2.get(), kDataSize, read_callback2.callback());
- EXPECT_EQ(ERR_IO_PENDING, rv);
- EXPECT_TRUE(delayed_read_entry->HasPendingReadCallbacks());
- EXPECT_EQ(static_cast<uint64_t>(kDataSize), reader.BytesRemaining());
-
- delayed_read_entry->RunPendingReadCallbacks();
- EXPECT_FALSE(delayed_read_entry->HasPendingReadCallbacks());
- read_callback2.WaitForResult(); // Succeeds if this does not deadlock.
- EXPECT_EQ(std::string(kData, kDataSize),
- std::string(read_buffer2, kDataSize));
-}
-
-TEST_F(UploadDiskCacheEntryElementReaderTest, Range) {
- const size_t kOffset = kDataSize / 4;
- const size_t kLength = kDataSize / 3;
-
- UploadDiskCacheEntryElementReader reader(entry(), kTestDiskCacheStreamIndex,
- kOffset, kLength);
- EXPECT_EQ(static_cast<uint64_t>(kLength), reader.BytesRemaining());
-
- char read_buffer[kLength];
- std::fill(read_buffer, read_buffer + arraysize(read_buffer), '\0');
-
- scoped_refptr<IOBuffer> io_buffer = new WrappedIOBuffer(read_buffer);
- TestCompletionCallback callback;
- int rv = reader.Read(io_buffer.get(), kLength, callback.callback());
- EXPECT_EQ(static_cast<int>(kLength), callback.GetResult(rv));
- EXPECT_EQ(0U, reader.BytesRemaining());
- EXPECT_EQ(std::string(kData + kOffset, kLength),
- std::string(read_buffer, kLength));
-}
-
-} // namespace
-} // namespace net
diff --git a/net/base/upload_element_reader.cc b/net/base/upload_element_reader.cc
index 70b657c..2a95eb8 100644
--- a/net/base/upload_element_reader.cc
+++ b/net/base/upload_element_reader.cc
@@ -6,11 +6,6 @@
namespace net {
-const UploadDiskCacheEntryElementReader*
-UploadElementReader::AsDiskCacheEntryReaderForTests() const {
- return nullptr;
-}
-
const UploadBytesElementReader* UploadElementReader::AsBytesReader() const {
return nullptr;
}
diff --git a/net/base/upload_element_reader.h b/net/base/upload_element_reader.h
index 267df8c..2814efa 100644
--- a/net/base/upload_element_reader.h
+++ b/net/base/upload_element_reader.h
@@ -13,7 +13,6 @@ namespace net {
class IOBuffer;
class UploadBytesElementReader;
-class UploadDiskCacheEntryElementReader;
class UploadFileElementReader;
// An interface to read an upload data element.
@@ -22,11 +21,6 @@ class NET_EXPORT UploadElementReader {
UploadElementReader() {}
virtual ~UploadElementReader() {}
- // Returns this instance's pointer as UploadDiskCacheEntryElementReader when
- // possible, otherwise returns nullptr.
- virtual const UploadDiskCacheEntryElementReader*
- AsDiskCacheEntryReaderForTests() const;
-
// Returns this instance's pointer as UploadBytesElementReader when possible,
// otherwise returns NULL.
virtual const UploadBytesElementReader* AsBytesReader() const;
diff --git a/net/net.gypi b/net/net.gypi
index 0975011..0ba77a8 100644
--- a/net/net.gypi
+++ b/net/net.gypi
@@ -505,8 +505,6 @@
'base/upload_bytes_element_reader.h',
'base/upload_data_stream.cc',
'base/upload_data_stream.h',
- 'base/upload_disk_cache_entry_element_reader.cc',
- 'base/upload_disk_cache_entry_element_reader.h',
'base/upload_element_reader.cc',
'base/upload_element_reader.h',
'base/upload_file_element_reader.cc',
@@ -1285,7 +1283,6 @@
'base/static_cookie_policy_unittest.cc',
'base/test_completion_callback_unittest.cc',
'base/upload_bytes_element_reader_unittest.cc',
- 'base/upload_disk_cache_entry_element_reader_unittest.cc',
'base/upload_file_element_reader_unittest.cc',
'base/url_util_unittest.cc',
'cert/cert_policy_enforcer_unittest.cc',
diff --git a/storage/browser/BUILD.gn b/storage/browser/BUILD.gn
index 3594d96..6206f37 100644
--- a/storage/browser/BUILD.gn
+++ b/storage/browser/BUILD.gn
@@ -14,6 +14,8 @@ component("browser") {
"blob/blob_data_item.h",
"blob/blob_data_snapshot.cc",
"blob/blob_data_snapshot.h",
+ "blob/blob_reader.cc",
+ "blob/blob_reader.h",
"blob/blob_storage_context.cc",
"blob/blob_storage_context.h",
"blob/blob_url_request_job.cc",
@@ -28,6 +30,8 @@ component("browser") {
"blob/shareable_blob_data_item.h",
"blob/shareable_file_reference.cc",
"blob/shareable_file_reference.h",
+ "blob/upload_blob_element_reader.cc",
+ "blob/upload_blob_element_reader.h",
"blob/view_blob_internals_job.cc",
"blob/view_blob_internals_job.h",
"database/database_quota_client.cc",
diff --git a/storage/browser/blob/blob_data_handle.cc b/storage/browser/blob/blob_data_handle.cc
index e3a4be9..3e864fa 100644
--- a/storage/browser/blob/blob_data_handle.cc
+++ b/storage/browser/blob/blob_data_handle.cc
@@ -8,38 +8,98 @@
#include "base/location.h"
#include "base/logging.h"
#include "base/sequenced_task_runner.h"
+#include "base/task_runner.h"
+#include "base/time/time.h"
#include "storage/browser/blob/blob_data_snapshot.h"
+#include "storage/browser/blob/blob_reader.h"
#include "storage/browser/blob/blob_storage_context.h"
+#include "storage/browser/fileapi/file_stream_reader.h"
+#include "storage/browser/fileapi/file_system_context.h"
+#include "storage/browser/fileapi/file_system_url.h"
+#include "url/gurl.h"
namespace storage {
+namespace {
+
+class FileStreamReaderProviderImpl
+ : public BlobReader::FileStreamReaderProvider {
+ public:
+ FileStreamReaderProviderImpl(FileSystemContext* file_system_context)
+ : file_system_context_(file_system_context) {}
+ ~FileStreamReaderProviderImpl() override {}
+
+ scoped_ptr<FileStreamReader> CreateForLocalFile(
+ base::TaskRunner* task_runner,
+ const base::FilePath& file_path,
+ int64_t initial_offset,
+ const base::Time& expected_modification_time) override {
+ return make_scoped_ptr(FileStreamReader::CreateForLocalFile(
+ task_runner, file_path, initial_offset, expected_modification_time));
+ }
+
+ scoped_ptr<FileStreamReader> CreateFileStreamReader(
+ const GURL& filesystem_url,
+ int64_t offset,
+ int64_t max_bytes_to_read,
+ const base::Time& expected_modification_time) override {
+ return file_system_context_->CreateFileStreamReader(
+ storage::FileSystemURL(
+ file_system_context_->CrackURL(
+ filesystem_url)),
+ offset, max_bytes_to_read,
+ expected_modification_time)
+ .Pass();
+ }
+
+ private:
+ scoped_refptr<FileSystemContext> file_system_context_;
+ DISALLOW_COPY_AND_ASSIGN(FileStreamReaderProviderImpl);
+};
+
+} // namespace
+
BlobDataHandle::BlobDataHandleShared::BlobDataHandleShared(
const std::string& uuid,
- BlobStorageContext* context,
- base::SequencedTaskRunner* task_runner)
- : uuid_(uuid), context_(context->AsWeakPtr()) {
+ const std::string& content_type,
+ const std::string& content_disposition,
+ BlobStorageContext* context)
+ : uuid_(uuid),
+ content_type_(content_type),
+ content_disposition_(content_disposition),
+ context_(context->AsWeakPtr()) {
context_->IncrementBlobRefCount(uuid);
}
+scoped_ptr<BlobReader> BlobDataHandle::CreateReader(
+ FileSystemContext* file_system_context,
+ base::SequencedTaskRunner* file_task_runner) const {
+ return scoped_ptr<BlobReader>(new BlobReader(
+ this, scoped_ptr<BlobReader::FileStreamReaderProvider>(
+ new FileStreamReaderProviderImpl(file_system_context)),
+ file_task_runner));
+}
+
scoped_ptr<BlobDataSnapshot>
BlobDataHandle::BlobDataHandleShared::CreateSnapshot() const {
return context_->CreateSnapshot(uuid_).Pass();
}
-const std::string& BlobDataHandle::BlobDataHandleShared::uuid() const {
- return uuid_;
-}
-
BlobDataHandle::BlobDataHandleShared::~BlobDataHandleShared() {
if (context_.get())
context_->DecrementBlobRefCount(uuid_);
}
BlobDataHandle::BlobDataHandle(const std::string& uuid,
+ const std::string& content_type,
+ const std::string& content_disposition,
BlobStorageContext* context,
- base::SequencedTaskRunner* task_runner)
- : io_task_runner_(task_runner),
- shared_(new BlobDataHandleShared(uuid, context, task_runner)) {
+ base::SequencedTaskRunner* io_task_runner)
+ : io_task_runner_(io_task_runner),
+ shared_(new BlobDataHandleShared(uuid,
+ content_type,
+ content_disposition,
+ context)) {
DCHECK(io_task_runner_.get());
DCHECK(io_task_runner_->RunsTasksOnCurrentThread());
}
@@ -62,7 +122,15 @@ scoped_ptr<BlobDataSnapshot> BlobDataHandle::CreateSnapshot() const {
}
const std::string& BlobDataHandle::uuid() const {
- return shared_->uuid();
+ return shared_->uuid_;
+}
+
+const std::string& BlobDataHandle::content_type() const {
+ return shared_->content_type_;
+}
+
+const std::string& BlobDataHandle::content_disposition() const {
+ return shared_->content_disposition_;
}
} // namespace storage
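
For illustration, a sketch (under stated assumptions, not taken from this patch) of how an IO-thread caller could use the CreateReader() added above; |handle|, |file_system_context|, |file_task_runner| and OnSizeCalculated(int) are assumed to exist in the caller's code.

  scoped_ptr<BlobReader> reader =
      handle->CreateReader(file_system_context, file_task_runner.get());
  BlobReader::Status status =
      reader->CalculateSize(base::Bind(&OnSizeCalculated));
  // The callback runs only when Status::IO_PENDING is returned; on
  // Status::NET_ERROR, reader->net_error() holds the failure code.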
diff --git a/storage/browser/blob/blob_data_handle.h b/storage/browser/blob/blob_data_handle.h
index 3041241..8eba2c6 100644
--- a/storage/browser/blob/blob_data_handle.h
+++ b/storage/browser/blob/blob_data_handle.h
@@ -8,6 +8,7 @@
#include <string>
#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
#include "base/memory/weak_ptr.h"
#include "base/supports_user_data.h"
#include "storage/browser/storage_browser_export.h"
@@ -19,7 +20,9 @@ class SequencedTaskRunner;
namespace storage {
class BlobDataSnapshot;
+class BlobReader;
class BlobStorageContext;
+class FileSystemContext;
// BlobDataHandle ensures that the underlying blob (keyed by the uuid) remains
// in the BlobStorageContext's collection while this object is alive. Anything
@@ -36,15 +39,25 @@ class STORAGE_EXPORT BlobDataHandle
BlobDataHandle(const BlobDataHandle& other); // May be copied on any thread.
~BlobDataHandle() override; // May be deleted on any thread.
- // A BlobDataSnapshot is used to read the data from the blob. This object is
+ // A BlobReader is used to read the data from the blob. This object is
// intended to be transient and should not be stored for any extended period
// of time.
+ scoped_ptr<BlobReader> CreateReader(
+ FileSystemContext* file_system_context,
+ base::SequencedTaskRunner* file_task_runner) const;
+
+ // May be accessed on any thread.
+ const std::string& uuid() const;
+ // May be accessed on any thread.
+ const std::string& content_type() const;
+ // May be accessed on any thread.
+ const std::string& content_disposition() const;
+
 // This call, and the destruction of the returned snapshot, must happen on
 // the IO thread.
+ // TODO(dmurph): Make this protected, where only the BlobReader can call it.
scoped_ptr<BlobDataSnapshot> CreateSnapshot() const;
- const std::string& uuid() const; // May be accessed on any thread.
-
private:
 // Internal class whose destructor is guaranteed to be called on the IO
// thread.
@@ -52,11 +65,11 @@ class STORAGE_EXPORT BlobDataHandle
: public base::RefCountedThreadSafe<BlobDataHandleShared> {
public:
BlobDataHandleShared(const std::string& uuid,
- BlobStorageContext* context,
- base::SequencedTaskRunner* task_runner);
+ const std::string& content_type,
+ const std::string& content_disposition,
+ BlobStorageContext* context);
scoped_ptr<BlobDataSnapshot> CreateSnapshot() const;
- const std::string& uuid() const;
private:
friend class base::DeleteHelper<BlobDataHandleShared>;
@@ -66,6 +79,8 @@ class STORAGE_EXPORT BlobDataHandle
virtual ~BlobDataHandleShared();
const std::string uuid_;
+ const std::string content_type_;
+ const std::string content_disposition_;
base::WeakPtr<BlobStorageContext> context_;
DISALLOW_COPY_AND_ASSIGN(BlobDataHandleShared);
@@ -73,8 +88,10 @@ class STORAGE_EXPORT BlobDataHandle
friend class BlobStorageContext;
BlobDataHandle(const std::string& uuid,
+ const std::string& content_type,
+ const std::string& content_disposition,
BlobStorageContext* context,
- base::SequencedTaskRunner* task_runner);
+ base::SequencedTaskRunner* io_task_runner);
scoped_refptr<base::SequencedTaskRunner> io_task_runner_;
scoped_refptr<BlobDataHandleShared> shared_;
diff --git a/storage/browser/blob/blob_data_snapshot.h b/storage/browser/blob/blob_data_snapshot.h
index f0d47227..01ea2ef 100644
--- a/storage/browser/blob/blob_data_snapshot.h
+++ b/storage/browser/blob/blob_data_snapshot.h
@@ -53,6 +53,7 @@ class STORAGE_EXPORT BlobDataSnapshot : public base::SupportsUserData::Data {
const std::string uuid_;
const std::string content_type_;
const std::string content_disposition_;
+
 // Non-const for construction in BlobStorageContext
std::vector<scoped_refptr<BlobDataItem>> items_;
};
diff --git a/storage/browser/blob/blob_reader.cc b/storage/browser/blob/blob_reader.cc
new file mode 100644
index 0000000..ccb4e55
--- /dev/null
+++ b/storage/browser/blob/blob_reader.cc
@@ -0,0 +1,568 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "storage/browser/blob/blob_reader.h"
+
+#include <algorithm>
+#include <limits>
+
+#include "base/bind.h"
+#include "base/sequenced_task_runner.h"
+#include "base/stl_util.h"
+#include "base/time/time.h"
+#include "base/trace_event/trace_event.h"
+#include "net/base/io_buffer.h"
+#include "net/base/net_errors.h"
+#include "net/disk_cache/disk_cache.h"
+#include "storage/browser/blob/blob_data_handle.h"
+#include "storage/browser/blob/blob_data_snapshot.h"
+#include "storage/browser/fileapi/file_stream_reader.h"
+#include "storage/browser/fileapi/file_system_context.h"
+#include "storage/browser/fileapi/file_system_url.h"
+#include "storage/common/data_element.h"
+
+namespace storage {
+namespace {
+bool IsFileType(DataElement::Type type) {
+ switch (type) {
+ case DataElement::TYPE_FILE:
+ case DataElement::TYPE_FILE_FILESYSTEM:
+ return true;
+ default:
+ return false;
+ }
+}
+} // namespace
+
+BlobReader::FileStreamReaderProvider::~FileStreamReaderProvider() {}
+
+BlobReader::BlobReader(
+ const BlobDataHandle* blob_handle,
+ scoped_ptr<FileStreamReaderProvider> file_stream_provider,
+ base::SequencedTaskRunner* file_task_runner)
+ : file_stream_provider_(file_stream_provider.Pass()),
+ file_task_runner_(file_task_runner),
+ net_error_(net::OK),
+ weak_factory_(this) {
+ if (blob_handle) {
+ blob_data_ = blob_handle->CreateSnapshot().Pass();
+ }
+}
+
+BlobReader::~BlobReader() {
+ STLDeleteValues(&index_to_reader_);
+}
+
+BlobReader::Status BlobReader::CalculateSize(
+ const net::CompletionCallback& done) {
+ DCHECK(!total_size_calculated_);
+ DCHECK(size_callback_.is_null());
+ if (!blob_data_.get()) {
+ return ReportError(net::ERR_FILE_NOT_FOUND);
+ }
+
+ net_error_ = net::OK;
+ total_size_ = 0;
+ const auto& items = blob_data_->items();
+ item_length_list_.resize(items.size());
+ pending_get_file_info_count_ = 0;
+ for (size_t i = 0; i < items.size(); ++i) {
+ const BlobDataItem& item = *items.at(i);
+ if (IsFileType(item.type())) {
+ ++pending_get_file_info_count_;
+ storage::FileStreamReader* const reader = GetOrCreateFileReaderAtIndex(i);
+ if (!reader) {
+ return ReportError(net::ERR_FAILED);
+ }
+ int64_t length_output = reader->GetLength(base::Bind(
+ &BlobReader::DidGetFileItemLength, weak_factory_.GetWeakPtr(), i));
+ if (length_output == net::ERR_IO_PENDING) {
+ continue;
+ }
+ if (length_output < 0) {
+ return ReportError(length_output);
+ }
+ // We got the length right away
+ --pending_get_file_info_count_;
+ uint64_t resolved_length;
+ if (!ResolveFileItemLength(item, length_output, &resolved_length)) {
+ return ReportError(net::ERR_FILE_NOT_FOUND);
+ }
+ if (!AddItemLength(i, resolved_length)) {
+ return ReportError(net::ERR_FAILED);
+ }
+ continue;
+ }
+
+ if (!AddItemLength(i, item.length()))
+ return ReportError(net::ERR_FAILED);
+ }
+
+ if (pending_get_file_info_count_ == 0) {
+ DidCountSize();
+ return Status::DONE;
+ }
+ // Note: We only set the callback if we know the operation is asynchronous.
+ size_callback_ = done;
+ return Status::IO_PENDING;
+}
+
+BlobReader::Status BlobReader::SetReadRange(uint64_t offset, uint64_t length) {
+ if (!blob_data_.get()) {
+ return ReportError(net::ERR_FILE_NOT_FOUND);
+ }
+ if (!total_size_calculated_) {
+ return ReportError(net::ERR_FAILED);
+ }
+ if (offset + length > total_size_) {
+ return ReportError(net::ERR_FILE_NOT_FOUND);
+ }
+ // Skip the initial items that are not in the range.
+ remaining_bytes_ = length;
+ const auto& items = blob_data_->items();
+ for (current_item_index_ = 0;
+ current_item_index_ < items.size() &&
+ offset >= item_length_list_[current_item_index_];
+ ++current_item_index_) {
+ offset -= item_length_list_[current_item_index_];
+ }
+
+ // Set the offset we need to jump to within the first item in the range.
+ current_item_offset_ = offset;
+ if (current_item_offset_ == 0)
+ return Status::DONE;
+
+ // Adjust the offset of the first stream if it is of file type.
+ const BlobDataItem& item = *items.at(current_item_index_);
+ if (IsFileType(item.type())) {
+ SetFileReaderAtIndex(current_item_index_,
+ CreateFileStreamReader(item, offset));
+ }
+ return Status::DONE;
+}
+
+BlobReader::Status BlobReader::Read(net::IOBuffer* buffer,
+ size_t dest_size,
+ int* bytes_read,
+ net::CompletionCallback done) {
+ DCHECK(bytes_read);
+ DCHECK_GE(remaining_bytes_, 0ul);
+ DCHECK(read_callback_.is_null());
+
+ *bytes_read = 0;
+ if (!blob_data_.get()) {
+ return ReportError(net::ERR_FILE_NOT_FOUND);
+ }
+ if (!total_size_calculated_) {
+ return ReportError(net::ERR_FAILED);
+ }
+
+ // Bail out immediately if we encountered an error.
+ if (net_error_ != net::OK) {
+ return Status::NET_ERROR;
+ }
+
+ DCHECK_GE(dest_size, 0ul);
+ if (remaining_bytes_ < static_cast<uint64_t>(dest_size))
+ dest_size = static_cast<int>(remaining_bytes_);
+
+ // If we should copy zero bytes because |remaining_bytes_| is zero, short
+ // circuit here.
+ if (!dest_size) {
+ *bytes_read = 0;
+ return Status::DONE;
+ }
+
+ // Keep track of the buffer.
+ DCHECK(!read_buf_.get());
+ read_buf_ = new net::DrainableIOBuffer(buffer, dest_size);
+
+ Status status = ReadLoop(bytes_read);
+ if (status == Status::IO_PENDING)
+ read_callback_ = done;
+ return status;
+}
+
+void BlobReader::Kill() {
+ DeleteCurrentFileReader();
+ weak_factory_.InvalidateWeakPtrs();
+}
+
+bool BlobReader::IsInMemory() const {
+ if (!blob_data_.get()) {
+ return true;
+ }
+ for (const auto& item : blob_data_->items()) {
+ if (item->type() != DataElement::TYPE_BYTES) {
+ return false;
+ }
+ }
+ return true;
+}
+
+void BlobReader::InvalidateCallbacksAndDone(int net_error,
+ net::CompletionCallback done) {
+ net_error_ = net_error;
+ weak_factory_.InvalidateWeakPtrs();
+ size_callback_.Reset();
+ read_callback_.Reset();
+ read_buf_ = nullptr;
+ done.Run(net_error);
+}
+
+BlobReader::Status BlobReader::ReportError(int net_error) {
+ net_error_ = net_error;
+ return Status::NET_ERROR;
+}
+
+bool BlobReader::AddItemLength(size_t index, uint64_t item_length) {
+ if (item_length > std::numeric_limits<uint64_t>::max() - total_size_) {
+ return false;
+ }
+
+ // Cache the size and add it to the total size.
+ DCHECK_LT(index, item_length_list_.size());
+ item_length_list_[index] = item_length;
+ total_size_ += item_length;
+ return true;
+}
+
+bool BlobReader::ResolveFileItemLength(const BlobDataItem& item,
+ int64_t total_length,
+ uint64_t* output_length) {
+ DCHECK(IsFileType(item.type()));
+ DCHECK(output_length);
+ uint64_t file_length = total_length;
+ uint64_t item_offset = item.offset();
+ uint64_t item_length = item.length();
+ if (item_offset > file_length) {
+ return false;
+ }
+
+ uint64 max_length = file_length - item_offset;
+
+ // If the item length is undefined, use the file size that was just
+ // resolved.
+ if (item_length == std::numeric_limits<uint64>::max()) {
+ item_length = max_length;
+ } else if (item_length > max_length) {
+ return false;
+ }
+
+ *output_length = item_length;
+ return true;
+}
+
+void BlobReader::DidGetFileItemLength(size_t index, int64_t result) {
+ // Do nothing if we have encountered an error.
+ if (net_error_)
+ return;
+
+ if (result == net::ERR_UPLOAD_FILE_CHANGED)
+ result = net::ERR_FILE_NOT_FOUND;
+ if (result < 0) {
+ InvalidateCallbacksAndDone(result, size_callback_);
+ return;
+ }
+
+ const auto& items = blob_data_->items();
+ DCHECK_LT(index, items.size());
+ const BlobDataItem& item = *items.at(index);
+ uint64_t length;
+ if (!ResolveFileItemLength(item, result, &length)) {
+ InvalidateCallbacksAndDone(net::ERR_FILE_NOT_FOUND, size_callback_);
+ return;
+ }
+ if (!AddItemLength(index, length)) {
+ InvalidateCallbacksAndDone(net::ERR_FAILED, size_callback_);
+ return;
+ }
+
+ if (--pending_get_file_info_count_ == 0)
+ DidCountSize();
+}
+
+void BlobReader::DidCountSize() {
+ DCHECK(!net_error_);
+ total_size_calculated_ = true;
+ remaining_bytes_ = total_size_;
+ // This is only set if the operation is asynchronous.
+ if (!size_callback_.is_null()) {
+ net::CompletionCallback done = size_callback_;
+ size_callback_.Reset();
+ done.Run(net::OK);
+ }
+}
+
+BlobReader::Status BlobReader::ReadLoop(int* bytes_read) {
+ // Read until we encounter an error or could not get the data immediately.
+ while (remaining_bytes_ > 0 && read_buf_->BytesRemaining() > 0) {
+ Status read_status = ReadItem();
+ if (read_status == Status::DONE) {
+ continue;
+ }
+ return read_status;
+ }
+
+ *bytes_read = BytesReadCompleted();
+ return Status::DONE;
+}
+
+BlobReader::Status BlobReader::ReadItem() {
+ // Are we done with reading all the blob data?
+ if (remaining_bytes_ == 0)
+ return Status::DONE;
+
+ const auto& items = blob_data_->items();
+ // If we get to the last item but still expect something to read, bail out
+ // since something is wrong.
+ if (current_item_index_ >= items.size()) {
+ return ReportError(net::ERR_FAILED);
+ }
+
+ // Compute the bytes to read for the current item.
+ int bytes_to_read = ComputeBytesToRead();
+
+ // If there is nothing to read for the current item, advance to the next one.
+ if (bytes_to_read == 0) {
+ AdvanceItem();
+ return Status::DONE;
+ }
+
+ // Do the reading.
+ const BlobDataItem& item = *items.at(current_item_index_);
+ if (item.type() == DataElement::TYPE_BYTES) {
+ ReadBytesItem(item, bytes_to_read);
+ return Status::DONE;
+ }
+ if (item.type() == DataElement::TYPE_DISK_CACHE_ENTRY)
+ return ReadDiskCacheEntryItem(item, bytes_to_read);
+ if (!IsFileType(item.type())) {
+ NOTREACHED();
+ return ReportError(net::ERR_FAILED);
+ }
+ storage::FileStreamReader* const reader =
+ GetOrCreateFileReaderAtIndex(current_item_index_);
+ if (!reader) {
+ return ReportError(net::ERR_FAILED);
+ }
+
+ return ReadFileItem(reader, bytes_to_read);
+}
+
+void BlobReader::AdvanceItem() {
+ // Close the file if the current item is a file.
+ DeleteCurrentFileReader();
+
+ // Advance to the next item.
+ current_item_index_++;
+ current_item_offset_ = 0;
+}
+
+void BlobReader::AdvanceBytesRead(int result) {
+ DCHECK_GT(result, 0);
+
+ // Have we finished reading the current item?
+ current_item_offset_ += result;
+ if (current_item_offset_ == item_length_list_[current_item_index_])
+ AdvanceItem();
+
+ // Subtract the remaining bytes.
+ remaining_bytes_ -= result;
+ DCHECK_GE(remaining_bytes_, 0ul);
+
+ // Adjust the read buffer.
+ read_buf_->DidConsume(result);
+ DCHECK_GE(read_buf_->BytesRemaining(), 0);
+}
+
+void BlobReader::ReadBytesItem(const BlobDataItem& item, int bytes_to_read) {
+ TRACE_EVENT1("Blob", "BlobReader::ReadBytesItem", "uuid", blob_data_->uuid());
+ DCHECK_GE(read_buf_->BytesRemaining(), bytes_to_read);
+
+ memcpy(read_buf_->data(), item.bytes() + item.offset() + current_item_offset_,
+ bytes_to_read);
+
+ AdvanceBytesRead(bytes_to_read);
+}
+
+BlobReader::Status BlobReader::ReadFileItem(FileStreamReader* reader,
+ int bytes_to_read) {
+ DCHECK(!io_pending_)
+ << "Can't begin IO while another IO operation is pending.";
+ DCHECK_GE(read_buf_->BytesRemaining(), bytes_to_read);
+ DCHECK(reader);
+ TRACE_EVENT_ASYNC_BEGIN1("Blob", "BlobRequest::ReadFileItem", this, "uuid",
+ blob_data_->uuid());
+ const int result = reader->Read(
+ read_buf_.get(), bytes_to_read,
+ base::Bind(&BlobReader::DidReadFile, weak_factory_.GetWeakPtr()));
+ if (result >= 0) {
+ AdvanceBytesRead(result);
+ return Status::DONE;
+ }
+ if (result == net::ERR_IO_PENDING) {
+ io_pending_ = true;
+ return Status::IO_PENDING;
+ }
+ return ReportError(result);
+}
+
+void BlobReader::DidReadFile(int result) {
+ TRACE_EVENT_ASYNC_END1("Blob", "BlobRequest::ReadFileItem", this, "uuid",
+ blob_data_->uuid());
+ DidReadItem(result);
+}
+
+void BlobReader::ContinueAsyncReadLoop() {
+ int bytes_read = 0;
+ Status read_status = ReadLoop(&bytes_read);
+ switch (read_status) {
+ case Status::DONE: {
+ net::CompletionCallback done = read_callback_;
+ read_callback_.Reset();
+ done.Run(bytes_read);
+ return;
+ }
+ case Status::NET_ERROR:
+ InvalidateCallbacksAndDone(net_error_, read_callback_);
+ return;
+ case Status::IO_PENDING:
+ return;
+ }
+}
+
+void BlobReader::DeleteCurrentFileReader() {
+ SetFileReaderAtIndex(current_item_index_, scoped_ptr<FileStreamReader>());
+}
+
+BlobReader::Status BlobReader::ReadDiskCacheEntryItem(const BlobDataItem& item,
+ int bytes_to_read) {
+ DCHECK(!io_pending_)
+ << "Can't begin IO while another IO operation is pending.";
+ TRACE_EVENT_ASYNC_BEGIN1("Blob", "BlobRequest::ReadDiskCacheItem", this,
+ "uuid", blob_data_->uuid());
+ DCHECK_GE(read_buf_->BytesRemaining(), bytes_to_read);
+
+ const int result = item.disk_cache_entry()->ReadData(
+ item.disk_cache_stream_index(), current_item_offset_, read_buf_.get(),
+ bytes_to_read, base::Bind(&BlobReader::DidReadDiskCacheEntry,
+ weak_factory_.GetWeakPtr()));
+ if (result >= 0) {
+ AdvanceBytesRead(result);
+ return Status::DONE;
+ }
+ if (result == net::ERR_IO_PENDING) {
+ io_pending_ = true;
+ return Status::IO_PENDING;
+ }
+ return ReportError(result);
+}
+
+void BlobReader::DidReadDiskCacheEntry(int result) {
+ TRACE_EVENT_ASYNC_END1("Blob", "BlobRequest::ReadDiskCacheItem", this, "uuid",
+ blob_data_->uuid());
+ DidReadItem(result);
+}
+
+void BlobReader::DidReadItem(int result) {
+ DCHECK(io_pending_) << "Asynchronous IO completed while IO wasn't pending?";
+ io_pending_ = false;
+ if (result <= 0) {
+ InvalidateCallbacksAndDone(result, read_callback_);
+ return;
+ }
+ AdvanceBytesRead(result);
+ ContinueAsyncReadLoop();
+}
+
+int BlobReader::BytesReadCompleted() {
+ int bytes_read = read_buf_->BytesConsumed();
+ read_buf_ = nullptr;
+ return bytes_read;
+}
+
+int BlobReader::ComputeBytesToRead() const {
+ uint64_t current_item_length = item_length_list_[current_item_index_];
+
+ uint64_t item_remaining = current_item_length - current_item_offset_;
+ uint64_t buf_remaining = read_buf_->BytesRemaining();
+ uint64_t max_int_value = std::numeric_limits<int>::max();
+ // Here we make sure we don't overflow 'max int'.
+ uint64_t min = std::min(
+ std::min(std::min(item_remaining, buf_remaining), remaining_bytes_),
+ max_int_value);
+
+ return static_cast<int>(min);
+}
+
+FileStreamReader* BlobReader::GetOrCreateFileReaderAtIndex(size_t index) {
+ const auto& items = blob_data_->items();
+ DCHECK_LT(index, items.size());
+ const BlobDataItem& item = *items.at(index);
+ if (!IsFileType(item.type()))
+ return nullptr;
+ auto it = index_to_reader_.find(index);
+ if (it != index_to_reader_.end()) {
+ DCHECK(it->second);
+ return it->second;
+ }
+ scoped_ptr<FileStreamReader> reader = CreateFileStreamReader(item, 0);
+ FileStreamReader* ret_value = reader.get();
+ if (!ret_value)
+ return nullptr;
+ index_to_reader_[index] = reader.release();
+ return ret_value;
+}
+
+scoped_ptr<FileStreamReader> BlobReader::CreateFileStreamReader(
+ const BlobDataItem& item,
+ uint64_t additional_offset) {
+ DCHECK(IsFileType(item.type()));
+
+ switch (item.type()) {
+ case DataElement::TYPE_FILE:
+ return file_stream_provider_->CreateForLocalFile(
+ file_task_runner_.get(), item.path(),
+ item.offset() + additional_offset,
+ item.expected_modification_time())
+ .Pass();
+ case DataElement::TYPE_FILE_FILESYSTEM:
+ return file_stream_provider_
+ ->CreateFileStreamReader(
+ item.filesystem_url(), item.offset() + additional_offset,
+ item.length() == std::numeric_limits<uint64_t>::max()
+ ? storage::kMaximumLength
+ : item.length() - additional_offset,
+ item.expected_modification_time())
+ .Pass();
+ case DataElement::TYPE_BLOB:
+ case DataElement::TYPE_BYTES:
+ case DataElement::TYPE_DISK_CACHE_ENTRY:
+ case DataElement::TYPE_UNKNOWN:
+ break;
+ }
+
+ NOTREACHED();
+ return nullptr;
+}
+
+void BlobReader::SetFileReaderAtIndex(size_t index,
+ scoped_ptr<FileStreamReader> reader) {
+ auto found = index_to_reader_.find(current_item_index_);
+ if (found != index_to_reader_.end()) {
+ if (found->second) {
+ delete found->second;
+ }
+ if (!reader.get()) {
+ index_to_reader_.erase(found);
+ return;
+ }
+ found->second = reader.release();
+ } else if (reader.get()) {
+ index_to_reader_[current_item_index_] = reader.release();
+ }
+}
+
+} // namespace storage
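
To illustrate the intended call sequence (a sketch based on the header that follows, not code from this patch), a caller that has already completed CalculateSize() could read from the front of the blob on the synchronous path as below; |reader|, OnReadDone(int) and total_size() are assumptions here, and real callers loop because a single Read() may return fewer bytes than requested.

  scoped_refptr<net::IOBufferWithSize> buf(new net::IOBufferWithSize(4096));
  if (reader->SetReadRange(0, reader->total_size()) ==
      BlobReader::Status::DONE) {
    int bytes_read = 0;
    BlobReader::Status status = reader->Read(
        buf.get(), buf->size(), &bytes_read, base::Bind(&OnReadDone));
    // Status::DONE: |bytes_read| is valid. Status::IO_PENDING: OnReadDone(int)
    // receives the result. Status::NET_ERROR: see reader->net_error().
  }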
diff --git a/storage/browser/blob/blob_reader.h b/storage/browser/blob/blob_reader.h
new file mode 100644
index 0000000..54b262f
--- /dev/null
+++ b/storage/browser/blob/blob_reader.h
@@ -0,0 +1,190 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef STORAGE_BROWSER_BLOB_BLOB_READER_H_
+#define STORAGE_BROWSER_BLOB_BLOB_READER_H_
+
+#include <stdint.h>
+#include <map>
+#include <vector>
+
+#include "base/macros.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/memory/weak_ptr.h"
+#include "net/base/completion_callback.h"
+#include "storage/browser/storage_browser_export.h"
+
+class GURL;
+
+namespace base {
+class FilePath;
+class SequencedTaskRunner;
+class TaskRunner;
+class Time;
+}
+
+namespace net {
+class DrainableIOBuffer;
+class IOBuffer;
+}
+
+namespace storage {
+class BlobDataItem;
+class BlobDataHandle;
+class BlobDataSnapshot;
+class FileStreamReader;
+class FileSystemContext;
+
+// The blob reader is used to read a blob. It can only be used in the browser
+// process, and it must be used on the IO thread.
+// * There can only be one read in progress at a time per reader.
+// * If Status::NET_ERROR is returned, an error occurred and net_error()
+//   contains the error code.
+// Use a BlobDataHandle to create an instance.
+class STORAGE_EXPORT BlobReader {
+ public:
+ class STORAGE_EXPORT FileStreamReaderProvider {
+ public:
+ virtual ~FileStreamReaderProvider();
+
+ virtual scoped_ptr<FileStreamReader> CreateForLocalFile(
+ base::TaskRunner* task_runner,
+ const base::FilePath& file_path,
+ int64_t initial_offset,
+ const base::Time& expected_modification_time) = 0;
+
+ virtual scoped_ptr<FileStreamReader> CreateFileStreamReader(
+ const GURL& filesystem_url,
+ int64_t offset,
+ int64_t max_bytes_to_read,
+ const base::Time& expected_modification_time) = 0;
+ };
+ enum class Status { NET_ERROR, IO_PENDING, DONE };
+ virtual ~BlobReader();
+
+ // This calculates the total size of the blob, and initializes the reading
+ // cursor.
+ // * This should only be called once per reader.
+  // * Status::DONE means that the total_size() value is populated and you can
+ // continue to SetReadRange or Read.
+ // * The 'done' callback is only called if Status::IO_PENDING is returned.
+  //   The callback value contains the error code or net::OK. Use total_size()
+  //   to query the blob size, as it is a uint64_t and may not fit in an int.
+ Status CalculateSize(const net::CompletionCallback& done);
+
+  // Sets the read range: the start position and the number of bytes to read.
+ // * This should be called after CalculateSize and before Read.
+ // * Range can only be set once.
+ Status SetReadRange(uint64_t position, uint64_t length);
+
+ // Reads a portion of the data.
+ // * CalculateSize (and optionally SetReadRange) must be called beforehand.
+ // * bytes_read is populated only if Status::DONE is returned. Otherwise the
+ // bytes read (or error code) is populated in the 'done' callback.
+ // * The done callback is only called if Status::IO_PENDING is returned.
+ // * This method can be called multiple times. A bytes_read value (either from
+ // the callback for Status::IO_PENDING or the bytes_read value for
+ // Status::DONE) of 0 means we're finished reading.
+ Status Read(net::IOBuffer* buffer,
+ size_t dest_size,
+ int* bytes_read,
+ net::CompletionCallback done);
+
+ // Kills reading and invalidates all callbacks. The reader cannot be used
+ // after this call.
+ void Kill();
+
+  // Returns whether all of the blob's items are in memory.
+ bool IsInMemory() const;
+
+ // Returns the remaining bytes to be read in the blob. This is populated
+ // after CalculateSize, and is modified by SetReadRange.
+ uint64_t remaining_bytes() const { return remaining_bytes_; }
+
+ // Returns the net error code if there was an error. Defaults to net::OK.
+ int net_error() const { return net_error_; }
+
+ // Returns the total size of the blob. This is populated after CalculateSize
+ // is called.
+ uint64_t total_size() const { return total_size_; }
+
+ protected:
+ friend class BlobDataHandle;
+ friend class BlobReaderTest;
+
+ BlobReader(const BlobDataHandle* blob_handle,
+ scoped_ptr<FileStreamReaderProvider> file_stream_provider,
+ base::SequencedTaskRunner* file_task_runner);
+
+ bool total_size_calculated() const { return total_size_calculated_; }
+
+ private:
+ Status ReportError(int net_error);
+ void InvalidateCallbacksAndDone(int net_error, net::CompletionCallback done);
+
+ bool AddItemLength(size_t index, uint64_t length);
+ bool ResolveFileItemLength(const BlobDataItem& item,
+ int64_t total_length,
+ uint64_t* output_length);
+ void DidGetFileItemLength(size_t index, int64_t result);
+ void DidCountSize();
+
+ // For reading the blob.
+  // Returns Status::DONE when finished, IO_PENDING while waiting on async IO.
+  Status ReadLoop(int* bytes_read);
+  // Called from asynchronous completions to continue the read loop.
+  void ContinueAsyncReadLoop();
+  // Status::IO_PENDING means we are waiting on asynchronous IO.
+ Status ReadItem();
+ void AdvanceItem();
+ void AdvanceBytesRead(int result);
+ void ReadBytesItem(const BlobDataItem& item, int bytes_to_read);
+ BlobReader::Status ReadFileItem(FileStreamReader* reader, int bytes_to_read);
+ void DidReadFile(int result);
+ void DeleteCurrentFileReader();
+ Status ReadDiskCacheEntryItem(const BlobDataItem& item, int bytes_to_read);
+ void DidReadDiskCacheEntry(int result);
+ void DidReadItem(int result);
+ int ComputeBytesToRead() const;
+ int BytesReadCompleted();
+
+ // Returns a FileStreamReader for a blob item at |index|.
+  // If the item at |index| is not a file type, this returns nullptr.
+ FileStreamReader* GetOrCreateFileReaderAtIndex(size_t index);
+  // If |reader| is null, this deletes any existing reader at |index|.
+ void SetFileReaderAtIndex(size_t index, scoped_ptr<FileStreamReader> reader);
+  // Creates a FileStreamReader for |item|, offset by |additional_offset|.
+ scoped_ptr<FileStreamReader> CreateFileStreamReader(
+ const BlobDataItem& item,
+ uint64_t additional_offset);
+
+ scoped_ptr<BlobDataSnapshot> blob_data_;
+ scoped_ptr<FileStreamReaderProvider> file_stream_provider_;
+ scoped_refptr<base::SequencedTaskRunner> file_task_runner_;
+
+ int net_error_;
+ bool item_list_populated_ = false;
+ std::vector<uint64_t> item_length_list_;
+
+ scoped_refptr<net::DrainableIOBuffer> read_buf_;
+
+ bool total_size_calculated_ = false;
+ uint64_t total_size_ = 0;
+ uint64_t remaining_bytes_ = 0;
+ size_t pending_get_file_info_count_ = 0;
+ std::map<size_t, FileStreamReader*> index_to_reader_;
+ size_t current_item_index_ = 0;
+ uint64_t current_item_offset_ = 0;
+
+ bool io_pending_ = false;
+
+ net::CompletionCallback size_callback_;
+ net::CompletionCallback read_callback_;
+
+ base::WeakPtrFactory<BlobReader> weak_factory_;
+ DISALLOW_COPY_AND_ASSIGN(BlobReader);
+};
+
+} // namespace storage
+#endif // STORAGE_BROWSER_BLOB_BLOB_READER_H_
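
For reference, a minimal caller-side sketch of the contract documented in blob_reader.h above: CalculateSize first, an optional SetReadRange, then Read, branching on the three Status values. The names ReadBlobPrefix, OnSizeCalculated and OnReadCompleted are hypothetical helpers, not part of this CL; a real caller would obtain the reader from BlobDataHandle::CreateReader(), as blob_url_request_job.cc does below.

#include <stdint.h>
#include <algorithm>

#include "base/bind.h"
#include "net/base/io_buffer.h"
#include "storage/browser/blob/blob_reader.h"

void OnSizeCalculated(int net_error);      // Invoked only if CalculateSize is async.
void OnReadCompleted(int bytes_or_error);  // Invoked only if Read is async.

void ReadBlobPrefix(storage::BlobReader* reader,
                    scoped_refptr<net::IOBuffer> buf,
                    size_t buf_size) {
  storage::BlobReader::Status status =
      reader->CalculateSize(base::Bind(&OnSizeCalculated));
  if (status == storage::BlobReader::Status::NET_ERROR)
    return;  // reader->net_error() holds the failure code.
  if (status == storage::BlobReader::Status::IO_PENDING)
    return;  // OnSizeCalculated continues once total_size() is known.

  // Size known synchronously: optionally narrow the range, then read.
  reader->SetReadRange(0, std::min<uint64_t>(reader->total_size(), buf_size));
  int bytes_read = 0;
  status = reader->Read(buf.get(), buf_size, &bytes_read,
                        base::Bind(&OnReadCompleted));
  if (status == storage::BlobReader::Status::DONE) {
    // |bytes_read| is valid here; a value of 0 means the blob is exhausted.
  }
}
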
diff --git a/storage/browser/blob/blob_storage_context.cc b/storage/browser/blob/blob_storage_context.cc
index d08ccb3..7114696 100644
--- a/storage/browser/blob/blob_storage_context.cc
+++ b/storage/browser/blob/blob_storage_context.cc
@@ -77,9 +77,9 @@ scoped_ptr<BlobDataHandle> BlobStorageContext::GetBlobDataFromUUID(
if (entry->flags & EXCEEDED_MEMORY)
return result.Pass();
DCHECK(!entry->IsBeingBuilt());
- result.reset(
- new BlobDataHandle(uuid, this,
- base::ThreadTaskRunnerHandle::Get().get()));
+ result.reset(new BlobDataHandle(uuid, entry->data->content_type(),
+ entry->data->content_disposition(), this,
+ base::ThreadTaskRunnerHandle::Get().get()));
return result.Pass();
}
diff --git a/storage/browser/blob/blob_url_request_job.cc b/storage/browser/blob/blob_url_request_job.cc
index f429020..6ec2fe5 100644
--- a/storage/browser/blob/blob_url_request_job.cc
+++ b/storage/browser/blob/blob_url_request_job.cc
@@ -31,6 +31,8 @@
#include "net/url_request/url_request_context.h"
#include "net/url_request/url_request_error_job.h"
#include "net/url_request/url_request_status.h"
+#include "storage/browser/blob/blob_data_handle.h"
+#include "storage/browser/blob/blob_reader.h"
#include "storage/browser/fileapi/file_stream_reader.h"
#include "storage/browser/fileapi/file_system_context.h"
#include "storage/browser/fileapi/file_system_url.h"
@@ -38,41 +40,24 @@
namespace storage {
-namespace {
-
-bool IsFileType(DataElement::Type type) {
- switch (type) {
- case DataElement::TYPE_FILE:
- case DataElement::TYPE_FILE_FILESYSTEM:
- return true;
- default:
- return false;
- }
-}
-
-} // namespace
-
BlobURLRequestJob::BlobURLRequestJob(
net::URLRequest* request,
net::NetworkDelegate* network_delegate,
- scoped_ptr<BlobDataSnapshot> blob_data,
- storage::FileSystemContext* file_system_context,
+ BlobDataHandle* blob_handle,
+ FileSystemContext* file_system_context,
base::SingleThreadTaskRunner* file_task_runner)
: net::URLRequestJob(request, network_delegate),
- blob_data_(blob_data.Pass()),
- file_system_context_(file_system_context),
- file_task_runner_(file_task_runner),
- total_size_(0),
- remaining_bytes_(0),
- pending_get_file_info_count_(0),
- current_item_index_(0),
- current_item_offset_(0),
error_(false),
byte_range_set_(false),
weak_factory_(this) {
TRACE_EVENT_ASYNC_BEGIN1("Blob", "BlobRequest", this, "uuid",
- blob_data_ ? blob_data_->uuid() : "NotFound");
- DCHECK(file_task_runner_.get());
+ blob_handle ? blob_handle->uuid() : "NotFound");
+ DCHECK(file_task_runner);
+ if (blob_handle) {
+ blob_handle_.reset(new BlobDataHandle(*blob_handle));
+ blob_reader_ =
+ blob_handle_->CreateReader(file_system_context, file_task_runner);
+ }
}
void BlobURLRequestJob::Start() {
@@ -83,8 +68,9 @@ void BlobURLRequestJob::Start() {
}
void BlobURLRequestJob::Kill() {
- DeleteCurrentFileReader();
-
+ if (blob_reader_) {
+ blob_reader_->Kill();
+ }
net::URLRequestJob::Kill();
weak_factory_.InvalidateWeakPtrs();
}
@@ -92,9 +78,10 @@ void BlobURLRequestJob::Kill() {
bool BlobURLRequestJob::ReadRawData(net::IOBuffer* dest,
int dest_size,
int* bytes_read) {
+ TRACE_EVENT_ASYNC_BEGIN1("Blob", "BlobRequest::ReadRawData", this, "uuid",
+ blob_handle_ ? blob_handle_->uuid() : "NotFound");
DCHECK_NE(dest_size, 0);
DCHECK(bytes_read);
- DCHECK_GE(remaining_bytes_, 0);
// Bail out immediately if we encounter an error.
if (error_) {
@@ -102,21 +89,27 @@ bool BlobURLRequestJob::ReadRawData(net::IOBuffer* dest,
return true;
}
- if (remaining_bytes_ < dest_size)
- dest_size = static_cast<int>(remaining_bytes_);
+ BlobReader::Status read_status =
+ blob_reader_->Read(dest, dest_size, bytes_read,
+ base::Bind(&BlobURLRequestJob::DidReadRawData,
+ weak_factory_.GetWeakPtr()));
- // If we should copy zero bytes because |remaining_bytes_| is zero, short
- // circuit here.
- if (!dest_size) {
- *bytes_read = 0;
- return true;
+ switch (read_status) {
+ case BlobReader::Status::NET_ERROR:
+ NotifyFailure(blob_reader_->net_error());
+ TRACE_EVENT_ASYNC_END1("Blob", "BlobRequest::ReadRawData", this, "uuid",
+ blob_handle_ ? blob_handle_->uuid() : "NotFound");
+ return false;
+ case BlobReader::Status::IO_PENDING:
+ SetStatus(net::URLRequestStatus(net::URLRequestStatus::IO_PENDING, 0));
+ return false;
+ case BlobReader::Status::DONE:
+ TRACE_EVENT_ASYNC_END1("Blob", "BlobRequest::ReadRawData", this, "uuid",
+ blob_handle_ ? blob_handle_->uuid() : "NotFound");
+ return true;
}
-
- // Keep track of the buffer.
- DCHECK(!read_buf_.get());
- read_buf_ = new net::DrainableIOBuffer(dest, dest_size);
-
- return ReadLoop(bytes_read);
+ NOTREACHED();
+ return true;
}
bool BlobURLRequestJob::GetMimeType(std::string* mime_type) const {
@@ -159,13 +152,11 @@ void BlobURLRequestJob::SetExtraRequestHeaders(
}
BlobURLRequestJob::~BlobURLRequestJob() {
- STLDeleteValues(&index_to_reader_);
TRACE_EVENT_ASYNC_END1("Blob", "BlobRequest", this, "uuid",
- blob_data_ ? blob_data_->uuid() : "NotFound");
+ blob_handle_ ? blob_handle_->uuid() : "NotFound");
}
void BlobURLRequestJob::DidStart() {
- current_file_chunk_number_ = 0;
error_ = false;
// We only support GET request per the spec.
@@ -175,369 +166,69 @@ void BlobURLRequestJob::DidStart() {
}
// If the blob data is not present, bail out.
- if (!blob_data_) {
+ if (!blob_handle_) {
NotifyFailure(net::ERR_FILE_NOT_FOUND);
return;
}
- CountSize();
-}
-
-bool BlobURLRequestJob::AddItemLength(size_t index, int64 item_length) {
- if (item_length > kint64max - total_size_) {
- TRACE_EVENT_ASYNC_END1("Blob", "BlobRequest::CountSize", this, "uuid",
- blob_data_->uuid());
- NotifyFailure(net::ERR_FAILED);
- return false;
- }
-
- // Cache the size and add it to the total size.
- DCHECK_LT(index, item_length_list_.size());
- item_length_list_[index] = item_length;
- total_size_ += item_length;
- return true;
-}
-
-bool BlobURLRequestJob::CountSize() {
TRACE_EVENT_ASYNC_BEGIN1("Blob", "BlobRequest::CountSize", this, "uuid",
- blob_data_->uuid());
- pending_get_file_info_count_ = 0;
- total_size_ = 0;
- const auto& items = blob_data_->items();
- item_length_list_.resize(items.size());
-
- for (size_t i = 0; i < items.size(); ++i) {
- const BlobDataItem& item = *items.at(i);
- if (IsFileType(item.type())) {
- ++pending_get_file_info_count_;
- storage::FileStreamReader* const reader = GetFileStreamReader(i);
- if (!reader) {
- NotifyFailure(net::ERR_FAILED);
- return false;
- }
- if (!reader->GetLength(
- base::Bind(&BlobURLRequestJob::DidGetFileItemLength,
- weak_factory_.GetWeakPtr(), i))) {
- NotifyFailure(net::ERR_FILE_NOT_FOUND);
- return false;
- }
- continue;
- }
-
- if (!AddItemLength(i, item.length()))
- return false;
+ blob_handle_->uuid());
+ BlobReader::Status size_status = blob_reader_->CalculateSize(base::Bind(
+ &BlobURLRequestJob::DidCalculateSize, weak_factory_.GetWeakPtr()));
+ switch (size_status) {
+ case BlobReader::Status::NET_ERROR:
+ NotifyFailure(blob_reader_->net_error());
+ return;
+ case BlobReader::Status::IO_PENDING:
+ SetStatus(net::URLRequestStatus(net::URLRequestStatus::IO_PENDING, 0));
+ return;
+ case BlobReader::Status::DONE:
+ DidCalculateSize(net::OK);
+ return;
}
-
- if (pending_get_file_info_count_ == 0)
- DidCountSize(net::OK);
-
- return true;
}
-void BlobURLRequestJob::DidCountSize(int error) {
- DCHECK(!error_);
+void BlobURLRequestJob::DidCalculateSize(int result) {
TRACE_EVENT_ASYNC_END1("Blob", "BlobRequest::CountSize", this, "uuid",
- blob_data_->uuid());
+ blob_handle_->uuid());
+ // Clear the IO_PENDING status
+ SetStatus(net::URLRequestStatus());
- // If an error occured, bail out.
- if (error != net::OK) {
- NotifyFailure(error);
+ if (result != net::OK) {
+ NotifyFailure(result);
return;
}
// Apply the range requirement.
- if (!byte_range_.ComputeBounds(total_size_)) {
+ if (!byte_range_.ComputeBounds(blob_reader_->total_size())) {
NotifyFailure(net::ERR_REQUEST_RANGE_NOT_SATISFIABLE);
return;
}
- remaining_bytes_ = base::checked_cast<int64>(
+ DCHECK_LE(byte_range_.first_byte_position(),
+ byte_range_.last_byte_position() + 1);
+ uint64_t length = base::checked_cast<uint64_t>(
byte_range_.last_byte_position() - byte_range_.first_byte_position() + 1);
- DCHECK_GE(remaining_bytes_, 0);
- // Do the seek at the beginning of the request.
- if (byte_range_.first_byte_position())
- Seek(byte_range_.first_byte_position());
+ if (byte_range_set_)
+ blob_reader_->SetReadRange(byte_range_.first_byte_position(), length);
- NotifySuccess();
-}
-
-void BlobURLRequestJob::DidGetFileItemLength(size_t index, int64 result) {
- // Do nothing if we have encountered an error.
- if (error_)
- return;
-
- if (result == net::ERR_UPLOAD_FILE_CHANGED) {
- NotifyFailure(net::ERR_FILE_NOT_FOUND);
- return;
- } else if (result < 0) {
- NotifyFailure(result);
- return;
- }
-
- const auto& items = blob_data_->items();
- DCHECK_LT(index, items.size());
- const BlobDataItem& item = *items.at(index);
- DCHECK(IsFileType(item.type()));
-
- uint64 file_length = result;
- uint64 item_offset = item.offset();
- uint64 item_length = item.length();
-
- if (item_offset > file_length) {
- NotifyFailure(net::ERR_FILE_NOT_FOUND);
- return;
- }
-
- uint64 max_length = file_length - item_offset;
-
- // If item length is undefined, then we need to use the file size being
- // resolved in the real time.
- if (item_length == std::numeric_limits<uint64>::max()) {
- item_length = max_length;
- } else if (item_length > max_length) {
- NotifyFailure(net::ERR_FILE_NOT_FOUND);
- return;
- }
-
- if (!AddItemLength(index, item_length))
- return;
-
- if (--pending_get_file_info_count_ == 0)
- DidCountSize(net::OK);
-}
-
-void BlobURLRequestJob::Seek(int64 offset) {
- // Skip the initial items that are not in the range.
- const auto& items = blob_data_->items();
- for (current_item_index_ = 0;
- current_item_index_ < items.size() &&
- offset >= item_length_list_[current_item_index_];
- ++current_item_index_) {
- offset -= item_length_list_[current_item_index_];
- }
-
- // Set the offset that need to jump to for the first item in the range.
- current_item_offset_ = offset;
-
- if (offset == 0)
- return;
-
- // Adjust the offset of the first stream if it is of file type.
- const BlobDataItem& item = *items.at(current_item_index_);
- if (IsFileType(item.type())) {
- DeleteCurrentFileReader();
- CreateFileStreamReader(current_item_index_, offset);
- }
-}
-
-bool BlobURLRequestJob::ReadItem() {
- // Are we done with reading all the blob data?
- if (remaining_bytes_ == 0)
- return true;
-
- const auto& items = blob_data_->items();
- // If we get to the last item but still expect something to read, bail out
- // since something is wrong.
- if (current_item_index_ >= items.size()) {
- NotifyFailure(net::ERR_FAILED);
- return false;
- }
-
- // Compute the bytes to read for current item.
- int bytes_to_read = ComputeBytesToRead();
-
- // If nothing to read for current item, advance to next item.
- if (bytes_to_read == 0) {
- AdvanceItem();
- return true;
- }
-
- // Do the reading.
- const BlobDataItem& item = *items.at(current_item_index_);
- if (item.type() == DataElement::TYPE_BYTES)
- return ReadBytesItem(item, bytes_to_read);
- if (item.type() == DataElement::TYPE_DISK_CACHE_ENTRY)
- return ReadDiskCacheEntryItem(item, bytes_to_read);
- if (!IsFileType(item.type())) {
- NOTREACHED();
- return false;
- }
- storage::FileStreamReader* const reader =
- GetFileStreamReader(current_item_index_);
- if (!reader) {
- NotifyFailure(net::ERR_FAILED);
- return false;
- }
-
- return ReadFileItem(reader, bytes_to_read);
-}
-
-void BlobURLRequestJob::AdvanceItem() {
- // Close the file if the current item is a file.
- DeleteCurrentFileReader();
-
- // Advance to the next item.
- current_item_index_++;
- current_item_offset_ = 0;
-}
-
-void BlobURLRequestJob::AdvanceBytesRead(int result) {
- DCHECK_GT(result, 0);
-
- // Do we finish reading the current item?
- current_item_offset_ += result;
- if (current_item_offset_ == item_length_list_[current_item_index_])
- AdvanceItem();
-
- // Subtract the remaining bytes.
- remaining_bytes_ -= result;
- DCHECK_GE(remaining_bytes_, 0);
-
- // Adjust the read buffer.
- read_buf_->DidConsume(result);
- DCHECK_GE(read_buf_->BytesRemaining(), 0);
-}
-
-bool BlobURLRequestJob::ReadBytesItem(const BlobDataItem& item,
- int bytes_to_read) {
- TRACE_EVENT1("Blob", "BlobRequest::ReadBytesItem", "uuid",
- blob_data_->uuid());
- DCHECK_GE(read_buf_->BytesRemaining(), bytes_to_read);
-
- memcpy(read_buf_->data(),
- item.bytes() + item.offset() + current_item_offset_,
- bytes_to_read);
-
- AdvanceBytesRead(bytes_to_read);
- return true;
-}
-
-bool BlobURLRequestJob::ReadFileItem(FileStreamReader* reader,
- int bytes_to_read) {
- DCHECK(!GetStatus().is_io_pending())
- << "Can't begin IO while another IO operation is pending.";
- DCHECK_GE(read_buf_->BytesRemaining(), bytes_to_read);
- DCHECK(reader);
- int chunk_number = current_file_chunk_number_++;
- TRACE_EVENT_ASYNC_BEGIN1("Blob", "BlobRequest::ReadFileItem", this, "uuid",
- blob_data_->uuid());
- const int result =
- reader->Read(read_buf_.get(), bytes_to_read,
- base::Bind(&BlobURLRequestJob::DidReadFile,
- weak_factory_.GetWeakPtr(), chunk_number));
- if (result >= 0) {
- AdvanceBytesRead(result);
- return true;
- }
- if (result == net::ERR_IO_PENDING)
- SetStatus(net::URLRequestStatus(net::URLRequestStatus::IO_PENDING, 0));
- else
- NotifyFailure(result);
- return false;
-}
-
-void BlobURLRequestJob::DidReadFile(int chunk_number, int result) {
- DCHECK(GetStatus().is_io_pending())
- << "Asynchronous IO completed while IO wasn't pending?";
- TRACE_EVENT_ASYNC_END1("Blob", "BlobRequest::ReadFileItem", this, "uuid",
- blob_data_->uuid());
- if (result <= 0) {
- NotifyFailure(result);
- return;
- }
- SetStatus(net::URLRequestStatus()); // Clear the IO_PENDING status
-
- AdvanceBytesRead(result);
-
- // Otherwise, continue the reading.
- int bytes_read = 0;
- if (ReadLoop(&bytes_read))
- NotifyReadComplete(bytes_read);
-}
-
-void BlobURLRequestJob::DeleteCurrentFileReader() {
- IndexToReaderMap::iterator found = index_to_reader_.find(current_item_index_);
- if (found != index_to_reader_.end() && found->second) {
- delete found->second;
- index_to_reader_.erase(found);
- }
-}
-
-bool BlobURLRequestJob::ReadDiskCacheEntryItem(const BlobDataItem& item,
- int bytes_to_read) {
- DCHECK(!GetStatus().is_io_pending())
- << "Can't begin IO while another IO operation is pending.";
- DCHECK_GE(read_buf_->BytesRemaining(), bytes_to_read);
-
- const int result = item.disk_cache_entry()->ReadData(
- item.disk_cache_stream_index(), current_item_offset_, read_buf_.get(),
- bytes_to_read, base::Bind(&BlobURLRequestJob::DidReadDiskCacheEntry,
- weak_factory_.GetWeakPtr()));
- if (result >= 0) {
- AdvanceBytesRead(result);
- return true;
- }
- if (result == net::ERR_IO_PENDING)
- SetStatus(net::URLRequestStatus(net::URLRequestStatus::IO_PENDING, 0));
- else
- NotifyFailure(result);
- return false;
+ net::HttpStatusCode status_code = net::HTTP_OK;
+ if (byte_range_set_ && byte_range_.IsValid())
+ status_code = net::HTTP_PARTIAL_CONTENT;
+ HeadersCompleted(status_code);
}
-void BlobURLRequestJob::DidReadDiskCacheEntry(int result) {
- DCHECK(GetStatus().is_io_pending())
- << "Asynchronous IO completed while IO wasn't pending?";
- if (result <= 0) {
+void BlobURLRequestJob::DidReadRawData(int result) {
+ TRACE_EVENT_ASYNC_END1("Blob", "BlobRequest::ReadRawData", this, "uuid",
+ blob_handle_ ? blob_handle_->uuid() : "NotFound");
+ if (result < 0) {
NotifyFailure(result);
return;
}
+ // Clear the IO_PENDING status
SetStatus(net::URLRequestStatus());
-
- AdvanceBytesRead(result);
-
- int bytes_read = 0;
- if (ReadLoop(&bytes_read))
- NotifyReadComplete(bytes_read);
-}
-
-int BlobURLRequestJob::BytesReadCompleted() {
- int bytes_read = read_buf_->BytesConsumed();
- read_buf_ = NULL;
- return bytes_read;
-}
-
-int BlobURLRequestJob::ComputeBytesToRead() const {
- int64 current_item_length = item_length_list_[current_item_index_];
-
- int64 item_remaining = current_item_length - current_item_offset_;
- int64 buf_remaining = read_buf_->BytesRemaining();
- int64 max_remaining = std::numeric_limits<int>::max();
-
- int64 min = std::min(std::min(std::min(item_remaining,
- buf_remaining),
- remaining_bytes_),
- max_remaining);
-
- return static_cast<int>(min);
-}
-
-bool BlobURLRequestJob::ReadLoop(int* bytes_read) {
- // Read until we encounter an error or could not get the data immediately.
- while (remaining_bytes_ > 0 && read_buf_->BytesRemaining() > 0) {
- if (!ReadItem())
- return false;
- }
-
- *bytes_read = BytesReadCompleted();
- return true;
-}
-
-void BlobURLRequestJob::NotifySuccess() {
- net::HttpStatusCode status_code = net::HTTP_OK;
- if (byte_range_set_ && byte_range_.IsValid())
- status_code = net::HTTP_PARTIAL_CONTENT;
- HeadersCompleted(status_code);
+ NotifyReadComplete(result);
}
void BlobURLRequestJob::NotifyFailure(int error_code) {
@@ -546,8 +237,8 @@ void BlobURLRequestJob::NotifyFailure(int error_code) {
// If we already return the headers on success, we can't change the headers
// now. Instead, we just error out.
if (response_info_) {
- NotifyDone(net::URLRequestStatus(net::URLRequestStatus::FAILED,
- error_code));
+ NotifyDone(
+ net::URLRequestStatus(net::URLRequestStatus::FAILED, error_code));
return;
}
@@ -582,10 +273,14 @@ void BlobURLRequestJob::HeadersCompleted(net::HttpStatusCode status_code) {
status.append("\0\0", 2);
net::HttpResponseHeaders* headers = new net::HttpResponseHeaders(status);
+ set_expected_content_size(0);
+
if (status_code == net::HTTP_OK || status_code == net::HTTP_PARTIAL_CONTENT) {
+ set_expected_content_size(blob_reader_->remaining_bytes());
std::string content_length_header(net::HttpRequestHeaders::kContentLength);
content_length_header.append(": ");
- content_length_header.append(base::Int64ToString(remaining_bytes_));
+ content_length_header.append(
+ base::Int64ToString(blob_reader_->remaining_bytes()));
headers->AddHeader(content_length_header);
if (status_code == net::HTTP_PARTIAL_CONTENT) {
DCHECK(byte_range_set_);
@@ -593,21 +288,22 @@ void BlobURLRequestJob::HeadersCompleted(net::HttpStatusCode status_code) {
std::string content_range_header(net::HttpResponseHeaders::kContentRange);
content_range_header.append(": bytes ");
content_range_header.append(base::StringPrintf(
- "%" PRId64 "-%" PRId64,
- byte_range_.first_byte_position(), byte_range_.last_byte_position()));
+ "%" PRId64 "-%" PRId64, byte_range_.first_byte_position(),
+ byte_range_.last_byte_position()));
content_range_header.append("/");
- content_range_header.append(base::StringPrintf("%" PRId64, total_size_));
+ content_range_header.append(
+ base::StringPrintf("%" PRId64, blob_reader_->total_size()));
headers->AddHeader(content_range_header);
}
- if (!blob_data_->content_type().empty()) {
+ if (!blob_handle_->content_type().empty()) {
std::string content_type_header(net::HttpRequestHeaders::kContentType);
content_type_header.append(": ");
- content_type_header.append(blob_data_->content_type());
+ content_type_header.append(blob_handle_->content_type());
headers->AddHeader(content_type_header);
}
- if (!blob_data_->content_disposition().empty()) {
+ if (!blob_handle_->content_disposition().empty()) {
std::string content_disposition_header("Content-Disposition: ");
- content_disposition_header.append(blob_data_->content_disposition());
+ content_disposition_header.append(blob_handle_->content_disposition());
headers->AddHeader(content_disposition_header);
}
}
@@ -615,69 +311,7 @@ void BlobURLRequestJob::HeadersCompleted(net::HttpStatusCode status_code) {
response_info_.reset(new net::HttpResponseInfo());
response_info_->headers = headers;
- set_expected_content_size(remaining_bytes_);
-
NotifyHeadersComplete();
}
-FileStreamReader* BlobURLRequestJob::GetFileStreamReader(size_t index) {
- const auto& items = blob_data_->items();
- DCHECK_LT(index, items.size());
- const BlobDataItem& item = *items.at(index);
- if (!IsFileType(item.type()))
- return nullptr;
- if (index_to_reader_.find(index) == index_to_reader_.end()) {
- if (!CreateFileStreamReader(index, 0))
- return nullptr;
- }
- DCHECK(index_to_reader_[index]);
- return index_to_reader_[index];
-}
-
-bool BlobURLRequestJob::CreateFileStreamReader(size_t index,
- int64 additional_offset) {
- const auto& items = blob_data_->items();
- DCHECK_LT(index, items.size());
- const BlobDataItem& item = *items.at(index);
- DCHECK(IsFileType(item.type()));
- DCHECK_EQ(0U, index_to_reader_.count(index));
-
- FileStreamReader* reader = nullptr;
- switch (item.type()) {
- case DataElement::TYPE_FILE:
- reader = FileStreamReader::CreateForLocalFile(
- file_task_runner_.get(), item.path(),
- item.offset() + additional_offset, item.expected_modification_time());
- DCHECK(reader);
- index_to_reader_[index] = reader;
- return true;
-
- case DataElement::TYPE_FILE_FILESYSTEM:
- reader = file_system_context_
- ->CreateFileStreamReader(
- storage::FileSystemURL(file_system_context_->CrackURL(
- item.filesystem_url())),
- item.offset() + additional_offset,
- item.length() == std::numeric_limits<uint64>::max()
- ? storage::kMaximumLength
- : item.length() - additional_offset,
- item.expected_modification_time())
- .release();
- if (reader) {
- index_to_reader_[index] = reader;
- return true;
- }
-
- // The file stream reader may not be obtainable if the file is on an
- // isolated file system, which has been unmounted.
- return false;
-
- default:
- break;
- }
-
- NOTREACHED();
- return false;
-}
-
} // namespace storage
diff --git a/storage/browser/blob/blob_url_request_job.h b/storage/browser/blob/blob_url_request_job.h
index 74d07ad..21baa2c 100644
--- a/storage/browser/blob/blob_url_request_job.h
+++ b/storage/browser/blob/blob_url_request_job.h
@@ -13,7 +13,6 @@
#include "net/http/http_byte_range.h"
#include "net/http/http_status_code.h"
#include "net/url_request/url_request_job.h"
-#include "storage/browser/blob/blob_data_snapshot.h"
#include "storage/browser/storage_browser_export.h"
namespace base {
@@ -27,6 +26,8 @@ class IOBuffer;
namespace storage {
+class BlobDataHandle;
+class BlobReader;
class FileStreamReader;
class FileSystemContext;
@@ -36,7 +37,7 @@ class STORAGE_EXPORT BlobURLRequestJob
public:
BlobURLRequestJob(net::URLRequest* request,
net::NetworkDelegate* network_delegate,
- scoped_ptr<BlobDataSnapshot> blob_data,
+ BlobDataHandle* blob_handle,
storage::FileSystemContext* file_system_context,
base::SingleThreadTaskRunner* resolving_thread_task_runner);
@@ -57,68 +58,20 @@ class STORAGE_EXPORT BlobURLRequestJob
// For preparing for read: get the size, apply the range and perform seek.
void DidStart();
- bool AddItemLength(size_t index, int64 item_length);
- bool CountSize();
- void DidCountSize(int error);
- void DidGetFileItemLength(size_t index, int64 result);
- void Seek(int64 offset);
-
- // For reading the blob.
- bool ReadLoop(int* bytes_read);
- bool ReadItem();
- void AdvanceItem();
- void AdvanceBytesRead(int result);
- bool ReadBytesItem(const BlobDataItem& item, int bytes_to_read);
-
- bool ReadFileItem(FileStreamReader* reader, int bytes_to_read);
- void DidReadFile(int chunk_number, int result);
- void DeleteCurrentFileReader();
-
- bool ReadDiskCacheEntryItem(const BlobDataItem& item, int bytes_to_read);
- void DidReadDiskCacheEntry(int result);
-
- int ComputeBytesToRead() const;
- int BytesReadCompleted();
-
- // These methods convert the result of blob data reading into response headers
- // and pass it to URLRequestJob's NotifyDone() or NotifyHeadersComplete().
- void NotifySuccess();
+ void DidCalculateSize(int result);
+ void DidReadRawData(int result);
+
void NotifyFailure(int);
void HeadersCompleted(net::HttpStatusCode status_code);
- // Returns a FileStreamReader for a blob item at |index|.
- // If the item at |index| is not of file this returns NULL.
- FileStreamReader* GetFileStreamReader(size_t index);
-
- // Creates a FileStreamReader for the item at |index| with additional_offset.
- // If failed, then returns false.
- bool CreateFileStreamReader(size_t index, int64 additional_offset);
-
- scoped_ptr<BlobDataSnapshot> blob_data_;
-
- // Variables for controlling read from |blob_data_|.
- scoped_refptr<storage::FileSystemContext> file_system_context_;
- scoped_refptr<base::SingleThreadTaskRunner> file_task_runner_;
- std::vector<int64> item_length_list_;
- int64 total_size_;
- int64 remaining_bytes_;
- int pending_get_file_info_count_;
- IndexToReaderMap index_to_reader_;
- size_t current_item_index_;
- int64 current_item_offset_;
-
- // Holds the buffer for read data with the IOBuffer interface.
- scoped_refptr<net::DrainableIOBuffer> read_buf_;
-
// Is set when NotifyFailure() is called and reset when DidStart is called.
bool error_;
bool byte_range_set_;
net::HttpByteRange byte_range_;
- // Used to create unique id's for tracing.
- int current_file_chunk_number_;
-
+ scoped_ptr<BlobDataHandle> blob_handle_;
+ scoped_ptr<BlobReader> blob_reader_;
scoped_ptr<net::HttpResponseInfo> response_info_;
base::WeakPtrFactory<BlobURLRequestJob> weak_factory_;
diff --git a/storage/browser/blob/blob_url_request_job_factory.cc b/storage/browser/blob/blob_url_request_job_factory.cc
index feb5df0..5961697 100644
--- a/storage/browser/blob/blob_url_request_job_factory.cc
+++ b/storage/browser/blob/blob_url_request_job_factory.cc
@@ -19,10 +19,6 @@ namespace {
int kUserDataKey; // The value is not important, the addr is a key.
-BlobDataHandle* GetRequestedBlobDataHandle(net::URLRequest* request) {
- return static_cast<BlobDataHandle*>(request->GetUserData(&kUserDataKey));
-}
-
} // namespace
// static
@@ -44,6 +40,12 @@ void BlobProtocolHandler::SetRequestedBlobDataHandle(
request->SetUserData(&kUserDataKey, blob_data_handle.release());
}
+// static
+BlobDataHandle* BlobProtocolHandler::GetRequestBlobDataHandle(
+ net::URLRequest* request) {
+ return static_cast<BlobDataHandle*>(request->GetUserData(&kUserDataKey));
+}
+
BlobProtocolHandler::BlobProtocolHandler(
BlobStorageContext* context,
storage::FileSystemContext* file_system_context,
@@ -59,18 +61,16 @@ BlobProtocolHandler::~BlobProtocolHandler() {
net::URLRequestJob* BlobProtocolHandler::MaybeCreateJob(
net::URLRequest* request, net::NetworkDelegate* network_delegate) const {
- return new storage::BlobURLRequestJob(request,
- network_delegate,
- LookupBlobData(request),
- file_system_context_.get(),
- file_task_runner_.get());
+ return new storage::BlobURLRequestJob(
+ request, network_delegate, LookupBlobHandle(request),
+ file_system_context_.get(), file_task_runner_.get());
}
-scoped_ptr<BlobDataSnapshot> BlobProtocolHandler::LookupBlobData(
+BlobDataHandle* BlobProtocolHandler::LookupBlobHandle(
net::URLRequest* request) const {
- BlobDataHandle* blob_data_handle = GetRequestedBlobDataHandle(request);
+ BlobDataHandle* blob_data_handle = GetRequestBlobDataHandle(request);
if (blob_data_handle)
- return blob_data_handle->CreateSnapshot().Pass();
+ return blob_data_handle;
if (!context_.get())
return NULL;
@@ -83,12 +83,11 @@ scoped_ptr<BlobDataSnapshot> BlobProtocolHandler::LookupBlobData(
return NULL;
std::string uuid = request->url().spec().substr(kPrefix.length());
scoped_ptr<BlobDataHandle> handle = context_->GetBlobDataFromUUID(uuid);
- scoped_ptr<BlobDataSnapshot> snapshot;
+ BlobDataHandle* handle_ptr = handle.get();
if (handle) {
- snapshot = handle->CreateSnapshot().Pass();
SetRequestedBlobDataHandle(request, handle.Pass());
}
- return snapshot.Pass();
+ return handle_ptr;
}
} // namespace storage
diff --git a/storage/browser/blob/blob_url_request_job_factory.h b/storage/browser/blob/blob_url_request_job_factory.h
index 7f7a550..dcb2fd6 100644
--- a/storage/browser/blob/blob_url_request_job_factory.h
+++ b/storage/browser/blob/blob_url_request_job_factory.h
@@ -26,7 +26,6 @@ class URLRequestContext;
namespace storage {
-class BlobDataSnapshot;
class BlobDataHandle;
class BlobStorageContext;
@@ -45,6 +44,9 @@ class STORAGE_EXPORT BlobProtocolHandler
net::URLRequest* request,
scoped_ptr<BlobDataHandle> blob_data_handle);
+  // Returns the handle attached to |request|, or nullptr if none was set.
+ static BlobDataHandle* GetRequestBlobDataHandle(net::URLRequest* request);
+
BlobProtocolHandler(
BlobStorageContext* context,
storage::FileSystemContext* file_system_context,
@@ -56,7 +58,7 @@ class STORAGE_EXPORT BlobProtocolHandler
net::NetworkDelegate* network_delegate) const override;
private:
- scoped_ptr<BlobDataSnapshot> LookupBlobData(net::URLRequest* request) const;
+ BlobDataHandle* LookupBlobHandle(net::URLRequest* request) const;
base::WeakPtr<BlobStorageContext> context_;
const scoped_refptr<storage::FileSystemContext> file_system_context_;
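
As a usage note for the attach/retrieve pair above, a hedged sketch (|context|, |request| and |uuid| are assumed to be in scope); this mirrors how LookupBlobHandle() avoids re-resolving the UUID on every request:

// Attach the handle when the request is set up (done by the embedder):
scoped_ptr<storage::BlobDataHandle> handle = context->GetBlobDataFromUUID(uuid);
if (handle)
  storage::BlobProtocolHandler::SetRequestedBlobDataHandle(request,
                                                           handle.Pass());

// Retrieve it later, e.g. when creating the job or building an upload body:
storage::BlobDataHandle* attached =
    storage::BlobProtocolHandler::GetRequestBlobDataHandle(request);
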
diff --git a/storage/browser/blob/upload_blob_element_reader.cc b/storage/browser/blob/upload_blob_element_reader.cc
new file mode 100644
index 0000000..dd0058f
--- /dev/null
+++ b/storage/browser/blob/upload_blob_element_reader.cc
@@ -0,0 +1,67 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "storage/browser/blob/upload_blob_element_reader.h"
+
+#include "net/base/net_errors.h"
+#include "storage/browser/blob/blob_data_handle.h"
+#include "storage/browser/blob/blob_reader.h"
+
+namespace storage {
+
+UploadBlobElementReader::UploadBlobElementReader(
+ scoped_ptr<storage::BlobReader> reader,
+ scoped_ptr<BlobDataHandle> handle)
+ : reader_(reader.Pass()), handle_(handle.Pass()) {}
+
+UploadBlobElementReader::~UploadBlobElementReader() {}
+
+int UploadBlobElementReader::Init(const net::CompletionCallback& callback) {
+ BlobReader::Status status = reader_->CalculateSize(callback);
+ switch (status) {
+ case BlobReader::Status::NET_ERROR:
+ return reader_->net_error();
+ case BlobReader::Status::IO_PENDING:
+ return net::ERR_IO_PENDING;
+ case BlobReader::Status::DONE:
+ return net::OK;
+ }
+ NOTREACHED();
+ return net::ERR_FAILED;
+}
+
+uint64_t UploadBlobElementReader::GetContentLength() const {
+ return reader_->total_size();
+}
+
+uint64_t UploadBlobElementReader::BytesRemaining() const {
+ return reader_->remaining_bytes();
+}
+
+bool UploadBlobElementReader::IsInMemory() const {
+ return reader_->IsInMemory();
+}
+
+int UploadBlobElementReader::Read(net::IOBuffer* buf,
+ int buf_length,
+ const net::CompletionCallback& callback) {
+ int length = 0;
+ BlobReader::Status status = reader_->Read(buf, buf_length, &length, callback);
+ switch (status) {
+ case BlobReader::Status::NET_ERROR:
+ return reader_->net_error();
+ case BlobReader::Status::IO_PENDING:
+ return net::ERR_IO_PENDING;
+ case BlobReader::Status::DONE:
+ return length;
+ }
+ NOTREACHED();
+ return net::ERR_FAILED;
+}
+
+const std::string& UploadBlobElementReader::uuid() const {
+ return handle_->uuid();
+}
+
+} // namespace storage
diff --git a/storage/browser/blob/upload_blob_element_reader.h b/storage/browser/blob/upload_blob_element_reader.h
new file mode 100644
index 0000000..72b7244
--- /dev/null
+++ b/storage/browser/blob/upload_blob_element_reader.h
@@ -0,0 +1,54 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef STORAGE_BROWSER_BLOB_UPLOAD_BLOB_ELEMENT_READER_H_
+#define STORAGE_BROWSER_BLOB_UPLOAD_BLOB_ELEMENT_READER_H_
+
+#include "base/macros.h"
+#include "base/memory/scoped_ptr.h"
+#include "net/base/completion_callback.h"
+#include "net/base/upload_element_reader.h"
+#include "storage/browser/storage_browser_export.h"
+
+namespace net {
+class IOBuffer;
+}
+
+namespace storage {
+class BlobDataHandle;
+class BlobReader;
+
+// This class wraps a BlobReader to conform to the net::UploadElementReader
+// interface. It also holds a handle to the blob so the blob stays in memory
+// while it is being read.
+class STORAGE_EXPORT UploadBlobElementReader
+ : NON_EXPORTED_BASE(public net::UploadElementReader) {
+ public:
+ UploadBlobElementReader(scoped_ptr<BlobReader> reader,
+ scoped_ptr<BlobDataHandle> handle);
+ ~UploadBlobElementReader() override;
+
+ int Init(const net::CompletionCallback& callback) override;
+
+ uint64_t GetContentLength() const override;
+
+ uint64_t BytesRemaining() const override;
+
+ bool IsInMemory() const override;
+
+ int Read(net::IOBuffer* buf,
+ int buf_length,
+ const net::CompletionCallback& callback) override;
+
+ const std::string& uuid() const;
+
+ private:
+ scoped_ptr<BlobReader> reader_;
+ scoped_ptr<BlobDataHandle> handle_;
+
+ DISALLOW_COPY_AND_ASSIGN(UploadBlobElementReader);
+};
+
+} // namespace storage
+#endif // STORAGE_BROWSER_BLOB_UPLOAD_BLOB_ELEMENT_READER_H_
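
A hedged sketch of how a caller (for example, the upload_data_stream_builder changes in this CL) might construct one of these readers from a blob handle. MakeBlobElementReader is a hypothetical helper, and the CreateReader() signature is assumed from its use in blob_url_request_job.cc:

#include "base/memory/scoped_ptr.h"
#include "base/sequenced_task_runner.h"
#include "net/base/upload_element_reader.h"
#include "storage/browser/blob/blob_data_handle.h"
#include "storage/browser/blob/blob_reader.h"
#include "storage/browser/blob/upload_blob_element_reader.h"
#include "storage/browser/fileapi/file_system_context.h"

scoped_ptr<net::UploadElementReader> MakeBlobElementReader(
    const storage::BlobDataHandle& handle,
    storage::FileSystemContext* file_system_context,
    base::SequencedTaskRunner* file_task_runner) {
  // Copy the handle so the blob stays alive as long as the element reader.
  scoped_ptr<storage::BlobDataHandle> handle_copy(
      new storage::BlobDataHandle(handle));
  scoped_ptr<storage::BlobReader> reader =
      handle_copy->CreateReader(file_system_context, file_task_runner);
  return scoped_ptr<net::UploadElementReader>(
      new storage::UploadBlobElementReader(reader.Pass(), handle_copy.Pass()));
}
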
diff --git a/storage/storage_browser.gyp b/storage/storage_browser.gyp
index 6a8b749..de1d166 100644
--- a/storage/storage_browser.gyp
+++ b/storage/storage_browser.gyp
@@ -33,6 +33,8 @@
'browser/blob/blob_data_item.h',
'browser/blob/blob_data_snapshot.cc',
'browser/blob/blob_data_snapshot.h',
+ 'browser/blob/blob_reader.cc',
+ 'browser/blob/blob_reader.h',
'browser/blob/blob_storage_context.cc',
'browser/blob/blob_storage_context.h',
'browser/blob/blob_url_request_job.cc',
@@ -47,6 +49,8 @@
'browser/blob/shareable_blob_data_item.h',
'browser/blob/shareable_file_reference.cc',
'browser/blob/shareable_file_reference.h',
+      'browser/blob/upload_blob_element_reader.cc',
+      'browser/blob/upload_blob_element_reader.h',
'browser/blob/view_blob_internals_job.cc',
'browser/blob/view_blob_internals_job.h',
'browser/database/database_quota_client.cc',