author     ttuttle@chromium.org <ttuttle@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>   2013-11-01 05:13:59 +0000
committer  ttuttle@chromium.org <ttuttle@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>   2013-11-01 05:13:59 +0000
commit     e8ce38d361c43af704975d9ad83a10e30503bcd1 (patch)
tree       74c5ecb2737302bdd7b450c420aa871a53f62c94
parent     6f7eaca0679576f44f152b382f0f0a24bf09fc66 (diff)
Simple Cache: Add basic sparse support
This consists of a single, optional "sparse file" that contains a series of
ranges, each starting with a (magic, offset, length) header. It is scanned at
open, if it exists, and is created on the first call to WriteSparseData.

BUG=280585

Review URL: https://codereview.chromium.org/23757032

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@232291 0039d316-1c4b-4281-b951-d872f2087c98
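For orientation, a rough sketch of the on-disk layout this describes, using the struct names added in simple_entry_format.h below (field sizes and padding are whatever the compiler gives those structs; this is not a byte-accurate dump):

    SimpleFileHeader               initial magic, version, key length, key hash
    <key bytes>
    SimpleFileSparseRangeHeader    range magic, offset, length, data crc32
    <length bytes of range data>
    SimpleFileSparseRangeHeader    ...
    <length bytes of range data>
    ...

Each WriteSparseData call that adds new data appends a (header, data) record at the tail, and ScanSparseFile walks the file at open by stepping sizeof(SimpleFileSparseRangeHeader) + length from one record header to the next.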
-rw-r--r--  net/disk_cache/entry_unittest.cc                   | 112
-rw-r--r--  net/disk_cache/simple/simple_entry_format.h        |   8
-rw-r--r--  net/disk_cache/simple/simple_entry_impl.cc         | 244
-rw-r--r--  net/disk_cache/simple/simple_entry_impl.h          |  33
-rw-r--r--  net/disk_cache/simple/simple_entry_operation.cc    | 139
-rw-r--r--  net/disk_cache/simple/simple_entry_operation.h     |  44
-rw-r--r--  net/disk_cache/simple/simple_synchronous_entry.cc  | 522
-rw-r--r--  net/disk_cache/simple/simple_synchronous_entry.h   |  81
-rw-r--r--  net/disk_cache/simple/simple_util.cc               |   4
-rw-r--r--  net/disk_cache/simple/simple_util.h                |   3
10 files changed, 1120 insertions(+), 70 deletions(-)
diff --git a/net/disk_cache/entry_unittest.cc b/net/disk_cache/entry_unittest.cc
index 0d10866..dabc391 100644
--- a/net/disk_cache/entry_unittest.cc
+++ b/net/disk_cache/entry_unittest.cc
@@ -1931,7 +1931,7 @@ void DiskCacheEntryTest::UpdateSparseEntry() {
entry1->Close();
entry2->Close();
FlushQueueForTest();
- if (memory_only_)
+ if (memory_only_ || simple_cache_mode_)
EXPECT_EQ(2, cache_->GetEntryCount());
else
EXPECT_EQ(3, cache_->GetEntryCount());
@@ -1964,20 +1964,15 @@ void DiskCacheEntryTest::DoomSparseEntry() {
int64 offset = 1024;
// Write to a bunch of ranges.
for (int i = 0; i < 12; i++) {
- EXPECT_EQ(kSize,
- entry1->WriteSparseData(
- offset, buf.get(), kSize, net::CompletionCallback()));
+ EXPECT_EQ(kSize, WriteSparseData(entry1, offset, buf.get(), kSize));
// Keep the second map under the default size.
- if (i < 9) {
- EXPECT_EQ(kSize,
- entry2->WriteSparseData(
- offset, buf.get(), kSize, net::CompletionCallback()));
- }
+ if (i < 9)
+ EXPECT_EQ(kSize, WriteSparseData(entry2, offset, buf.get(), kSize));
offset *= 4;
}
- if (memory_only_)
+ if (memory_only_ || simple_cache_mode_)
EXPECT_EQ(2, cache_->GetEntryCount());
else
EXPECT_EQ(15, cache_->GetEntryCount());
@@ -2110,7 +2105,7 @@ void DiskCacheEntryTest::PartialSparseEntry() {
int rv;
int64 start;
net::TestCompletionCallback cb;
- if (memory_only_) {
+ if (memory_only_ || simple_cache_mode_) {
rv = entry->GetAvailableRange(0, 600, &start, cb.callback());
EXPECT_EQ(100, cb.GetResult(rv));
EXPECT_EQ(500, start);
@@ -2129,7 +2124,7 @@ void DiskCacheEntryTest::PartialSparseEntry() {
// 1. Query before a filled 1KB block.
// 2. Query within a filled 1KB block.
// 3. Query beyond a filled 1KB block.
- if (memory_only_) {
+ if (memory_only_ || simple_cache_mode_) {
rv = entry->GetAvailableRange(19400, kSize, &start, cb.callback());
EXPECT_EQ(3496, cb.GetResult(rv));
EXPECT_EQ(20000, start);
@@ -3527,8 +3522,9 @@ TEST_F(DiskCacheEntryTest, SimpleCacheStream1SizeChanges) {
ASSERT_TRUE(entry_file0 != base::kInvalidPlatformFileValue);
int data_size[disk_cache::kSimpleEntryStreamCount] = {kSize, stream1_size, 0};
+ int sparse_data_size = 0;
disk_cache::SimpleEntryStat entry_stat(
- base::Time::Now(), base::Time::Now(), data_size);
+ base::Time::Now(), base::Time::Now(), data_size, sparse_data_size);
int eof_offset = entry_stat.GetEOFOffsetInFile(key, 0);
disk_cache::SimpleFileEOF eof_record;
ASSERT_EQ(static_cast<int>(sizeof(eof_record)), base::ReadPlatformFile(
@@ -3812,4 +3808,94 @@ TEST_F(DiskCacheEntryTest, SimpleCacheDoomOptimisticWritesRace) {
}
}
+TEST_F(DiskCacheEntryTest, SimpleCacheBasicSparseIO) {
+ SetSimpleCacheMode();
+ InitCache();
+ BasicSparseIO();
+}
+
+TEST_F(DiskCacheEntryTest, SimpleCacheHugeSparseIO) {
+ SetSimpleCacheMode();
+ InitCache();
+ HugeSparseIO();
+}
+
+TEST_F(DiskCacheEntryTest, SimpleCacheGetAvailableRange) {
+ SetSimpleCacheMode();
+ InitCache();
+ GetAvailableRange();
+}
+
+TEST_F(DiskCacheEntryTest, DISABLED_SimpleCacheCouldBeSparse) {
+ SetSimpleCacheMode();
+ InitCache();
+ CouldBeSparse();
+}
+
+TEST_F(DiskCacheEntryTest, SimpleCacheUpdateSparseEntry) {
+ SetSimpleCacheMode();
+ InitCache();
+ UpdateSparseEntry();
+}
+
+TEST_F(DiskCacheEntryTest, SimpleCacheDoomSparseEntry) {
+ SetSimpleCacheMode();
+ InitCache();
+ DoomSparseEntry();
+}
+
+TEST_F(DiskCacheEntryTest, SimpleCachePartialSparseEntry) {
+ SetSimpleCacheMode();
+ InitCache();
+ PartialSparseEntry();
+}
+
+TEST_F(DiskCacheEntryTest, SimpleCacheTruncateLargeSparseFile) {
+ const int kSize = 1024;
+
+ SetSimpleCacheMode();
+ // An entry is allowed sparse data 1/10 the size of the cache, so this size
+ // allows for one |kSize|-sized range plus overhead, but not two ranges.
+ SetMaxSize(kSize * 15);
+ InitCache();
+
+ const char key[] = "key";
+ disk_cache::Entry* null = NULL;
+ disk_cache::Entry* entry;
+ ASSERT_EQ(net::OK, CreateEntry(key, &entry));
+ EXPECT_NE(null, entry);
+
+ scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
+ CacheTestFillBuffer(buffer->data(), kSize, false);
+ net::TestCompletionCallback callback;
+ int ret;
+
+ // Verify initial conditions.
+ ret = entry->ReadSparseData(0, buffer, kSize, callback.callback());
+ EXPECT_EQ(0, callback.GetResult(ret));
+
+ ret = entry->ReadSparseData(kSize, buffer, kSize, callback.callback());
+ EXPECT_EQ(0, callback.GetResult(ret));
+
+ // Write a range and make sure it reads back.
+ ret = entry->WriteSparseData(0, buffer, kSize, callback.callback());
+ EXPECT_EQ(kSize, callback.GetResult(ret));
+
+ ret = entry->ReadSparseData(0, buffer, kSize, callback.callback());
+ EXPECT_EQ(kSize, callback.GetResult(ret));
+
+ // Write another range and make sure it reads back.
+ ret = entry->WriteSparseData(kSize, buffer, kSize, callback.callback());
+ EXPECT_EQ(kSize, callback.GetResult(ret));
+
+ ret = entry->ReadSparseData(kSize, buffer, kSize, callback.callback());
+ EXPECT_EQ(kSize, callback.GetResult(ret));
+
+ // Make sure the first range was removed when the second was written.
+ ret = entry->ReadSparseData(0, buffer, kSize, callback.callback());
+ EXPECT_EQ(0, callback.GetResult(ret));
+
+ entry->Close();
+}
+
#endif // defined(OS_POSIX)
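To make the sizing in SimpleCacheTruncateLargeSparseFile above concrete (using kMaxSparseDataSizeDivisor = 10, introduced in simple_entry_impl.cc below):

    max cache size           = 15 * kSize = 15360 bytes
    per-entry sparse budget  = 15360 / 10 =  1536 bytes

WriteSparseData compares sparse_data_size + buf_len against that budget, so the first 1024-byte write (0 + 1024 <= 1536) is simply appended, while the second (1024 + 1024 > 1536) truncates the sparse file before appending its range, which is why the final read at offset 0 returns 0.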
diff --git a/net/disk_cache/simple/simple_entry_format.h b/net/disk_cache/simple/simple_entry_format.h
index 8224b85..46a8c4a 100644
--- a/net/disk_cache/simple/simple_entry_format.h
+++ b/net/disk_cache/simple/simple_entry_format.h
@@ -18,6 +18,7 @@ namespace disk_cache {
const uint64 kSimpleInitialMagicNumber = GG_UINT64_C(0xfcfb6d1ba7725c30);
const uint64 kSimpleFinalMagicNumber = GG_UINT64_C(0xf4fa6f45970d41d8);
+const uint64 kSimpleSparseRangeMagicNumber = GG_UINT64_C(0xeb97bf016553676b);
// A file containing stream 0 and stream 1 in the Simple cache consists of:
// - a SimpleFileHeader.
@@ -58,6 +59,13 @@ struct NET_EXPORT_PRIVATE SimpleFileEOF {
uint32 stream_size;
};
+struct SimpleFileSparseRangeHeader {
+ uint64 sparse_range_magic_number;
+ int64 offset;
+ int64 length;
+ uint32 data_crc32;
+};
+
} // namespace disk_cache
#endif // NET_DISK_CACHE_SIMPLE_SIMPLE_ENTRY_FORMAT_H_
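Field-size arithmetic for the new range header (a rough figure only; the code reads and writes sizeof(SimpleFileSparseRangeHeader), so the authoritative on-disk size is whatever sizeof yields for the build, padding included):

    sparse_range_magic_number   8 bytes
    offset                      8 bytes
    length                      8 bytes
    data_crc32                  4 bytes
    -----------------------------------
    28 bytes of fields per range header, before any trailing padding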
diff --git a/net/disk_cache/simple/simple_entry_impl.cc b/net/disk_cache/simple/simple_entry_impl.cc
index be7788b..45ec1f9 100644
--- a/net/disk_cache/simple/simple_entry_impl.cc
+++ b/net/disk_cache/simple/simple_entry_impl.cc
@@ -31,6 +31,10 @@
namespace disk_cache {
namespace {
+// An entry can store sparse data taking up to 1 / kMaxSparseDataSizeDivisor of
+// the cache.
+const int64 kMaxSparseDataSizeDivisor = 10;
+
// Used in histograms, please only add entries at the end.
enum ReadResult {
READ_RESULT_SUCCESS = 0,
@@ -170,6 +174,7 @@ SimpleEntryImpl::SimpleEntryImpl(net::CacheType cache_type,
use_optimistic_operations_(operations_mode == OPTIMISTIC_OPERATIONS),
last_used_(Time::Now()),
last_modified_(last_used_),
+ sparse_data_size_(0),
open_count_(0),
doomed_(false),
state_(STATE_UNINITIALIZED),
@@ -463,9 +468,11 @@ int SimpleEntryImpl::ReadSparseData(int64 offset,
int buf_len,
const CompletionCallback& callback) {
DCHECK(io_thread_checker_.CalledOnValidThread());
- // TODO(gavinp): Determine if the simple backend should support sparse data.
- NOTIMPLEMENTED();
- return net::ERR_FAILED;
+
+ ScopedOperationRunner operation_runner(this);
+ pending_operations_.push(SimpleEntryOperation::ReadSparseOperation(
+ this, offset, buf_len, buf, callback));
+ return net::ERR_IO_PENDING;
}
int SimpleEntryImpl::WriteSparseData(int64 offset,
@@ -473,9 +480,11 @@ int SimpleEntryImpl::WriteSparseData(int64 offset,
int buf_len,
const CompletionCallback& callback) {
DCHECK(io_thread_checker_.CalledOnValidThread());
- // TODO(gavinp): Determine if the simple backend should support sparse data.
- NOTIMPLEMENTED();
- return net::ERR_FAILED;
+
+ ScopedOperationRunner operation_runner(this);
+ pending_operations_.push(SimpleEntryOperation::WriteSparseOperation(
+ this, offset, buf_len, buf, callback));
+ return net::ERR_IO_PENDING;
}
int SimpleEntryImpl::GetAvailableRange(int64 offset,
@@ -483,28 +492,32 @@ int SimpleEntryImpl::GetAvailableRange(int64 offset,
int64* start,
const CompletionCallback& callback) {
DCHECK(io_thread_checker_.CalledOnValidThread());
- // TODO(gavinp): Determine if the simple backend should support sparse data.
- NOTIMPLEMENTED();
- return net::ERR_FAILED;
+
+ ScopedOperationRunner operation_runner(this);
+ pending_operations_.push(SimpleEntryOperation::GetAvailableRangeOperation(
+ this, offset, len, start, callback));
+ return net::ERR_IO_PENDING;
}
bool SimpleEntryImpl::CouldBeSparse() const {
DCHECK(io_thread_checker_.CalledOnValidThread());
- // TODO(gavinp): Determine if the simple backend should support sparse data.
- return false;
+ // TODO(ttuttle): Actually check.
+ return true;
}
void SimpleEntryImpl::CancelSparseIO() {
DCHECK(io_thread_checker_.CalledOnValidThread());
- // TODO(gavinp): Determine if the simple backend should support sparse data.
- NOTIMPLEMENTED();
+ // The Simple Cache does not return distinct objects for the same non-doomed
+ // entry, so there's no need to coordinate which object is performing sparse
+ // I/O. Therefore, CancelSparseIO and ReadyForSparseIO succeed instantly.
}
int SimpleEntryImpl::ReadyForSparseIO(const CompletionCallback& callback) {
DCHECK(io_thread_checker_.CalledOnValidThread());
- // TODO(gavinp): Determine if the simple backend should support sparse data.
- NOTIMPLEMENTED();
- return net::ERR_NOT_IMPLEMENTED;
+  // The Simple Cache does not return distinct objects for the same non-doomed
+ // entry, so there's no need to coordinate which object is performing sparse
+ // I/O. Therefore, CancelSparseIO and ReadyForSparseIO succeed instantly.
+ return net::OK;
}
SimpleEntryImpl::~SimpleEntryImpl() {
@@ -608,6 +621,24 @@ void SimpleEntryImpl::RunNextOperationIfNeeded() {
operation->callback(),
operation->truncate());
break;
+ case SimpleEntryOperation::TYPE_READ_SPARSE:
+ ReadSparseDataInternal(operation->sparse_offset(),
+ operation->buf(),
+ operation->length(),
+ operation->callback());
+ break;
+ case SimpleEntryOperation::TYPE_WRITE_SPARSE:
+ WriteSparseDataInternal(operation->sparse_offset(),
+ operation->buf(),
+ operation->length(),
+ operation->callback());
+ break;
+ case SimpleEntryOperation::TYPE_GET_AVAILABLE_RANGE:
+ GetAvailableRangeInternal(operation->sparse_offset(),
+ operation->length(),
+ operation->out_start(),
+ operation->callback());
+ break;
case SimpleEntryOperation::TYPE_DOOM:
DoomEntryInternal(operation->callback());
break;
@@ -651,7 +682,8 @@ void SimpleEntryImpl::OpenEntryInternal(bool have_index,
const base::TimeTicks start_time = base::TimeTicks::Now();
scoped_ptr<SimpleEntryCreationResults> results(
new SimpleEntryCreationResults(
- SimpleEntryStat(last_used_, last_modified_, data_size_)));
+ SimpleEntryStat(last_used_, last_modified_, data_size_,
+ sparse_data_size_)));
Closure task = base::Bind(&SimpleSynchronousEntry::OpenEntry,
cache_type_,
path_,
@@ -699,7 +731,8 @@ void SimpleEntryImpl::CreateEntryInternal(bool have_index,
const base::TimeTicks start_time = base::TimeTicks::Now();
scoped_ptr<SimpleEntryCreationResults> results(
new SimpleEntryCreationResults(
- SimpleEntryStat(last_used_, last_modified_, data_size_)));
+ SimpleEntryStat(last_used_, last_modified_, data_size_,
+ sparse_data_size_)));
Closure task = base::Bind(&SimpleSynchronousEntry::CreateEntry,
cache_type_,
path_,
@@ -746,7 +779,8 @@ void SimpleEntryImpl::CloseInternal() {
Closure task =
base::Bind(&SimpleSynchronousEntry::Close,
base::Unretained(synchronous_entry_),
- SimpleEntryStat(last_used_, last_modified_, data_size_),
+ SimpleEntryStat(last_used_, last_modified_, data_size_,
+ sparse_data_size_),
base::Passed(&crc32s_to_write),
stream_0_data_);
Closure reply = base::Bind(&SimpleEntryImpl::CloseOperationComplete, this);
@@ -825,7 +859,8 @@ void SimpleEntryImpl::ReadDataInternal(int stream_index,
scoped_ptr<uint32> read_crc32(new uint32());
scoped_ptr<int> result(new int());
scoped_ptr<SimpleEntryStat> entry_stat(
- new SimpleEntryStat(last_used_, last_modified_, data_size_));
+ new SimpleEntryStat(last_used_, last_modified_, data_size_,
+ sparse_data_size_));
Closure task = base::Bind(
&SimpleSynchronousEntry::ReadData,
base::Unretained(synchronous_entry_),
@@ -908,7 +943,8 @@ void SimpleEntryImpl::WriteDataInternal(int stream_index,
// |entry_stat| needs to be initialized before modifying |data_size_|.
scoped_ptr<SimpleEntryStat> entry_stat(
- new SimpleEntryStat(last_used_, last_modified_, data_size_));
+ new SimpleEntryStat(last_used_, last_modified_, data_size_,
+ sparse_data_size_));
if (truncate) {
data_size_[stream_index] = offset + buf_len;
} else {
@@ -944,6 +980,100 @@ void SimpleEntryImpl::WriteDataInternal(int stream_index,
worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
}
+void SimpleEntryImpl::ReadSparseDataInternal(
+ int64 sparse_offset,
+ net::IOBuffer* buf,
+ int buf_len,
+ const CompletionCallback& callback) {
+ DCHECK(io_thread_checker_.CalledOnValidThread());
+ ScopedOperationRunner operation_runner(this);
+
+ DCHECK_EQ(STATE_READY, state_);
+ state_ = STATE_IO_PENDING;
+
+ scoped_ptr<int> result(new int());
+ scoped_ptr<base::Time> last_used(new base::Time());
+ Closure task = base::Bind(&SimpleSynchronousEntry::ReadSparseData,
+ base::Unretained(synchronous_entry_),
+ SimpleSynchronousEntry::EntryOperationData(
+ sparse_offset, buf_len),
+ make_scoped_refptr(buf),
+ last_used.get(),
+ result.get());
+ Closure reply = base::Bind(&SimpleEntryImpl::ReadSparseOperationComplete,
+ this,
+ callback,
+ base::Passed(&last_used),
+ base::Passed(&result));
+ worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
+}
+
+void SimpleEntryImpl::WriteSparseDataInternal(
+ int64 sparse_offset,
+ net::IOBuffer* buf,
+ int buf_len,
+ const CompletionCallback& callback) {
+ DCHECK(io_thread_checker_.CalledOnValidThread());
+ ScopedOperationRunner operation_runner(this);
+
+ DCHECK_EQ(STATE_READY, state_);
+ state_ = STATE_IO_PENDING;
+
+ int64 max_sparse_data_size = kint64max;
+ if (backend_.get()) {
+ int64 max_cache_size = backend_->index()->max_size();
+ max_sparse_data_size = max_cache_size / kMaxSparseDataSizeDivisor;
+ }
+
+ scoped_ptr<SimpleEntryStat> entry_stat(
+ new SimpleEntryStat(last_used_, last_modified_, data_size_,
+ sparse_data_size_));
+
+ last_used_ = last_modified_ = base::Time::Now();
+
+ scoped_ptr<int> result(new int());
+ Closure task = base::Bind(&SimpleSynchronousEntry::WriteSparseData,
+ base::Unretained(synchronous_entry_),
+ SimpleSynchronousEntry::EntryOperationData(
+ sparse_offset, buf_len),
+ make_scoped_refptr(buf),
+ max_sparse_data_size,
+ entry_stat.get(),
+ result.get());
+ Closure reply = base::Bind(&SimpleEntryImpl::WriteSparseOperationComplete,
+ this,
+ callback,
+ base::Passed(&entry_stat),
+ base::Passed(&result));
+ worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
+}
+
+void SimpleEntryImpl::GetAvailableRangeInternal(
+ int64 sparse_offset,
+ int len,
+ int64* out_start,
+ const CompletionCallback& callback) {
+ DCHECK(io_thread_checker_.CalledOnValidThread());
+ ScopedOperationRunner operation_runner(this);
+
+ DCHECK_EQ(STATE_READY, state_);
+ state_ = STATE_IO_PENDING;
+
+ scoped_ptr<int> result(new int());
+ Closure task = base::Bind(&SimpleSynchronousEntry::GetAvailableRange,
+ base::Unretained(synchronous_entry_),
+ SimpleSynchronousEntry::EntryOperationData(
+ sparse_offset, len),
+ out_start,
+ result.get());
+ Closure reply = base::Bind(
+ &SimpleEntryImpl::GetAvailableRangeOperationComplete,
+ this,
+ callback,
+ base::Passed(&result));
+ worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
+}
+
void SimpleEntryImpl::DoomEntryInternal(const CompletionCallback& callback) {
PostTaskAndReplyWithResult(
worker_pool_, FROM_HERE,
@@ -1007,7 +1137,6 @@ void SimpleEntryImpl::CreationOperationComplete(
}
void SimpleEntryImpl::EntryOperationComplete(
- int stream_index,
const CompletionCallback& completion_callback,
const SimpleEntryStat& entry_stat,
scoped_ptr<int> result) {
@@ -1015,12 +1144,11 @@ void SimpleEntryImpl::EntryOperationComplete(
DCHECK(synchronous_entry_);
DCHECK_EQ(STATE_IO_PENDING, state_);
DCHECK(result);
- state_ = STATE_READY;
if (*result < 0) {
- MarkAsDoomed();
state_ = STATE_FAILURE;
- crc32s_end_offset_[stream_index] = 0;
+ MarkAsDoomed();
} else {
+ state_ = STATE_READY;
UpdateDataFromEntryStat(entry_stat);
}
@@ -1086,6 +1214,10 @@ void SimpleEntryImpl::ReadOperationComplete(
}
if (*result < 0) {
+ crc32s_end_offset_[stream_index] = 0;
+ }
+
+ if (*result < 0) {
RecordReadResult(cache_type_, READ_RESULT_SYNC_READ_FAILURE);
} else {
RecordReadResult(cache_type_, READ_RESULT_SUCCESS);
@@ -1100,8 +1232,7 @@ void SimpleEntryImpl::ReadOperationComplete(
CreateNetLogReadWriteCompleteCallback(*result));
}
- EntryOperationComplete(
- stream_index, completion_callback, *entry_stat, result.Pass());
+ EntryOperationComplete(completion_callback, *entry_stat, result.Pass());
}
void SimpleEntryImpl::WriteOperationComplete(
@@ -1118,8 +1249,47 @@ void SimpleEntryImpl::WriteOperationComplete(
CreateNetLogReadWriteCompleteCallback(*result));
}
- EntryOperationComplete(
- stream_index, completion_callback, *entry_stat, result.Pass());
+ if (*result < 0) {
+ crc32s_end_offset_[stream_index] = 0;
+ }
+
+ EntryOperationComplete(completion_callback, *entry_stat, result.Pass());
+}
+
+void SimpleEntryImpl::ReadSparseOperationComplete(
+ const CompletionCallback& completion_callback,
+ scoped_ptr<base::Time> last_used,
+ scoped_ptr<int> result) {
+ DCHECK(io_thread_checker_.CalledOnValidThread());
+ DCHECK(synchronous_entry_);
+ DCHECK(result);
+
+ SimpleEntryStat entry_stat(*last_used, last_modified_, data_size_,
+ sparse_data_size_);
+ EntryOperationComplete(completion_callback, entry_stat, result.Pass());
+}
+
+void SimpleEntryImpl::WriteSparseOperationComplete(
+ const CompletionCallback& completion_callback,
+ scoped_ptr<SimpleEntryStat> entry_stat,
+ scoped_ptr<int> result) {
+ DCHECK(io_thread_checker_.CalledOnValidThread());
+ DCHECK(synchronous_entry_);
+ DCHECK(result);
+
+ EntryOperationComplete(completion_callback, *entry_stat, result.Pass());
+}
+
+void SimpleEntryImpl::GetAvailableRangeOperationComplete(
+ const CompletionCallback& completion_callback,
+ scoped_ptr<int> result) {
+ DCHECK(io_thread_checker_.CalledOnValidThread());
+ DCHECK(synchronous_entry_);
+ DCHECK(result);
+
+ SimpleEntryStat entry_stat(last_used_, last_modified_, data_size_,
+ sparse_data_size_);
+ EntryOperationComplete(completion_callback, entry_stat, result.Pass());
}
void SimpleEntryImpl::DoomOperationComplete(
@@ -1164,11 +1334,9 @@ void SimpleEntryImpl::ChecksumOperationComplete(
CreateNetLogReadWriteCompleteCallback(*result));
}
- EntryOperationComplete(
- stream_index,
- completion_callback,
- SimpleEntryStat(last_used_, last_modified_, data_size_),
- result.Pass());
+ SimpleEntryStat entry_stat(last_used_, last_modified_, data_size_,
+ sparse_data_size_);
+ EntryOperationComplete(completion_callback, entry_stat, result.Pass());
}
void SimpleEntryImpl::CloseOperationComplete() {
@@ -1193,6 +1361,7 @@ void SimpleEntryImpl::UpdateDataFromEntryStat(
for (int i = 0; i < kSimpleEntryStreamCount; ++i) {
data_size_[i] = entry_stat.data_size(i);
}
+ sparse_data_size_ = entry_stat.sparse_data_size();
if (!doomed_ && backend_.get())
backend_->index()->UpdateEntrySize(entry_hash_, GetDiskUsage());
}
@@ -1203,6 +1372,7 @@ int64 SimpleEntryImpl::GetDiskUsage() const {
file_size +=
simple_util::GetFileSizeFromKeyAndDataSize(key_, data_size_[i]);
}
+ file_size += sparse_data_size_;
return file_size;
}
@@ -1286,7 +1456,8 @@ int SimpleEntryImpl::ReadStream0Data(net::IOBuffer* buf,
}
memcpy(buf->data(), stream_0_data_->data() + offset, buf_len);
UpdateDataFromEntryStat(
- SimpleEntryStat(base::Time::Now(), last_modified_, data_size_));
+ SimpleEntryStat(base::Time::Now(), last_modified_, data_size_,
+ sparse_data_size_));
RecordReadResult(cache_type_, READ_RESULT_SUCCESS);
return buf_len;
}
@@ -1324,7 +1495,8 @@ int SimpleEntryImpl::SetStream0Data(net::IOBuffer* buf,
base::Time modification_time = base::Time::Now();
AdvanceCrc(buf, offset, buf_len, 0);
UpdateDataFromEntryStat(
- SimpleEntryStat(modification_time, modification_time, data_size_));
+ SimpleEntryStat(modification_time, modification_time, data_size_,
+ sparse_data_size_));
RecordWriteResult(cache_type_, WRITE_RESULT_SUCCESS);
return buf_len;
}
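For reference, a minimal caller-side sketch of the sparse entry API as now wired up (same pattern as the tests above; each call returns net::ERR_IO_PENDING and completes through its callback):

    scoped_refptr<net::IOBuffer> buf(new net::IOBuffer(1024));
    CacheTestFillBuffer(buf->data(), 1024, false);
    net::TestCompletionCallback cb;

    int rv = entry->WriteSparseData(0, buf.get(), 1024, cb.callback());
    EXPECT_EQ(1024, cb.GetResult(rv));

    int64 start = 0;
    rv = entry->GetAvailableRange(0, 4096, &start, cb.callback());
    EXPECT_EQ(1024, cb.GetResult(rv));  // 1024 bytes stored, starting at |start| == 0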
diff --git a/net/disk_cache/simple/simple_entry_impl.h b/net/disk_cache/simple/simple_entry_impl.h
index e2f0c63..2d78d8b 100644
--- a/net/disk_cache/simple/simple_entry_impl.h
+++ b/net/disk_cache/simple/simple_entry_impl.h
@@ -188,6 +188,21 @@ class NET_EXPORT_PRIVATE SimpleEntryImpl : public Entry,
const CompletionCallback& callback,
bool truncate);
+ void ReadSparseDataInternal(int64 sparse_offset,
+ net::IOBuffer* buf,
+ int buf_len,
+ const CompletionCallback& callback);
+
+ void WriteSparseDataInternal(int64 sparse_offset,
+ net::IOBuffer* buf,
+ int buf_len,
+ const CompletionCallback& callback);
+
+ void GetAvailableRangeInternal(int64 sparse_offset,
+ int len,
+ int64* out_start,
+ const CompletionCallback& callback);
+
void DoomEntryInternal(const CompletionCallback& callback);
// Called after a SimpleSynchronousEntry has completed CreateEntry() or
@@ -208,8 +223,7 @@ class NET_EXPORT_PRIVATE SimpleEntryImpl : public Entry,
// Internal utility method used by other completion methods. Calls
// |completion_callback| after updating state and dooming on errors.
- void EntryOperationComplete(int stream_index,
- const CompletionCallback& completion_callback,
+ void EntryOperationComplete(const CompletionCallback& completion_callback,
const SimpleEntryStat& entry_stat,
scoped_ptr<int> result);
@@ -227,6 +241,20 @@ class NET_EXPORT_PRIVATE SimpleEntryImpl : public Entry,
scoped_ptr<SimpleEntryStat> entry_stat,
scoped_ptr<int> result);
+ void ReadSparseOperationComplete(
+ const CompletionCallback& completion_callback,
+ scoped_ptr<base::Time> last_used,
+ scoped_ptr<int> result);
+
+ void WriteSparseOperationComplete(
+ const CompletionCallback& completion_callback,
+ scoped_ptr<SimpleEntryStat> entry_stat,
+ scoped_ptr<int> result);
+
+ void GetAvailableRangeOperationComplete(
+ const CompletionCallback& completion_callback,
+ scoped_ptr<int> result);
+
// Called after an asynchronous doom completes.
void DoomOperationComplete(const CompletionCallback& callback,
State state_to_restore,
@@ -287,6 +315,7 @@ class NET_EXPORT_PRIVATE SimpleEntryImpl : public Entry,
base::Time last_used_;
base::Time last_modified_;
int32 data_size_[kSimpleEntryStreamCount];
+ int32 sparse_data_size_;
// Number of times this object has been returned from Backend::OpenEntry() and
// Backend::CreateEntry() without subsequent Entry::Close() calls. Used to
diff --git a/net/disk_cache/simple/simple_entry_operation.cc b/net/disk_cache/simple/simple_entry_operation.cc
index d4e7608..7dfe0d2 100644
--- a/net/disk_cache/simple/simple_entry_operation.cc
+++ b/net/disk_cache/simple/simple_entry_operation.cc
@@ -11,13 +11,36 @@
namespace disk_cache {
+namespace {
+
+bool IsReadWriteType(unsigned int type) {
+ return type == SimpleEntryOperation::TYPE_READ ||
+ type == SimpleEntryOperation::TYPE_WRITE ||
+ type == SimpleEntryOperation::TYPE_READ_SPARSE ||
+ type == SimpleEntryOperation::TYPE_WRITE_SPARSE;
+}
+
+bool IsReadType(unsigned type) {
+ return type == SimpleEntryOperation::TYPE_READ ||
+ type == SimpleEntryOperation::TYPE_READ_SPARSE;
+}
+
+bool IsSparseType(unsigned type) {
+ return type == SimpleEntryOperation::TYPE_READ_SPARSE ||
+ type == SimpleEntryOperation::TYPE_WRITE_SPARSE;
+}
+
+}
+
SimpleEntryOperation::SimpleEntryOperation(const SimpleEntryOperation& other)
: entry_(other.entry_.get()),
buf_(other.buf_),
callback_(other.callback_),
out_entry_(other.out_entry_),
offset_(other.offset_),
+ sparse_offset_(other.sparse_offset_),
length_(other.length_),
+ out_start_(other.out_start_),
type_(other.type_),
have_index_(other.have_index_),
index_(other.index_),
@@ -40,6 +63,8 @@ SimpleEntryOperation SimpleEntryOperation::OpenOperation(
out_entry,
0,
0,
+ 0,
+ NULL,
TYPE_OPEN,
have_index,
0,
@@ -60,6 +85,8 @@ SimpleEntryOperation SimpleEntryOperation::CreateOperation(
out_entry,
0,
0,
+ 0,
+ NULL,
TYPE_CREATE,
have_index,
0,
@@ -77,6 +104,8 @@ SimpleEntryOperation SimpleEntryOperation::CloseOperation(
NULL,
0,
0,
+ 0,
+ NULL,
TYPE_CLOSE,
false,
0,
@@ -99,7 +128,9 @@ SimpleEntryOperation SimpleEntryOperation::ReadOperation(
callback,
NULL,
offset,
+ 0,
length,
+ NULL,
TYPE_READ,
false,
index,
@@ -123,7 +154,9 @@ SimpleEntryOperation SimpleEntryOperation::WriteOperation(
callback,
NULL,
offset,
+ 0,
length,
+ NULL,
TYPE_WRITE,
false,
index,
@@ -133,13 +166,84 @@ SimpleEntryOperation SimpleEntryOperation::WriteOperation(
}
// static
+SimpleEntryOperation SimpleEntryOperation::ReadSparseOperation(
+ SimpleEntryImpl* entry,
+ int64 sparse_offset,
+ int length,
+ net::IOBuffer* buf,
+ const CompletionCallback& callback) {
+ return SimpleEntryOperation(entry,
+ buf,
+ callback,
+ NULL,
+ 0,
+ sparse_offset,
+ length,
+ NULL,
+ TYPE_READ_SPARSE,
+ false,
+ 0,
+ false,
+ false,
+ false);
+}
+
+// static
+SimpleEntryOperation SimpleEntryOperation::WriteSparseOperation(
+ SimpleEntryImpl* entry,
+ int64 sparse_offset,
+ int length,
+ net::IOBuffer* buf,
+ const CompletionCallback& callback) {
+ return SimpleEntryOperation(entry,
+ buf,
+ callback,
+ NULL,
+ 0,
+ sparse_offset,
+ length,
+ NULL,
+ TYPE_WRITE_SPARSE,
+ false,
+ 0,
+ false,
+ false,
+ false);
+}
+
+// static
+SimpleEntryOperation SimpleEntryOperation::GetAvailableRangeOperation(
+ SimpleEntryImpl* entry,
+ int64 sparse_offset,
+ int length,
+ int64* out_start,
+ const CompletionCallback& callback) {
+ return SimpleEntryOperation(entry,
+ NULL,
+ callback,
+ NULL,
+ 0,
+ sparse_offset,
+ length,
+ out_start,
+ TYPE_GET_AVAILABLE_RANGE,
+ false,
+ 0,
+ false,
+ false,
+ false);
+}
+
+// static
SimpleEntryOperation SimpleEntryOperation::DoomOperation(
SimpleEntryImpl* entry,
const CompletionCallback& callback) {
net::IOBuffer* const buf = NULL;
Entry** const out_entry = NULL;
const int offset = 0;
+ const int64 sparse_offset = 0;
const int length = 0;
+ int64* const out_start = NULL;
const bool have_index = false;
const int index = 0;
const bool truncate = false;
@@ -150,7 +254,9 @@ SimpleEntryOperation SimpleEntryOperation::DoomOperation(
callback,
out_entry,
offset,
+ sparse_offset,
length,
+ out_start,
TYPE_DOOM,
have_index,
index,
@@ -161,19 +267,38 @@ SimpleEntryOperation SimpleEntryOperation::DoomOperation(
bool SimpleEntryOperation::ConflictsWith(
const SimpleEntryOperation& other_op) const {
- if (type_ != TYPE_READ && type_ != TYPE_WRITE)
- return true;
- if (other_op.type() != TYPE_READ && other_op.type() != TYPE_WRITE)
+ EntryOperationType other_type = other_op.type();
+
+ // Non-read/write operations conflict with everything.
+ if (!IsReadWriteType(type_) || !IsReadWriteType(other_type))
return true;
- if (type() == TYPE_READ && other_op.type() == TYPE_READ)
+
+  // Two reads (sparse or otherwise) never conflict with each other.
+ if (IsReadType(type_) && IsReadType(other_type))
return false;
+
+ // Sparse and non-sparse operations do not conflict with each other.
+ if (IsSparseType(type_) != IsSparseType(other_type)) {
+ return false;
+ }
+
+ // There must be two read/write operations, at least one must be a write, and
+ // they must be either both non-sparse or both sparse. Compare the streams
+ // and offsets to see whether they overlap.
+
+ if (IsSparseType(type_)) {
+ int64 end = sparse_offset_ + length_;
+ int64 other_op_end = other_op.sparse_offset() + other_op.length();
+ return sparse_offset_ < other_op_end && other_op.sparse_offset() < end;
+ }
+
if (index_ != other_op.index_)
return false;
int end = (type_ == TYPE_WRITE && truncate_) ? INT_MAX : offset_ + length_;
int other_op_end = (other_op.type() == TYPE_WRITE && other_op.truncate())
? INT_MAX
: other_op.offset() + other_op.length();
- return (offset_ < other_op_end && other_op.offset() < end);
+ return offset_ < other_op_end && other_op.offset() < end;
}
void SimpleEntryOperation::ReleaseReferences() {
@@ -187,7 +312,9 @@ SimpleEntryOperation::SimpleEntryOperation(SimpleEntryImpl* entry,
const CompletionCallback& callback,
Entry** out_entry,
int offset,
+ int64 sparse_offset,
int length,
+ int64* out_start,
EntryOperationType type,
bool have_index,
int index,
@@ -199,7 +326,9 @@ SimpleEntryOperation::SimpleEntryOperation(SimpleEntryImpl* entry,
callback_(callback),
out_entry_(out_entry),
offset_(offset),
+ sparse_offset_(sparse_offset),
length_(length),
+ out_start_(out_start),
type_(type),
have_index_(have_index),
index_(index),
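A few concrete cases of the conflict rules above (illustrative only, not exhaustive):

    two reads, sparse or not                             ->  no conflict
    sparse write [0, 1024) vs sparse write [512, 1536)   ->  ranges overlap: conflict
    sparse write [0, 1024) vs sparse read [2048, 3072)   ->  disjoint: no conflict
    sparse write vs non-sparse write, any offsets        ->  no conflict (separate data)
    anything vs open/create/close/doom                   ->  conflict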
diff --git a/net/disk_cache/simple/simple_entry_operation.h b/net/disk_cache/simple/simple_entry_operation.h
index 1c78701..0886312 100644
--- a/net/disk_cache/simple/simple_entry_operation.h
+++ b/net/disk_cache/simple/simple_entry_operation.h
@@ -31,7 +31,10 @@ class SimpleEntryOperation {
TYPE_CLOSE = 2,
TYPE_READ = 3,
TYPE_WRITE = 4,
- TYPE_DOOM = 5,
+ TYPE_READ_SPARSE = 5,
+ TYPE_WRITE_SPARSE = 6,
+ TYPE_GET_AVAILABLE_RANGE = 7,
+ TYPE_DOOM = 8,
};
SimpleEntryOperation(const SimpleEntryOperation& other);
@@ -63,7 +66,24 @@ class SimpleEntryOperation {
bool truncate,
bool optimistic,
const CompletionCallback& callback);
-
+ static SimpleEntryOperation ReadSparseOperation(
+ SimpleEntryImpl* entry,
+ int64 sparse_offset,
+ int length,
+ net::IOBuffer* buf,
+ const CompletionCallback& callback);
+ static SimpleEntryOperation WriteSparseOperation(
+ SimpleEntryImpl* entry,
+ int64 sparse_offset,
+ int length,
+ net::IOBuffer* buf,
+ const CompletionCallback& callback);
+ static SimpleEntryOperation GetAvailableRangeOperation(
+ SimpleEntryImpl* entry,
+ int64 sparse_offset,
+ int length,
+ int64* out_start,
+ const CompletionCallback& callback);
static SimpleEntryOperation DoomOperation(
SimpleEntryImpl* entry,
const CompletionCallback& callback);
@@ -81,7 +101,9 @@ class SimpleEntryOperation {
bool have_index() const { return have_index_; }
int index() const { return index_; }
int offset() const { return offset_; }
+ int64 sparse_offset() const { return sparse_offset_; }
int length() const { return length_; }
+ int64* out_start() { return out_start_; }
net::IOBuffer* buf() { return buf_.get(); }
bool truncate() const { return truncate_; }
bool optimistic() const { return optimistic_; }
@@ -93,7 +115,9 @@ class SimpleEntryOperation {
const CompletionCallback& callback,
Entry** out_entry,
int offset,
+ int64 sparse_offset,
int length,
+ int64* out_start,
EntryOperationType type,
bool have_index,
int index,
@@ -111,18 +135,22 @@ class SimpleEntryOperation {
// Used in write and read operations.
const int offset_;
+ const int64 sparse_offset_;
const int length_;
- const unsigned int type_ : 3; /* 3 */
+ // Used in get available range operations.
+ int64* const out_start_;
+
+ const EntryOperationType type_;
// Used in open and create operations.
- const unsigned int have_index_ : 1; /* 4 */
+ const bool have_index_;
// Used in write and read operations.
- const unsigned int index_ : 2; /* 6 */
+ const unsigned int index_;
// Used only in write operations.
- const unsigned int truncate_ : 1; /* 7 */
- const unsigned int optimistic_ : 1; /* 8 */
+ const bool truncate_;
+ const bool optimistic_;
// Used only in SimpleCache.ReadIsParallelizable histogram.
- const unsigned int alone_in_queue_ : 1; /* 9 */
+ const bool alone_in_queue_;
};
} // namespace disk_cache
diff --git a/net/disk_cache/simple/simple_synchronous_entry.cc b/net/disk_cache/simple/simple_synchronous_entry.cc
index 659d1c1..81f52e1 100644
--- a/net/disk_cache/simple/simple_synchronous_entry.cc
+++ b/net/disk_cache/simple/simple_synchronous_entry.cc
@@ -34,6 +34,7 @@ using base::PLATFORM_FILE_ERROR_EXISTS;
using base::PLATFORM_FILE_ERROR_NOT_FOUND;
using base::PLATFORM_FILE_OK;
using base::PLATFORM_FILE_OPEN;
+using base::PLATFORM_FILE_OPEN_ALWAYS;
using base::PLATFORM_FILE_READ;
using base::PLATFORM_FILE_WRITE;
using base::ReadPlatformFile;
@@ -53,7 +54,8 @@ enum OpenEntryResult {
OPEN_ENTRY_CANT_READ_KEY = 5,
// OPEN_ENTRY_KEY_MISMATCH = 6, Deprecated.
OPEN_ENTRY_KEY_HASH_MISMATCH = 7,
- OPEN_ENTRY_MAX = 8,
+ OPEN_ENTRY_SPARSE_OPEN_FAILED = 8,
+ OPEN_ENTRY_MAX = 9,
};
// Used in histograms, please only add entries at the end.
@@ -128,15 +130,18 @@ namespace disk_cache {
using simple_util::GetEntryHashKey;
using simple_util::GetFilenameFromEntryHashAndFileIndex;
+using simple_util::GetSparseFilenameFromEntryHash;
using simple_util::GetDataSizeFromKeyAndFileSize;
using simple_util::GetFileSizeFromKeyAndDataSize;
using simple_util::GetFileIndexFromStreamIndex;
SimpleEntryStat::SimpleEntryStat(base::Time last_used,
base::Time last_modified,
- const int32 data_size[])
+ const int32 data_size[],
+ const int32 sparse_data_size)
: last_used_(last_used),
- last_modified_(last_modified) {
+ last_modified_(last_modified),
+ sparse_data_size_(sparse_data_size) {
memcpy(data_size_, data_size, sizeof(data_size_));
}
@@ -211,6 +216,12 @@ SimpleSynchronousEntry::EntryOperationData::EntryOperationData(int index_p,
truncate(truncate_p),
doomed(doomed_p) {}
+SimpleSynchronousEntry::EntryOperationData::EntryOperationData(
+ int64 sparse_offset_p,
+ int buf_len_p)
+ : sparse_offset(sparse_offset_p),
+ buf_len(buf_len_p) {}
+
// static
void SimpleSynchronousEntry::OpenEntry(
net::CacheType cache_type,
@@ -391,6 +402,213 @@ void SimpleSynchronousEntry::WriteData(const EntryOperationData& in_entry_op,
*out_result = buf_len;
}
+void SimpleSynchronousEntry::ReadSparseData(
+ const EntryOperationData& in_entry_op,
+ net::IOBuffer* out_buf,
+ base::Time* out_last_used,
+ int* out_result) {
+ DCHECK(initialized_);
+ int64 offset = in_entry_op.sparse_offset;
+ int buf_len = in_entry_op.buf_len;
+
+ char* buf = out_buf->data();
+ int read_so_far = 0;
+
+ // Find the first sparse range at or after the requested offset.
+ SparseRangeIterator it = sparse_ranges_.lower_bound(offset);
+
+ if (it != sparse_ranges_.begin()) {
+ // Hop back one range and read the one overlapping with the start.
+ --it;
+ SparseRange* found_range = &it->second;
+ DCHECK_EQ(it->first, found_range->offset);
+ if (found_range->offset + found_range->length > offset) {
+ DCHECK_LE(0, found_range->length);
+ DCHECK_GE(kint32max, found_range->length);
+ DCHECK_LE(0, offset - found_range->offset);
+ DCHECK_GE(kint32max, offset - found_range->offset);
+ int range_len_after_offset = found_range->length -
+ (offset - found_range->offset);
+ DCHECK_LE(0, range_len_after_offset);
+
+ int len_to_read = std::min(buf_len, range_len_after_offset);
+ if (!ReadSparseRange(found_range,
+ offset - found_range->offset,
+ len_to_read,
+ buf)) {
+ *out_result = net::ERR_CACHE_READ_FAILURE;
+ return;
+ }
+ read_so_far += len_to_read;
+ }
+ ++it;
+ }
+
+ // Keep reading until the buffer is full or there is not another contiguous
+ // range.
+ while (read_so_far < buf_len &&
+ it != sparse_ranges_.end() &&
+ it->second.offset == offset + read_so_far) {
+ SparseRange* found_range = &it->second;
+ DCHECK_EQ(it->first, found_range->offset);
+ int range_len = (found_range->length > kint32max) ?
+ kint32max : found_range->length;
+ int len_to_read = std::min(buf_len - read_so_far, range_len);
+ if (!ReadSparseRange(found_range, 0, len_to_read, buf + read_so_far)) {
+ *out_result = net::ERR_CACHE_READ_FAILURE;
+ return;
+ }
+ read_so_far += len_to_read;
+ ++it;
+ }
+
+ *out_result = read_so_far;
+}
+
+void SimpleSynchronousEntry::WriteSparseData(
+ const EntryOperationData& in_entry_op,
+ net::IOBuffer* in_buf,
+ int64 max_sparse_data_size,
+ SimpleEntryStat* out_entry_stat,
+ int* out_result) {
+ DCHECK(initialized_);
+ int64 offset = in_entry_op.sparse_offset;
+ int buf_len = in_entry_op.buf_len;
+
+ const char* buf = in_buf->data();
+ int written_so_far = 0;
+ int appended_so_far = 0;
+
+ if (!sparse_file_open() && !CreateSparseFile()) {
+ *out_result = net::ERR_CACHE_WRITE_FAILURE;
+ return;
+ }
+
+ int64 sparse_data_size = out_entry_stat->sparse_data_size();
+ // This is a pessimistic estimate; it assumes the entire buffer is going to
+ // be appended as a new range, not written over existing ranges.
+ if (sparse_data_size + buf_len > max_sparse_data_size) {
+ DLOG(INFO) << "Truncating sparse data file (" << sparse_data_size << " + "
+ << buf_len << " > " << max_sparse_data_size << ")";
+ TruncateSparseFile();
+ }
+
+ SparseRangeIterator it = sparse_ranges_.lower_bound(offset);
+
+ if (it != sparse_ranges_.begin()) {
+ --it;
+ SparseRange* found_range = &it->second;
+ if (found_range->offset + found_range->length > offset) {
+ DCHECK_LE(0, found_range->length);
+ DCHECK_GE(kint32max, found_range->length);
+ DCHECK_LE(0, offset - found_range->offset);
+ DCHECK_GE(kint32max, offset - found_range->offset);
+ int range_len_after_offset = found_range->length -
+ (offset - found_range->offset);
+ DCHECK_LE(0, range_len_after_offset);
+
+ int len_to_write = std::min(buf_len, range_len_after_offset);
+ if (!WriteSparseRange(found_range,
+ offset - found_range->offset,
+ len_to_write,
+ buf)) {
+ *out_result = net::ERR_CACHE_WRITE_FAILURE;
+ return;
+ }
+ written_so_far += len_to_write;
+ }
+ ++it;
+ }
+
+ while (written_so_far < buf_len &&
+ it != sparse_ranges_.end() &&
+ it->second.offset < offset + buf_len) {
+ SparseRange* found_range = &it->second;
+ if (offset + written_so_far < found_range->offset) {
+ int len_to_append = found_range->offset - (offset + written_so_far);
+ if (!AppendSparseRange(offset + written_so_far,
+ len_to_append,
+ buf + written_so_far)) {
+ *out_result = net::ERR_CACHE_WRITE_FAILURE;
+ return;
+ }
+ written_so_far += len_to_append;
+ appended_so_far += len_to_append;
+ }
+ int range_len = (found_range->length > kint32max) ?
+ kint32max : found_range->length;
+ int len_to_write = std::min(buf_len - written_so_far, range_len);
+ if (!WriteSparseRange(found_range,
+ 0,
+ len_to_write,
+ buf + written_so_far)) {
+ *out_result = net::ERR_CACHE_WRITE_FAILURE;
+ return;
+ }
+ written_so_far += len_to_write;
+ ++it;
+ }
+
+ if (written_so_far < buf_len) {
+ int len_to_append = buf_len - written_so_far;
+ if (!AppendSparseRange(offset + written_so_far,
+ len_to_append,
+ buf + written_so_far)) {
+ *out_result = net::ERR_CACHE_WRITE_FAILURE;
+ return;
+ }
+ written_so_far += len_to_append;
+ appended_so_far += len_to_append;
+ }
+
+ DCHECK_EQ(buf_len, written_so_far);
+
+ base::Time modification_time = Time::Now();
+ out_entry_stat->set_last_used(modification_time);
+ out_entry_stat->set_last_modified(modification_time);
+ int32 old_sparse_data_size = out_entry_stat->sparse_data_size();
+ out_entry_stat->set_sparse_data_size(old_sparse_data_size + appended_so_far);
+ *out_result = written_so_far;
+}
+
+void SimpleSynchronousEntry::GetAvailableRange(
+ const EntryOperationData& in_entry_op,
+ int64* out_start,
+ int* out_result) {
+ DCHECK(initialized_);
+ int64 offset = in_entry_op.sparse_offset;
+ int len = in_entry_op.buf_len;
+
+ SparseRangeIterator it = sparse_ranges_.lower_bound(offset);
+
+ int64 start = offset;
+ int avail_so_far = 0;
+
+ if (it != sparse_ranges_.end() && it->second.offset < offset + len)
+ start = it->second.offset;
+
+ if ((it == sparse_ranges_.end() || it->second.offset > offset) &&
+ it != sparse_ranges_.begin()) {
+ --it;
+ if (it->second.offset + it->second.length > offset) {
+ start = offset;
+ avail_so_far = (it->second.offset + it->second.length) - offset;
+ }
+ ++it;
+ }
+
+ while (start + avail_so_far < offset + len &&
+ it != sparse_ranges_.end() &&
+ it->second.offset == start + avail_so_far) {
+ avail_so_far += it->second.length;
+ ++it;
+ }
+
+ int len_from_start = len - (start - offset);
+ *out_start = start;
+ *out_result = std::min(avail_so_far, len_from_start);
+}
+
void SimpleSynchronousEntry::CheckEOFRecord(int index,
const SimpleEntryStat& entry_stat,
uint32 expected_crc32,
@@ -482,12 +700,16 @@ void SimpleSynchronousEntry::Close(
cluster_loss * 100 / (cluster_loss + file_size));
}
+ if (sparse_file_open()) {
+ bool did_close_file = ClosePlatformFile(sparse_file_);
+ CHECK(did_close_file);
+ }
+
if (files_created_) {
const int stream2_file_index = GetFileIndexFromStreamIndex(2);
SIMPLE_CACHE_UMA(BOOLEAN, "EntryCreatedAndStream2Omitted", cache_type_,
empty_file_omitted_[stream2_file_index]);
}
-
RecordCloseResult(cache_type_, CLOSE_RESULT_SUCCESS);
have_open_files_ = false;
delete this;
@@ -502,7 +724,8 @@ SimpleSynchronousEntry::SimpleSynchronousEntry(net::CacheType cache_type,
entry_hash_(entry_hash),
key_(key),
have_open_files_(false),
- initialized_(false) {
+ initialized_(false),
+ sparse_file_(kInvalidPlatformFileValue) {
for (int i = 0; i < kSimpleEntryFileCount; ++i) {
files_[i] = kInvalidPlatformFileValue;
empty_file_omitted_[i] = false;
@@ -683,6 +906,11 @@ void SimpleSynchronousEntry::CloseFile(int index) {
DCHECK(did_close);
files_[index] = kInvalidPlatformFileValue;
}
+
+ if (sparse_file_open()) {
+ bool did_close = CloseSparseFile();
+ DCHECK(did_close);
+ }
}
void SimpleSynchronousEntry::CloseFiles() {
@@ -764,6 +992,14 @@ int SimpleSynchronousEntry::InitializeForOpen(
}
}
+ int32 sparse_data_size = 0;
+ if (!OpenSparseFileIfExists(&sparse_data_size)) {
+ RecordSyncOpenResult(
+ cache_type_, OPEN_ENTRY_SPARSE_OPEN_FAILED, had_index);
+ return net::ERR_FAILED;
+ }
+ out_entry_stat->set_sparse_data_size(sparse_data_size);
+
bool removed_stream2 = false;
const int stream2_file_index = GetFileIndexFromStreamIndex(2);
DCHECK(CanOmitEmptyFile(stream2_file_index));
@@ -939,6 +1175,9 @@ bool SimpleSynchronousEntry::DeleteFilesForEntryHash(
if (!DeleteFileForEntryHash(path, entry_hash, i) && !CanOmitEmptyFile(i))
result = false;
}
+ FilePath to_delete = path.AppendASCII(
+ GetSparseFilenameFromEntryHash(entry_hash));
+ base::DeleteFile(to_delete, false);
return result;
}
@@ -963,4 +1202,277 @@ FilePath SimpleSynchronousEntry::GetFilenameFromFileIndex(int file_index) {
GetFilenameFromEntryHashAndFileIndex(entry_hash_, file_index));
}
+bool SimpleSynchronousEntry::OpenSparseFileIfExists(
+ int32* out_sparse_data_size) {
+ DCHECK(!sparse_file_open());
+
+ FilePath filename = path_.AppendASCII(
+ GetSparseFilenameFromEntryHash(entry_hash_));
+ int flags = PLATFORM_FILE_OPEN | PLATFORM_FILE_READ | PLATFORM_FILE_WRITE;
+ bool created;
+ PlatformFileError error;
+ sparse_file_ = CreatePlatformFile(filename, flags, &created, &error);
+ if (error == PLATFORM_FILE_ERROR_NOT_FOUND)
+ return true;
+
+ return ScanSparseFile(out_sparse_data_size);
+}
+
+bool SimpleSynchronousEntry::CreateSparseFile() {
+ DCHECK(!sparse_file_open());
+
+ FilePath filename = path_.AppendASCII(
+ GetSparseFilenameFromEntryHash(entry_hash_));
+ int flags = PLATFORM_FILE_CREATE | PLATFORM_FILE_READ | PLATFORM_FILE_WRITE;
+ bool created;
+ PlatformFileError error;
+ sparse_file_ = CreatePlatformFile(filename, flags, &created, &error);
+ if (error != PLATFORM_FILE_OK)
+ return false;
+
+ return InitializeSparseFile();
+}
+
+bool SimpleSynchronousEntry::CloseSparseFile() {
+ DCHECK(sparse_file_open());
+
+ bool did_close = ClosePlatformFile(sparse_file_);
+ if (did_close)
+ sparse_file_ = kInvalidPlatformFileValue;
+ return did_close;
+}
+
+bool SimpleSynchronousEntry::TruncateSparseFile() {
+ DCHECK(sparse_file_open());
+
+ int64 header_and_key_length = sizeof(SimpleFileHeader) + key_.size();
+ if (!TruncatePlatformFile(sparse_file_, header_and_key_length)) {
+ DLOG(WARNING) << "Could not truncate sparse file";
+ return false;
+ }
+
+ sparse_ranges_.clear();
+
+ return true;
+}
+
+bool SimpleSynchronousEntry::InitializeSparseFile() {
+ DCHECK(sparse_file_open());
+
+ SimpleFileHeader header;
+ header.initial_magic_number = kSimpleInitialMagicNumber;
+ header.version = kSimpleVersion;
+ header.key_length = key_.size();
+ header.key_hash = base::Hash(key_);
+
+ int header_write_result =
+ WritePlatformFile(sparse_file_, 0, reinterpret_cast<char*>(&header),
+ sizeof(header));
+ if (header_write_result != sizeof(header)) {
+ DLOG(WARNING) << "Could not write sparse file header";
+ return false;
+ }
+
+ int key_write_result = WritePlatformFile(sparse_file_, sizeof(header),
+ key_.data(), key_.size());
+ if (key_write_result != implicit_cast<int>(key_.size())) {
+ DLOG(WARNING) << "Could not write sparse file key";
+ return false;
+ }
+
+ sparse_ranges_.clear();
+ sparse_tail_offset_ = sizeof(header) + key_.size();
+
+ return true;
+}
+
+bool SimpleSynchronousEntry::ScanSparseFile(int32* out_sparse_data_size) {
+ DCHECK(sparse_file_open());
+
+ int32 sparse_data_size = 0;
+
+ SimpleFileHeader header;
+ int header_read_result =
+ ReadPlatformFile(sparse_file_, 0, reinterpret_cast<char*>(&header),
+ sizeof(header));
+ if (header_read_result != sizeof(header)) {
+ DLOG(WARNING) << "Could not read header from sparse file.";
+ return false;
+ }
+
+ if (header.initial_magic_number != kSimpleInitialMagicNumber) {
+ DLOG(WARNING) << "Sparse file magic number did not match.";
+ return false;
+ }
+
+ if (header.version != kSimpleVersion) {
+ DLOG(WARNING) << "Sparse file unreadable version.";
+ return false;
+ }
+
+ sparse_ranges_.clear();
+
+ int64 range_header_offset = sizeof(header) + key_.size();
+ while (1) {
+ SimpleFileSparseRangeHeader range_header;
+ int range_header_read_result =
+ ReadPlatformFile(sparse_file_,
+ range_header_offset,
+ reinterpret_cast<char*>(&range_header),
+ sizeof(range_header));
+ if (range_header_read_result == 0)
+ break;
+ if (range_header_read_result != sizeof(range_header)) {
+ DLOG(WARNING) << "Could not read sparse range header.";
+ return false;
+ }
+
+ if (range_header.sparse_range_magic_number !=
+ kSimpleSparseRangeMagicNumber) {
+ DLOG(WARNING) << "Invalid sparse range header magic number.";
+ return false;
+ }
+
+ SparseRange range;
+ range.offset = range_header.offset;
+ range.length = range_header.length;
+ range.data_crc32 = range_header.data_crc32;
+ range.file_offset = range_header_offset + sizeof(range_header);
+ sparse_ranges_.insert(std::make_pair(range.offset, range));
+
+ range_header_offset += sizeof(range_header) + range.length;
+
+ DCHECK_LE(sparse_data_size, sparse_data_size + range.length);
+ sparse_data_size += range.length;
+ }
+
+ *out_sparse_data_size = sparse_data_size;
+ sparse_tail_offset_ = range_header_offset;
+
+ return true;
+}
+
+bool SimpleSynchronousEntry::ReadSparseRange(const SparseRange* range,
+ int offset, int len, char* buf) {
+ DCHECK(range);
+ DCHECK(buf);
+ DCHECK_GE(range->length, offset);
+ DCHECK_GE(range->length, offset + len);
+
+ int bytes_read = ReadPlatformFile(sparse_file_,
+ range->file_offset + offset,
+ buf, len);
+ if (bytes_read < len) {
+ DLOG(WARNING) << "Could not read sparse range.";
+ return false;
+ }
+
+ // If we read the whole range and we have a crc32, check it.
+ if (offset == 0 && len == range->length && range->data_crc32 != 0) {
+ uint32 actual_crc32 = crc32(crc32(0L, Z_NULL, 0),
+ reinterpret_cast<const Bytef*>(buf),
+ len);
+ if (actual_crc32 != range->data_crc32) {
+ DLOG(WARNING) << "Sparse range crc32 mismatch.";
+ return false;
+ }
+ }
+ // TODO(ttuttle): Incremental crc32 calculation?
+
+ return true;
+}
+
+bool SimpleSynchronousEntry::WriteSparseRange(SparseRange* range,
+ int offset, int len,
+ const char* buf) {
+ DCHECK(range);
+ DCHECK(buf);
+ DCHECK_GE(range->length, offset);
+ DCHECK_GE(range->length, offset + len);
+
+ uint32 new_crc32 = 0;
+ if (offset == 0 && len == range->length) {
+ new_crc32 = crc32(crc32(0L, Z_NULL, 0),
+ reinterpret_cast<const Bytef*>(buf),
+ len);
+ }
+
+ if (new_crc32 != range->data_crc32) {
+ range->data_crc32 = new_crc32;
+
+ SimpleFileSparseRangeHeader header;
+ header.sparse_range_magic_number = kSimpleSparseRangeMagicNumber;
+ header.offset = range->offset;
+ header.length = range->length;
+ header.data_crc32 = range->data_crc32;
+
+ int bytes_written = WritePlatformFile(sparse_file_,
+ range->file_offset - sizeof(header),
+ reinterpret_cast<char*>(&header),
+ sizeof(header));
+ if (bytes_written != implicit_cast<int>(sizeof(header))) {
+ DLOG(WARNING) << "Could not rewrite sparse range header.";
+ return false;
+ }
+ }
+
+ int bytes_written = WritePlatformFile(sparse_file_,
+ range->file_offset + offset,
+ buf, len);
+ if (bytes_written < len) {
+ DLOG(WARNING) << "Could not write sparse range.";
+ return false;
+ }
+
+ return true;
+}
+
+bool SimpleSynchronousEntry::AppendSparseRange(int64 offset,
+ int len,
+ const char* buf) {
+ DCHECK_LE(0, offset);
+ DCHECK_LT(0, len);
+ DCHECK(buf);
+
+ uint32 data_crc32 = crc32(crc32(0L, Z_NULL, 0),
+ reinterpret_cast<const Bytef*>(buf),
+ len);
+
+ SimpleFileSparseRangeHeader header;
+ header.sparse_range_magic_number = kSimpleSparseRangeMagicNumber;
+ header.offset = offset;
+ header.length = len;
+ header.data_crc32 = data_crc32;
+
+ int bytes_written = WritePlatformFile(sparse_file_,
+ sparse_tail_offset_,
+ reinterpret_cast<char*>(&header),
+ sizeof(header));
+ if (bytes_written != implicit_cast<int>(sizeof(header))) {
+ DLOG(WARNING) << "Could not append sparse range header.";
+ return false;
+ }
+ sparse_tail_offset_ += bytes_written;
+
+ bytes_written = WritePlatformFile(sparse_file_,
+ sparse_tail_offset_,
+ buf,
+ len);
+ if (bytes_written < len) {
+ DLOG(WARNING) << "Could not append sparse range data.";
+ return false;
+ }
+ int64 data_file_offset = sparse_tail_offset_;
+ sparse_tail_offset_ += bytes_written;
+
+ SparseRange range;
+ range.offset = offset;
+ range.length = len;
+ range.data_crc32 = data_crc32;
+ range.file_offset = data_file_offset;
+ sparse_ranges_.insert(std::make_pair(offset, range));
+
+ return true;
+}
+
} // namespace disk_cache
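The range lookups in ReadSparseData, WriteSparseData and GetAvailableRange above all use the same interval-map idiom: lower_bound to find the first range starting at or after the query offset, then step back one entry in case an earlier range extends past it. A self-contained sketch of just that idiom (the Range and FindFirstRelevantRange names here are hypothetical, not the real SparseRange code):

    #include <stdint.h>
    #include <map>

    struct Range { int64_t offset; int64_t length; };
    typedef std::map<int64_t, Range> RangeMap;  // keyed by Range::offset

    // Returns the first stored range that could contain or follow |offset|.
    RangeMap::iterator FindFirstRelevantRange(RangeMap& ranges, int64_t offset) {
      RangeMap::iterator it = ranges.lower_bound(offset);
      if (it != ranges.begin()) {
        RangeMap::iterator prev = it;
        --prev;
        // The preceding range still matters if it extends past |offset|.
        if (prev->second.offset + prev->second.length > offset)
          return prev;
      }
      return it;
    }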
diff --git a/net/disk_cache/simple/simple_synchronous_entry.h b/net/disk_cache/simple/simple_synchronous_entry.h
index 74823a2..2ae4c0d 100644
--- a/net/disk_cache/simple/simple_synchronous_entry.h
+++ b/net/disk_cache/simple/simple_synchronous_entry.h
@@ -6,6 +6,7 @@
#define NET_DISK_CACHE_SIMPLE_SIMPLE_SYNCHRONOUS_ENTRY_H_
#include <algorithm>
+#include <map>
#include <string>
#include <utility>
#include <vector>
@@ -35,7 +36,8 @@ class NET_EXPORT_PRIVATE SimpleEntryStat {
public:
SimpleEntryStat(base::Time last_used,
base::Time last_modified,
- const int32 data_size[]);
+ const int32 data_size[],
+ const int32 sparse_data_size);
int GetOffsetInFile(const std::string& key,
int offset,
@@ -56,10 +58,16 @@ class NET_EXPORT_PRIVATE SimpleEntryStat {
data_size_[stream_index] = data_size;
}
+ int32 sparse_data_size() const { return sparse_data_size_; }
+ void set_sparse_data_size(int32 sparse_data_size) {
+ sparse_data_size_ = sparse_data_size;
+ }
+
private:
base::Time last_used_;
base::Time last_modified_;
int32 data_size_[kSimpleEntryStreamCount];
+ int32 sparse_data_size_;
};
struct SimpleEntryCreationResults {
@@ -94,9 +102,11 @@ class SimpleSynchronousEntry {
int buf_len_p,
bool truncate_p,
bool doomed_p);
+ EntryOperationData(int64 sparse_offset_p, int buf_len_p);
int index;
int offset;
+ int64 sparse_offset;
int buf_len;
bool truncate;
bool doomed;
@@ -142,6 +152,19 @@ class SimpleSynchronousEntry {
uint32 expected_crc32,
int* out_result) const;
+ void ReadSparseData(const EntryOperationData& in_entry_op,
+ net::IOBuffer* out_buf,
+ base::Time* out_last_used,
+ int* out_result);
+ void WriteSparseData(const EntryOperationData& in_entry_op,
+ net::IOBuffer* in_buf,
+ int64 max_sparse_data_size,
+ SimpleEntryStat* out_entry_stat,
+ int* out_result);
+ void GetAvailableRange(const EntryOperationData& in_entry_op,
+ int64* out_start,
+ int* out_result);
+
// Close all streams, and add write EOF records to streams indicated by the
// CRCRecord entries in |crc32s_to_write|.
void Close(const SimpleEntryStat& entry_stat,
@@ -165,6 +188,17 @@ class SimpleSynchronousEntry {
FILE_REQUIRED
};
+ struct SparseRange {
+ int64 offset;
+ int64 length;
+ uint32 data_crc32;
+ int64 file_offset;
+
+ bool operator<(const SparseRange& other) const {
+ return offset < other.offset;
+ }
+ };
+
SimpleSynchronousEntry(
net::CacheType cache_type,
const base::FilePath& path,
@@ -227,6 +261,39 @@ class SimpleSynchronousEntry {
int* out_data_size) const;
void Doom() const;
+ // Opens the sparse data file and scans it if it exists.
+ bool OpenSparseFileIfExists(int32* out_sparse_data_size);
+
+ // Creates and initializes the sparse data file.
+ bool CreateSparseFile();
+
+ // Closes the sparse data file.
+ bool CloseSparseFile();
+
+ // Writes the header to the (newly-created) sparse file.
+ bool InitializeSparseFile();
+
+ // Removes all but the header of the sparse file.
+ bool TruncateSparseFile();
+
+ // Scans the existing ranges in the sparse file. Populates |sparse_ranges_|
+ // and sets |*out_sparse_data_size| to the total size of all the ranges (not
+ // including headers).
+ bool ScanSparseFile(int32* out_sparse_data_size);
+
+ // Reads from a single sparse range. If asked to read the entire range, also
+ // verifies the CRC32.
+ bool ReadSparseRange(const SparseRange* range,
+ int offset, int len, char* buf);
+
+ // Writes to a single (existing) sparse range. If asked to write the entire
+ // range, also updates the CRC32; otherwise, invalidates it.
+ bool WriteSparseRange(SparseRange* range,
+ int offset, int len, const char* buf);
+
+ // Appends a new sparse range to the sparse data file.
+ bool AppendSparseRange(int64 offset, int len, const char* buf);
+
static bool DeleteFileForEntryHash(const base::FilePath& path,
uint64 entry_hash,
int file_index);
@@ -237,6 +304,10 @@ class SimpleSynchronousEntry {
base::FilePath GetFilenameFromFileIndex(int file_index);
+ bool sparse_file_open() const {
+ return sparse_file_ != base::kInvalidPlatformFileValue;
+ }
+
const net::CacheType cache_type_;
const base::FilePath path_;
const uint64 entry_hash_;
@@ -251,6 +322,14 @@ class SimpleSynchronousEntry {
// was created to store it.
bool empty_file_omitted_[kSimpleEntryFileCount];
+ typedef std::map<int64, SparseRange> SparseRangeOffsetMap;
+ typedef SparseRangeOffsetMap::iterator SparseRangeIterator;
+ SparseRangeOffsetMap sparse_ranges_;
+ base::PlatformFile sparse_file_;
+ // Offset of the end of the sparse file (where the next sparse range will be
+ // written).
+ int64 sparse_tail_offset_;
+
// True if the entry was created, or false if it was opened. Used to log
// SimpleCache.*.EntryCreatedWithStream2Omitted only for created entries.
bool files_created_;
diff --git a/net/disk_cache/simple/simple_util.cc b/net/disk_cache/simple/simple_util.cc
index 4291b1f..4afdc59 100644
--- a/net/disk_cache/simple/simple_util.cc
+++ b/net/disk_cache/simple/simple_util.cc
@@ -82,6 +82,10 @@ std::string GetFilenameFromEntryHashAndFileIndex(uint64 entry_hash,
return base::StringPrintf("%016" PRIx64 "_%1d", entry_hash, file_index);
}
+std::string GetSparseFilenameFromEntryHash(uint64 entry_hash) {
+ return base::StringPrintf("%016" PRIx64 "_s", entry_hash);
+}
+
std::string GetFilenameFromKeyAndFileIndex(const std::string& key,
int file_index) {
return GetEntryHashKeyAsHexString(key) +
diff --git a/net/disk_cache/simple/simple_util.h b/net/disk_cache/simple/simple_util.h
index 60a237e..f762151 100644
--- a/net/disk_cache/simple/simple_util.h
+++ b/net/disk_cache/simple/simple_util.h
@@ -48,6 +48,9 @@ NET_EXPORT_PRIVATE std::string GetFilenameFromKeyAndFileIndex(
std::string GetFilenameFromEntryHashAndFileIndex(uint64 entry_hash,
int file_index);
+// Given the hash of an entry, returns the name of the sparse data file.
+std::string GetSparseFilenameFromEntryHash(uint64 entry_hash);
+
// Given the size of a file holding a stream in the simple backend and the key
// to an entry, returns the number of bytes in the stream.
NET_EXPORT_PRIVATE int32 GetDataSizeFromKeyAndFileSize(const std::string& key,