author    rvargas <rvargas@chromium.org>  2014-12-05 13:33:49 -0800
committer Commit bot <commit-bot@chromium.org>  2014-12-05 21:34:43 +0000
commit    014cb2211155eadda438f241708a2ca07ab31970 (patch)
tree      cc9dfba5d6bdfb8b910de116a0c8b32d55f27cf0
parent    cb4e417aca740b7039dfaafb181d62a12cc25460 (diff)
Disk cache: make sure that data outside of a sparse block is dropped.
Non-sequential sparse data writes may end up writing before or after a
given sparse data block. Said data is only stored if the next write is
sequential and actually fills that block. This CL makes sure that data
"before" the start of a given block is properly discarded (that is, not
accounted for when looking for stored data).

BUG=416895
Review URL: https://codereview.chromium.org/770153002
Cr-Commit-Position: refs/heads/master@{#307085}
-rw-r--r--  net/disk_cache/blockfile/sparse_control.cc |  51
-rw-r--r--  net/disk_cache/entry_unittest.cc           | 117
2 files changed, 149 insertions(+), 19 deletions(-)
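To make the accounting concrete, here is a toy, self-contained model of the
rule this CL enforces. It is an illustration with invented names (ChildHeader,
BytesKept), not the actual disk_cache code: sparse data lives in 1 KiB blocks,
a partial tail is remembered in the child header, and bytes written before the
next block boundary are dropped unless the write continues exactly where the
previous one ended.

#include <cstdio>

constexpr int kBlockSize = 1024;

struct ChildHeader {
  int last_block = -1;     // Block holding the partial tail, or -1.
  int last_block_len = 0;  // Valid bytes at the start of that block.
};

// Returns how many bytes of a write of |len| bytes at |offset| are kept.
int BytesKept(ChildHeader* h, int offset, int len) {
  int end = offset + len;
  bool sequential = h->last_block >= 0 &&
                    offset == h->last_block * kBlockSize + h->last_block_len;
  // A non-sequential write keeps nothing before the next block boundary.
  int kept_start =
      sequential ? offset
                 : ((offset + kBlockSize - 1) / kBlockSize) * kBlockSize;
  if (kept_start >= end)
    return 0;  // The write never reaches a block boundary: all dropped.
  // Remember the new partial tail, if the write ends mid-block.
  h->last_block = end % kBlockSize ? end / kBlockSize : -1;
  h->last_block_len = end % kBlockSize;
  return end - kept_start;
}

int main() {
  ChildHeader h;
  printf("%d\n", BytesKept(&h, 524, 180));   // 0: never crosses 1024.
  printf("%d\n", BytesKept(&h, 944, 180));   // 100: the bytes past 1024.
  printf("%d\n", BytesKept(&h, 1124, 180));  // 180: sequential, all kept.
  return 0;
}

This mirrors the SparseWriteDropped test added below, where 180-byte writes
are dropped until one of them crosses a 1024-byte boundary.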
diff --git a/net/disk_cache/blockfile/sparse_control.cc b/net/disk_cache/blockfile/sparse_control.cc
index 30ea836..e5096dc 100644
--- a/net/disk_cache/blockfile/sparse_control.cc
+++ b/net/disk_cache/blockfile/sparse_control.cc
@@ -483,7 +483,7 @@ bool SparseControl::OpenChild() {
return KillChildAndContinue(key, false);
if (child_data_.header.last_block_len < 0 ||
- child_data_.header.last_block_len > kBlockSize) {
+ child_data_.header.last_block_len >= kBlockSize) {
// Make sure these values are always within range.
child_data_.header.last_block_len = 0;
child_data_.header.last_block = -1;
@@ -590,7 +590,7 @@ bool SparseControl::VerifyRange() {
if (child_map_.FindNextBit(&start, last_bit, false)) {
// Something is not here.
DCHECK_GE(child_data_.header.last_block_len, 0);
- DCHECK_LT(child_data_.header.last_block_len, kMaxEntrySize);
+ DCHECK_LT(child_data_.header.last_block_len, kBlockSize);
int partial_block_len = PartialBlockLength(start);
if (start == child_offset_ >> 10) {
// It looks like we don't have anything.
@@ -615,7 +615,7 @@ void SparseControl::UpdateRange(int result) {
return;
DCHECK_GE(child_data_.header.last_block_len, 0);
- DCHECK_LT(child_data_.header.last_block_len, kMaxEntrySize);
+ DCHECK_LT(child_data_.header.last_block_len, kBlockSize);
// Write the bitmap.
int first_bit = child_offset_ >> 10;
@@ -651,11 +651,6 @@ int SparseControl::PartialBlockLength(int block_index) const {
if (block_index == child_data_.header.last_block)
return child_data_.header.last_block_len;
- // This may be the last stored index.
- int entry_len = child_->GetDataSize(kSparseData);
- if (block_index == entry_len >> 10)
- return entry_len & (kBlockSize - 1);
-
// This is really empty.
return 0;
}
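The fallback removed in the hunk above inferred a partial last block from the
size of the backing sparse stream, which could report bytes from a discarded,
non-sequential write as valid. A standalone sketch of the contrast, reusing
the toy ChildHeader from the model above (again an illustration, not the real
code):

constexpr int kBlockSize = 1024;

struct ChildHeader {
  int last_block = -1;
  int last_block_len = 0;
};

// Old lookup (simplified): trailing bytes of the backing stream were
// reported as a valid partial block even when they belonged to a write
// that should have been dropped.
int PartialBlockLengthOld(const ChildHeader& h, int stream_len,
                          int block_index) {
  if (block_index == h.last_block)
    return h.last_block_len;
  if (block_index == stream_len >> 10)     // "The last stored index."
    return stream_len & (kBlockSize - 1);  // Stale tail leaks through.
  return 0;
}

// New lookup: the header is the single source of truth for partial data.
int PartialBlockLengthNew(const ChildHeader& h, int block_index) {
  if (block_index == h.last_block)
    return h.last_block_len;
  return 0;  // This is really empty.
}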
@@ -769,27 +764,49 @@ int SparseControl::DoGetAvailableRange() {
if (!child_)
return child_len_; // Move on to the next child.
- // Check that there are no holes in this range.
- int last_bit = (child_offset_ + child_len_ + 1023) >> 10;
+ // Bits on the bitmap should only be set when the corresponding block was
+ // fully written (it's really being used). If a block is partially used, it
+ // has to start with valid data, the length of the valid data is saved in
+ // |header.last_block_len| and the block itself should match
+ // |header.last_block|.
+ //
+ // In other words, (|header.last_block| + |header.last_block_len|) is the
+ // offset where the last write ended, and data in that block (which is not
+ // marked as used because it is not full) will only be reused if the next
+ // write continues at that point.
+ //
+ // This code has to find if there is any data between child_offset_ and
+ // child_offset_ + child_len_.
+ int last_bit = (child_offset_ + child_len_ + kBlockSize - 1) >> 10;
int start = child_offset_ >> 10;
int partial_start_bytes = PartialBlockLength(start);
int found = start;
int bits_found = child_map_.FindBits(&found, last_bit, true);
+ bool is_last_block_in_range = start < child_data_.header.last_block &&
+ child_data_.header.last_block < last_bit;
- // We don't care if there is a partial block in the middle of the range.
int block_offset = child_offset_ & (kBlockSize - 1);
- if (!bits_found && partial_start_bytes <= block_offset)
- return child_len_;
+ if (!bits_found && partial_start_bytes <= block_offset) {
+ if (!is_last_block_in_range)
+ return child_len_;
+ found = last_bit - 1; // There are some bytes here.
+ }
// We are done. Just break the loop and reset result_ to our real result.
range_found_ = true;
- // found now points to the first 1. Lets see if we have zeros before it.
- int empty_start = std::max((found << 10) - child_offset_, 0);
-
int bytes_found = bits_found << 10;
bytes_found += PartialBlockLength(found + bits_found);
+ // found now points to the first bytes. Let's see if we have data before it.
+ int empty_start = std::max((found << 10) - child_offset_, 0);
+ if (empty_start >= child_len_)
+ return child_len_;
+
+ // At this point we have bytes_found stored after (found << 10), and we want
+ // child_len_ bytes after child_offset_. The first empty_start bytes after
+ // child_offset_ are invalid.
+
if (start == found)
bytes_found -= block_offset;
@@ -798,7 +815,7 @@ int SparseControl::DoGetAvailableRange() {
// query that we have to subtract from the range that we searched.
result_ = std::min(bytes_found, child_len_ - empty_start);
- if (!bits_found) {
+ if (partial_start_bytes) {
result_ = std::min(partial_start_bytes - block_offset, child_len_);
empty_start = 0;
}
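Putting the pieces of DoGetAvailableRange together: full blocks are found
through the bitmap, and the partial tail recorded in the header only
contributes when the queried range reaches |last_block|. Below is a toy,
self-contained model of that computation, with assumed semantics and invented
names rather than the real SparseControl API:

#include <algorithm>
#include <cstdio>
#include <vector>

constexpr int kBlockSize = 1024;

struct Child {
  std::vector<bool> bitmap;  // One bit per fully written 1 KiB block.
  int last_block = -1;       // Block holding the partial tail, or -1.
  int last_block_len = 0;    // Valid bytes at the start of that block.
};

// Returns the number of contiguous stored bytes within
// [offset, offset + len), placing their start in |*start|.
int GetAvailableRange(const Child& c, int offset, int len, int* start) {
  int first = offset / kBlockSize;
  int last = (offset + len + kBlockSize - 1) / kBlockSize;
  for (int block = first; block < last; ++block) {
    int block_bytes = 0;
    if (block < (int)c.bitmap.size() && c.bitmap[block]) {
      // Extend across consecutive full blocks, then a trailing tail.
      int end = block + 1;
      while (end < (int)c.bitmap.size() && c.bitmap[end])
        ++end;
      block_bytes = (end - block) * kBlockSize;
      if (end == c.last_block)
        block_bytes += c.last_block_len;
    } else if (block == c.last_block) {
      block_bytes = c.last_block_len;  // Only the partial tail is valid.
    }
    if (!block_bytes)
      continue;  // Nothing stored in this block; keep scanning.
    int range_start = std::max(offset, block * kBlockSize);
    int available = block * kBlockSize + block_bytes - range_start;
    if (available <= 0)
      continue;  // The query starts past the stored tail bytes.
    *start = range_start;
    return std::min(available, offset + len - range_start);
  }
  return 0;
}

int main() {
  Child c;
  c.bitmap.assign(8, false);  // No fully written blocks.
  c.last_block = 7;           // 80 bytes stored at offset 1024 * 7.
  c.last_block_len = 80;
  int start = 0;
  printf("%d at %d\n", GetAvailableRange(c, 7068, 180, &start), start);
  // Prints "80 at 7168", matching the SparseWriteDropped expectations.
  return 0;
}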
diff --git a/net/disk_cache/entry_unittest.cc b/net/disk_cache/entry_unittest.cc
index aac45ac3..03cc86b 100644
--- a/net/disk_cache/entry_unittest.cc
+++ b/net/disk_cache/entry_unittest.cc
@@ -1798,6 +1798,111 @@ TEST_F(DiskCacheEntryTest, MemoryOnlyGetAvailableRange) {
GetAvailableRange();
}
+// Tests that non-sequential writes that are not aligned with the minimum sparse
+// data granularity (1024 bytes) do in fact result in dropped data.
+TEST_F(DiskCacheEntryTest, SparseWriteDropped) {
+ InitCache();
+ std::string key("the first key");
+ disk_cache::Entry* entry;
+ ASSERT_EQ(net::OK, CreateEntry(key, &entry));
+
+ const int kSize = 180;
+ scoped_refptr<net::IOBuffer> buf_1(new net::IOBuffer(kSize));
+ scoped_refptr<net::IOBuffer> buf_2(new net::IOBuffer(kSize));
+ CacheTestFillBuffer(buf_1->data(), kSize, false);
+
+ // Do small writes (180 bytes) that get increasingly close to a 1024-byte
+ // boundary. All data should be dropped until a boundary is crossed, at which
+ // point the data after the boundary is saved (at least for a while).
+ int offset = 1024 - 500;
+ int rv = 0;
+ net::TestCompletionCallback cb;
+ int64 start;
+ for (int i = 0; i < 5; i++) {
+ // Check result of last GetAvailableRange.
+ EXPECT_EQ(0, rv);
+
+ rv = entry->WriteSparseData(offset, buf_1.get(), kSize, cb.callback());
+ EXPECT_EQ(kSize, cb.GetResult(rv));
+
+ rv = entry->GetAvailableRange(offset - 100, kSize, &start, cb.callback());
+ EXPECT_EQ(0, cb.GetResult(rv));
+
+ rv = entry->GetAvailableRange(offset, kSize, &start, cb.callback());
+ rv = cb.GetResult(rv);
+ if (!rv) {
+ rv = entry->ReadSparseData(offset, buf_2.get(), kSize, cb.callback());
+ EXPECT_EQ(0, cb.GetResult(rv));
+ rv = 0;
+ }
+ offset += 1024 * i + 100;
+ }
+
+ // The last write started 100 bytes below a boundary, so there should be 80
+ // bytes after the boundary.
+ EXPECT_EQ(80, rv);
+ EXPECT_EQ(1024 * 7, start);
+ rv = entry->ReadSparseData(start, buf_2.get(), kSize, cb.callback());
+ EXPECT_EQ(80, cb.GetResult(rv));
+ EXPECT_EQ(0, memcmp(buf_1.get()->data() + 100, buf_2.get()->data(), 80));
+
+ // And even that part is dropped when another write changes the offset.
+ offset = start;
+ rv = entry->WriteSparseData(0, buf_1.get(), kSize, cb.callback());
+ EXPECT_EQ(kSize, cb.GetResult(rv));
+
+ rv = entry->GetAvailableRange(offset, kSize, &start, cb.callback());
+ EXPECT_EQ(0, cb.GetResult(rv));
+ entry->Close();
+}
+
+// Tests that small sequential writes are not dropped.
+TEST_F(DiskCacheEntryTest, SparseSquentialWriteNotDropped) {
+ InitCache();
+ std::string key("the first key");
+ disk_cache::Entry* entry;
+ ASSERT_EQ(net::OK, CreateEntry(key, &entry));
+
+ const int kSize = 180;
+ scoped_refptr<net::IOBuffer> buf_1(new net::IOBuffer(kSize));
+ scoped_refptr<net::IOBuffer> buf_2(new net::IOBuffer(kSize));
+ CacheTestFillBuffer(buf_1->data(), kSize, false);
+
+ // Any starting offset is fine as long as it is 1024-bytes aligned.
+ int rv = 0;
+ net::TestCompletionCallback cb;
+ int64 start;
+ int64 offset = 1024 * 11;
+ for (; offset < 20000; offset += kSize) {
+ rv = entry->WriteSparseData(offset, buf_1.get(), kSize, cb.callback());
+ EXPECT_EQ(kSize, cb.GetResult(rv));
+
+ rv = entry->GetAvailableRange(offset, kSize, &start, cb.callback());
+ EXPECT_EQ(kSize, cb.GetResult(rv));
+ EXPECT_EQ(offset, start);
+
+ rv = entry->ReadSparseData(offset, buf_2.get(), kSize, cb.callback());
+ EXPECT_EQ(kSize, cb.GetResult(rv));
+ EXPECT_EQ(0, memcmp(buf_1.get()->data(), buf_2.get()->data(), kSize));
+ }
+
+ entry->Close();
+ FlushQueueForTest();
+
+ // Verify again the last write made.
+ ASSERT_EQ(net::OK, OpenEntry(key, &entry));
+ offset -= kSize;
+ rv = entry->GetAvailableRange(offset, kSize, &start, cb.callback());
+ EXPECT_EQ(kSize, cb.GetResult(rv));
+ EXPECT_EQ(offset, start);
+
+ rv = entry->ReadSparseData(offset, buf_2.get(), kSize, cb.callback());
+ EXPECT_EQ(kSize, cb.GetResult(rv));
+ EXPECT_EQ(0, memcmp(buf_1.get()->data(), buf_2.get()->data(), kSize));
+
+ entry->Close();
+}
+
void DiskCacheEntryTest::CouldBeSparse() {
std::string key("the first key");
disk_cache::Entry* entry;
@@ -2133,7 +2238,11 @@ void DiskCacheEntryTest::PartialSparseEntry() {
EXPECT_EQ(0, ReadSparseData(entry, 0, buf2.get(), kSize));
// This read should not change anything.
- EXPECT_EQ(96, ReadSparseData(entry, 24000, buf2.get(), kSize));
+ if (memory_only_ || simple_cache_mode_)
+ EXPECT_EQ(96, ReadSparseData(entry, 24000, buf2.get(), kSize));
+ else
+ EXPECT_EQ(0, ReadSparseData(entry, 24000, buf2.get(), kSize));
+
EXPECT_EQ(500, ReadSparseData(entry, kSize, buf2.get(), kSize));
EXPECT_EQ(0, ReadSparseData(entry, 99, buf2.get(), kSize));
@@ -2153,7 +2262,11 @@ void DiskCacheEntryTest::PartialSparseEntry() {
EXPECT_EQ(500, cb.GetResult(rv));
EXPECT_EQ(kSize, start);
rv = entry->GetAvailableRange(20 * 1024, 10000, &start, cb.callback());
- EXPECT_EQ(3616, cb.GetResult(rv));
+ if (memory_only_ || simple_cache_mode_)
+ EXPECT_EQ(3616, cb.GetResult(rv));
+ else
+ EXPECT_EQ(3072, cb.GetResult(rv));
+
EXPECT_EQ(20 * 1024, start);
// 1. Query before a filled 1KB block.