Diffstat (limited to 'net/disk_cache')
-rw-r--r--   net/disk_cache/disk_format.h     |   7
-rw-r--r--   net/disk_cache/entry_unittest.cc | 117
-rw-r--r--   net/disk_cache/rankings.cc       |  14
-rw-r--r--   net/disk_cache/rankings.h        |   5
-rw-r--r--   net/disk_cache/sparse_control.cc | 167
-rw-r--r--   net/disk_cache/sparse_control.h  |  16
6 files changed, 277 insertions, 49 deletions
diff --git a/net/disk_cache/disk_format.h b/net/disk_cache/disk_format.h
index c98fc34..8f0843d 100644
--- a/net/disk_cache/disk_format.h
+++ b/net/disk_cache/disk_format.h
@@ -233,11 +233,16 @@ COMPILE_ASSERT(sizeof(BlockFileHeader) == kBlockHeaderSize, bad_header);
 // This structure contains the control information for parent and child entries.
 // It is stored at offset 0 of the data stream with index 2.
+// It is possible to write to a child entry in a way that causes the last block
+// to be only partially filled. In that case, last_block and last_block_len will
+// keep track of that block.
 struct SparseHeader {
   int64 signature;       // The parent and children signature.
   uint32 magic;          // Structure identifier (equal to kIndexMagic).
   int32 parent_key_len;  // Key length for the parent entry.
-  int32 dummy[4];
+  int32 last_block;      // Index of the last written block.
+  int32 last_block_len;  // Length of the last written block.
+  int32 dummy[10];
 };
 
 // The SparseHeader will be followed by a bitmap, as described by this
diff --git a/net/disk_cache/entry_unittest.cc b/net/disk_cache/entry_unittest.cc
index 86dd41e..a0f77ef 100644
--- a/net/disk_cache/entry_unittest.cc
+++ b/net/disk_cache/entry_unittest.cc
@@ -39,6 +39,7 @@ class DiskCacheEntryTest : public DiskCacheTestWithCache {
   void HugeSparseIO(bool async);
   void GetAvailableRange();
   void DoomSparseEntry();
+  void PartialSparseEntry();
 };
 
 void DiskCacheEntryTest::InternalSyncIO() {
@@ -1185,3 +1186,119 @@ TEST_F(DiskCacheEntryTest, DISABLED_MemoryOnlyDoomSparseEntry) {
   InitCache();
   DoomSparseEntry();
 }
+
+void DiskCacheEntryTest::PartialSparseEntry() {
+  std::string key("the first key");
+  disk_cache::Entry* entry;
+  ASSERT_TRUE(cache_->CreateEntry(key, &entry));
+
+  // We should be able to deal with IO that is not aligned to the block size
+  // of a sparse entry, at least to write a big range without leaving holes.
+  const int kSize = 4 * 1024;
+  scoped_refptr<net::IOBuffer> buf1 = new net::IOBuffer(kSize);
+  CacheTestFillBuffer(buf1->data(), kSize, false);
+
+  // The first write is just to extend the entry.
+  EXPECT_EQ(kSize, entry->WriteSparseData(20000, buf1, kSize, NULL));
+  EXPECT_EQ(kSize, entry->WriteSparseData(500, buf1, kSize, NULL));
+  entry->Close();
+  ASSERT_TRUE(cache_->OpenEntry(key, &entry));
+
+  scoped_refptr<net::IOBuffer> buf2 = new net::IOBuffer(kSize);
+  memset(buf2->data(), 0, kSize);
+  EXPECT_EQ(0, entry->ReadSparseData(8000, buf2, kSize, NULL));
+
+  EXPECT_EQ(500, entry->ReadSparseData(kSize, buf2, kSize, NULL));
+  EXPECT_EQ(0, memcmp(buf2->data(), buf1->data() + kSize - 500, 500));
+  EXPECT_EQ(0, entry->ReadSparseData(0, buf2, kSize, NULL));
+
+  // This read should not change anything.
+  EXPECT_EQ(96, entry->ReadSparseData(24000, buf2, kSize, NULL));
+  EXPECT_EQ(500, entry->ReadSparseData(kSize, buf2, kSize, NULL));
+  EXPECT_EQ(0, entry->ReadSparseData(499, buf2, kSize, NULL));
+
+  int64 start;
+  if (memory_only_) {
+    EXPECT_EQ(100, entry->GetAvailableRange(0, 600, &start));
+    EXPECT_EQ(500, start);
+  } else {
+    EXPECT_EQ(1024, entry->GetAvailableRange(0, 2048, &start));
+    EXPECT_EQ(1024, start);
+  }
+  EXPECT_EQ(500, entry->GetAvailableRange(kSize, kSize, &start));
+  EXPECT_EQ(kSize, start);
+  EXPECT_EQ(3616, entry->GetAvailableRange(20 * 1024, 10000, &start));
+  EXPECT_EQ(20 * 1024, start);
+
+  // Now make another write and verify that there is no hole in between.
+  EXPECT_EQ(kSize, entry->WriteSparseData(500 + kSize, buf1, kSize, NULL));
+  EXPECT_EQ(7 * 1024 + 500, entry->GetAvailableRange(1024, 10000, &start));
+  EXPECT_EQ(1024, start);
+  EXPECT_EQ(kSize, entry->ReadSparseData(kSize, buf2, kSize, NULL));
+  EXPECT_EQ(0, memcmp(buf2->data(), buf1->data() + kSize - 500, 500));
+  EXPECT_EQ(0, memcmp(buf2->data() + 500, buf1->data(), kSize - 500));
+
+  entry->Close();
+}
+
+TEST_F(DiskCacheEntryTest, PartialSparseEntry) {
+  InitCache();
+  PartialSparseEntry();
+}
+
+TEST_F(DiskCacheEntryTest, MemoryPartialSparseEntry) {
+  SetMemoryOnlyMode();
+  InitCache();
+  PartialSparseEntry();
+}
+
+TEST_F(DiskCacheEntryTest, CleanupSparseEntry) {
+  InitCache();
+  std::string key("the first key");
+  disk_cache::Entry* entry;
+  ASSERT_TRUE(cache_->CreateEntry(key, &entry));
+
+  // Corrupt sparse children should be removed automatically.
+  const int kSize = 4 * 1024;
+  scoped_refptr<net::IOBuffer> buf1 = new net::IOBuffer(kSize);
+  CacheTestFillBuffer(buf1->data(), kSize, false);
+
+  const int k1Meg = 1024 * 1024;
+  EXPECT_EQ(kSize, entry->WriteSparseData(8192, buf1, kSize, NULL));
+  EXPECT_EQ(kSize, entry->WriteSparseData(k1Meg + 8192, buf1, kSize, NULL));
+  EXPECT_EQ(kSize, entry->WriteSparseData(2 * k1Meg + 8192, buf1, kSize, NULL));
+  entry->Close();
+  EXPECT_EQ(4, cache_->GetEntryCount());
+
+  void* iter = NULL;
+  int count = 0;
+  std::string child_key[2];
+  while (cache_->OpenNextEntry(&iter, &entry)) {
+    ASSERT_TRUE(entry != NULL);
+    // Writing to an entry will alter the LRU list and invalidate the iterator.
+    if (entry->GetKey() != key && count < 2)
+      child_key[count++] = entry->GetKey();
+    entry->Close();
+  }
+  for (int i = 0; i < 2; i++) {
+    ASSERT_TRUE(cache_->OpenEntry(child_key[i], &entry));
+    // Overwrite the header's magic and signature.
+    EXPECT_EQ(12, entry->WriteData(2, 0, buf1, 12, NULL, false));
+    entry->Close();
+  }
+
+  EXPECT_EQ(4, cache_->GetEntryCount());
+  ASSERT_TRUE(cache_->OpenEntry(key, &entry));
+
+  // Two children should be gone. One while reading and one while writing.
+  EXPECT_EQ(0, entry->ReadSparseData(2 * k1Meg + 8192, buf1, kSize, NULL));
+  EXPECT_EQ(kSize, entry->WriteSparseData(k1Meg + 16384, buf1, kSize, NULL));
+  EXPECT_EQ(0, entry->ReadSparseData(k1Meg + 8192, buf1, kSize, NULL));
+
+  // We never touched this one.
+  EXPECT_EQ(kSize, entry->ReadSparseData(8192, buf1, kSize, NULL));
+  entry->Close();
+
+  // We re-created one of the corrupt children.
+  EXPECT_EQ(3, cache_->GetEntryCount());
+}
diff --git a/net/disk_cache/rankings.cc b/net/disk_cache/rankings.cc
index f0dee9b..a88792f 100644
--- a/net/disk_cache/rankings.cc
+++ b/net/disk_cache/rankings.cc
@@ -394,6 +394,7 @@ void Rankings::Remove(CacheRankingsBlock* node, List list) {
 // entry. Otherwise we'll need reentrant transactions, which is an overkill.
 void Rankings::UpdateRank(CacheRankingsBlock* node, bool modified, List list) {
   Time start = Time::Now();
+  NotAnIterator(node);
   Remove(node, list);
   Insert(node, modified, list);
   CACHE_UMA(AGE_MS, "UpdateRank", 0, start);
@@ -754,6 +755,19 @@ void Rankings::UpdateIterators(CacheRankingsBlock* node) {
   }
 }
 
+void Rankings::NotAnIterator(CacheRankingsBlock* node) {
+#ifdef NDEBUG
+  // This method is only enabled for debug builds.
+  return;
+#endif
+  CacheAddr address = node->address().value();
+  for (IteratorList::iterator it = iterators_.begin(); it != iterators_.end();
+       ++it) {
+    if (it->first == address)
+      NOTREACHED();
+  }
+}
+
 void Rankings::IncrementCounter(List list) {
   if (!count_lists_)
     return;
diff --git a/net/disk_cache/rankings.h b/net/disk_cache/rankings.h
index 81567f0..0a8b113 100644
--- a/net/disk_cache/rankings.h
+++ b/net/disk_cache/rankings.h
@@ -95,7 +95,7 @@ class Rankings {
     List list;                     // Which entry was returned to the user.
     CacheRankingsBlock* nodes[3];  // Nodes on the first three lists.
     Rankings* my_rankings;
-    Iterator(Rankings* rankings) {
+    explicit Iterator(Rankings* rankings) {
       memset(this, 0, sizeof(Iterator));
       my_rankings = rankings;
     }
@@ -177,6 +177,9 @@ class Rankings {
   // Updates the iterators whenever node is being changed.
   void UpdateIterators(CacheRankingsBlock* node);
 
+  // Verifies that no iterator gets invalidated by changing a node.
+  void NotAnIterator(CacheRankingsBlock* node);
+
   // Keeps track of the number of entries on a list.
   void IncrementCounter(List list);
   void DecrementCounter(List list);
diff --git a/net/disk_cache/sparse_control.cc b/net/disk_cache/sparse_control.cc
index 4035bd3..66096a2 100644
--- a/net/disk_cache/sparse_control.cc
+++ b/net/disk_cache/sparse_control.cc
@@ -27,6 +27,12 @@ const int kSparseData = 1;
 // We can have up to 64k children.
 const int kMaxMapSize = 8 * 1024;
 
+// The maximum number of bytes that a child can store.
+const int kMaxEntrySize = 0x100000;
+
+// The size of each data block (tracked by the child allocation bitmap).
+const int kBlockSize = 1024;
+
 // Returns the name of a child entry given the base_name and signature of the
 // parent and the child_id.
 // If the entry is called entry_name, child entries will be named something
@@ -124,7 +130,7 @@ void ChildrenDeleter::DeleteChildren() {
       this, &ChildrenDeleter::DeleteChildren));
 }
 
-}
+}  // namespace.
 
 namespace disk_cache {
 
@@ -336,44 +342,32 @@ bool SparseControl::OpenChild() {
   // See if we are tracking this child.
   bool child_present = ChildPresent();
 
-  if (!child_present) {
-    if (kReadOperation == operation_)
-      return false;
-    if (kGetRangeOperation == operation_)
-      return true;
-  }
-
-  if (!child_present || !entry_->backend_->OpenEntry(key, &child_)) {
-    if (!entry_->backend_->CreateEntry(key, &child_)) {
-      child_ = NULL;
-      result_ = net::ERR_CACHE_READ_FAILURE;
-      return false;
-    }
-    // Write signature.
-    InitChildData();
-    return true;
-  }
+  if (!child_present || !entry_->backend_->OpenEntry(key, &child_))
+    return ContinueWithoutChild(key);
 
   EntryImpl* child = static_cast<EntryImpl*>(child_);
-  if (!(CHILD_ENTRY & child->GetEntryFlags())) {
-    result_ = net::ERR_CACHE_OPERATION_NOT_SUPPORTED;
-    return false;
-  }
+  if (!(CHILD_ENTRY & child->GetEntryFlags()) ||
+      child->GetDataSize(kSparseIndex) <
+      static_cast<int>(sizeof(child_data_)))
+    return KillChildAndContinue(key, false);
 
   scoped_refptr<net::WrappedIOBuffer> buf =
       new net::WrappedIOBuffer(reinterpret_cast<char*>(&child_data_));
 
   // Read signature.
   int rv = child_->ReadData(kSparseIndex, 0, buf, sizeof(child_data_), NULL);
-  if (rv != sizeof(child_data_)) {
-    result_ = net::ERR_CACHE_READ_FAILURE;
-    return false;
-  }
+  if (rv != sizeof(child_data_))
+    return KillChildAndContinue(key, true);  // This is a fatal failure.
 
-  // TODO(rvargas): Proper error handling and check magic etc.
-  if (child_data_.header.signature != sparse_header_.signature) {
-    result_ = net::ERR_CACHE_READ_FAILURE;
-    return false;
+  if (child_data_.header.signature != sparse_header_.signature ||
+      child_data_.header.magic != kIndexMagic)
+    return KillChildAndContinue(key, false);
+
+  if (child_data_.header.last_block_len < 0 ||
+      child_data_.header.last_block_len > kBlockSize) {
+    // Make sure these values are always within range.
+    child_data_.header.last_block_len = 0;
+    child_data_.header.last_block = -1;
   }
 
   return true;
@@ -397,6 +391,36 @@ std::string SparseControl::GenerateChildKey() {
                             offset_ >> 20);
 }
 
+// We are deleting the child because something went wrong.
+bool SparseControl::KillChildAndContinue(const std::string& key, bool fatal) {
+  SetChildBit(false);
+  child_->Doom();
+  child_->Close();
+  child_ = NULL;
+  if (fatal) {
+    result_ = net::ERR_CACHE_READ_FAILURE;
+    return false;
+  }
+  return ContinueWithoutChild(key);
+}
+
+// We were not able to open this child; see what we can do.
+bool SparseControl::ContinueWithoutChild(const std::string& key) {
+  if (kReadOperation == operation_)
+    return false;
+  if (kGetRangeOperation == operation_)
+    return true;
+
+  if (!entry_->backend_->CreateEntry(key, &child_)) {
+    child_ = NULL;
+    result_ = net::ERR_CACHE_READ_FAILURE;
+    return false;
+  }
+  // Write signature.
+  InitChildData();
+  return true;
+}
+
 bool SparseControl::ChildPresent() {
   int child_bit = static_cast<int>(offset_ >> 20);
   if (children_map_.Size() <= child_bit)
@@ -405,14 +429,14 @@ bool SparseControl::ChildPresent() {
   return children_map_.Get(child_bit);
 }
 
-void SparseControl::SetChildBit() {
+void SparseControl::SetChildBit(bool value) {
   int child_bit = static_cast<int>(offset_ >> 20);
 
   // We may have to increase the bitmap of child entries.
   if (children_map_.Size() <= child_bit)
     children_map_.Resize(Bitmap::RequiredArraySize(child_bit + 1) * 32, true);
 
-  children_map_.Set(child_bit, true);
+  children_map_.Set(child_bit, value);
 }
 
 void SparseControl::WriteSparseData() {
@@ -430,8 +454,8 @@ void SparseControl::WriteSparseData() {
 bool SparseControl::VerifyRange() {
   DCHECK_GE(result_, 0);
 
-  child_offset_ = static_cast<int>(offset_) & 0xfffff;
-  child_len_ = std::min(buf_len_, 0x100000 - child_offset_);
+  child_offset_ = static_cast<int>(offset_) & (kMaxEntrySize - 1);
+  child_len_ = std::min(buf_len_, kMaxEntrySize - child_offset_);
 
   // We can write to (or get info from) anywhere in this child.
   if (operation_ != kReadOperation)
@@ -442,12 +466,23 @@ bool SparseControl::VerifyRange() {
   int start = child_offset_ >> 10;
   if (child_map_.FindNextBit(&start, last_bit, false)) {
     // Something is not here.
-    if (start == child_offset_ >> 10)
-      return false;
+    DCHECK_GE(child_data_.header.last_block_len, 0);
+    DCHECK_LT(child_data_.header.last_block_len, kMaxEntrySize);
+    int partial_block_len = PartialBlockLength(start);
+    if (start == child_offset_ >> 10) {
+      // It looks like we don't have anything.
+      if (partial_block_len <= (child_offset_ & (kBlockSize - 1)))
+        return false;
+    }
 
     // We have the first part.
-    // TODO(rvargas): Avoid coming back here again after the actual read.
     child_len_ = (start << 10) - child_offset_;
+    if (partial_block_len) {
+      // We may have a few extra bytes.
+      child_len_ = std::min(child_len_ + partial_block_len, buf_len_);
+    }
+    // There is no need to read more after this one.
+    buf_len_ = child_len_;
   }
   return true;
 }
@@ -456,12 +491,43 @@ void SparseControl::UpdateRange(int result) {
   if (result <= 0 || operation_ != kWriteOperation)
     return;
 
+  DCHECK_GE(child_data_.header.last_block_len, 0);
+  DCHECK_LT(child_data_.header.last_block_len, kMaxEntrySize);
+
   // Write the bitmap.
-  int last_bit = (child_offset_ + result + 1023) >> 10;
-  child_map_.SetRange(child_offset_ >> 10, last_bit, true);
+  int first_bit = child_offset_ >> 10;
+  int block_offset = child_offset_ & (kBlockSize - 1);
+  if (block_offset && (child_data_.header.last_block != first_bit ||
+                       child_data_.header.last_block_len < block_offset)) {
+    // The first block is not completely filled; ignore it.
+    first_bit++;
+  }
+
+  int last_bit = (child_offset_ + result) >> 10;
+  block_offset = (child_offset_ + result) & (kBlockSize - 1);
 
-  // TODO(rvargas): Keep track of partial writes so that we don't consider the
-  // whole block to be present.
+  if (block_offset && !child_map_.Get(last_bit)) {
+    // The last block is not completely filled; save it for later.
+    child_data_.header.last_block = last_bit;
+    child_data_.header.last_block_len = block_offset;
+  } else {
+    child_data_.header.last_block = -1;
+  }
+
+  child_map_.SetRange(first_bit, last_bit, true);
+}
+
+int SparseControl::PartialBlockLength(int block_index) const {
+  if (block_index == child_data_.header.last_block)
+    return child_data_.header.last_block_len;
+
+  // This may be the last stored index.
+  int entry_len = child_->GetDataSize(kSparseData);
+  if (block_index == entry_len >> 10)
+    return entry_len & (kBlockSize - 1);
+
+  // This is really empty.
+  return 0;
 }
 
 void SparseControl::InitChildData() {
@@ -479,7 +545,7 @@ void SparseControl::InitChildData() {
                              NULL, false);
   if (rv != sizeof(child_data_))
     DLOG(ERROR) << "Failed to save child data";
-  SetChildBit();
+  SetChildBit(true);
 }
 
 void SparseControl::DoChildrenIO() {
@@ -532,6 +598,8 @@ bool SparseControl::DoChildIO() {
     }
     return false;
   }
+  if (!rv)
+    return false;
 
   DoChildIOCompleted(rv);
   return true;
@@ -544,9 +612,12 @@ int SparseControl::DoGetAvailableRange() {
   // Check that there are no holes in this range.
   int last_bit = (child_offset_ + child_len_ + 1023) >> 10;
   int start = child_offset_ >> 10;
+  int partial_start_bytes = PartialBlockLength(start);
   int bits_found = child_map_.FindBits(&start, last_bit, true);
 
-  if (!bits_found)
+  // We don't care if there is a partial block in the middle of the range.
+  int block_offset = child_offset_ & (kBlockSize - 1);
+  if (!bits_found && partial_start_bytes <= block_offset)
     return child_len_;
 
   // We are done. Just break the loop and reset result_ to our real result.
@@ -555,10 +626,18 @@ int SparseControl::DoGetAvailableRange() {
   // start now points to the first 1. Let's see if we have zeros before it.
   int empty_start = (start << 10) - child_offset_;
 
+  int bytes_found = bits_found << 10;
+  bytes_found += PartialBlockLength(start + bits_found);
+
   // If the user is searching past the end of this child, bits_found is the
   // right result; otherwise, we have some empty space at the start of this
   // query that we have to subtract from the range that we searched.
-  result_ = std::min(bits_found << 10, child_len_ - empty_start);
+  result_ = std::min(bytes_found, child_len_ - empty_start);
+
+  if (!bits_found) {
+    result_ = std::min(partial_start_bytes - block_offset, child_len_);
+    empty_start = 0;
+  }
 
   // Only update offset_ when this query found zeros at the start.
   if (empty_start)
diff --git a/net/disk_cache/sparse_control.h b/net/disk_cache/sparse_control.h
index c93e5e4..534d1a9 100644
--- a/net/disk_cache/sparse_control.h
+++ b/net/disk_cache/sparse_control.h
@@ -81,13 +81,19 @@ class SparseControl {
   void CloseChild();
   std::string GenerateChildKey();
 
+  // Deletes the current child and continues the current operation (open).
+  bool KillChildAndContinue(const std::string& key, bool fatal);
+
+  // Continues the current operation (open) without a current child.
+  bool ContinueWithoutChild(const std::string& key);
+
   // Returns true if the required child is tracked by the parent entry, i.e. it
   // was already created.
   bool ChildPresent();
 
-  // Starts tracking this child. A new child entry was created so we must set
-  // the corresponding bit on the bitmap of children.
-  void SetChildBit();
+  // Sets the bit for the current child to the provided |value|. In other words,
+  // starts or stops tracking this child.
+  void SetChildBit(bool value);
 
   // Writes to disk the tracking information for this entry.
   void WriteSparseData();
@@ -102,6 +108,10 @@ class SparseControl {
   // the current operation.
   void UpdateRange(int result);
 
+  // Returns the number of bytes stored at |block_index|, if its allocation-bit
+  // is off (because it is not completely filled).
+  int PartialBlockLength(int block_index) const;
+
   // Initializes the sparse info for the current child.
   void InitChildData();
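
The bookkeeping introduced by this change can be summarized outside the cache code. The sketch below is illustrative only and is not part of the patch: ChildState, RecordWrite, PartialBytes and AvailableBytes are invented names, the real code also consults the child entry's stored data size, and the backend uses its own Bitmap class rather than std::vector<bool>. It shows how a bitmap of fully written 1 KB blocks plus the new last_block/last_block_len fields can describe writes that do not end on a block boundary, in the spirit of UpdateRange(), PartialBlockLength() and DoGetAvailableRange() above.

// Illustrative sketch only; not taken from the patch. Assumes every write
// fits inside a single 1 MB child (offset + len <= 1 MB).
#include <algorithm>
#include <vector>

const int kBlockSize = 1024;       // Block size tracked by the child bitmap.
const int kBlocksPerChild = 1024;  // 1 MB child / 1 KB blocks.

struct ChildState {
  std::vector<bool> block_map = std::vector<bool>(kBlocksPerChild, false);
  int last_block = -1;     // Index of a partially filled block, or -1.
  int last_block_len = 0;  // Valid bytes stored in that block.
};

// Roughly what UpdateRange() does: mark fully covered blocks in the bitmap
// and remember a trailing partial block instead of rounding it up.
void RecordWrite(ChildState* child, int offset, int len) {
  int first_bit = offset / kBlockSize;
  int head = offset % kBlockSize;
  if (head && (child->last_block != first_bit || child->last_block_len < head))
    first_bit++;  // The first block is not completely filled; ignore it.

  int end = offset + len;
  int last_bit = end / kBlockSize;
  int tail = end % kBlockSize;
  if (tail && !child->block_map[last_bit]) {
    child->last_block = last_bit;  // Save the partial tail for later.
    child->last_block_len = tail;
  } else {
    child->last_block = -1;
    child->last_block_len = 0;
  }
  for (int i = first_bit; i < last_bit; ++i)
    child->block_map[i] = true;
}

// Roughly what PartialBlockLength() returns for a block whose bit is off.
int PartialBytes(const ChildState& child, int block) {
  return block == child.last_block ? child.last_block_len : 0;
}

// Contiguous bytes available starting at |offset|, in the spirit of
// DoGetAvailableRange(): full blocks from the bitmap plus a partial tail.
int AvailableBytes(const ChildState& child, int offset) {
  int block = offset / kBlockSize;
  int bytes = 0;
  while (block < kBlocksPerChild && child.block_map[block]) {
    bytes += kBlockSize;
    ++block;
  }
  bytes += PartialBytes(child, block);
  return std::max(0, bytes - offset % kBlockSize);
}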