 net/disk_cache/entry_unittest.cc  | 128
 net/disk_cache/mem_entry_impl.cc  | 310
 net/disk_cache/mem_entry_impl.h   |  66
 net/disk_cache/mem_rankings.cc    |   5
 net/disk_cache/mem_rankings.h     |   2
 5 files changed, 438 insertions(+), 73 deletions(-)
diff --git a/net/disk_cache/entry_unittest.cc b/net/disk_cache/entry_unittest.cc
index 7b7d5d3..bf111ec 100644
--- a/net/disk_cache/entry_unittest.cc
+++ b/net/disk_cache/entry_unittest.cc
@@ -828,43 +828,37 @@ TEST_F(DiskCacheEntryTest, MemoryOnlyDoomedEntry) {
 // enumerations.
 TEST_F(DiskCacheEntryTest, MemoryOnlyEnumerationWithSlaveEntries) {
   SetMemoryOnlyMode();
-  SetDirectMode();
-  SetMaxSize(1024);
   InitCache();
 
-  disk_cache::Entry* entry;
-  disk_cache::MemEntryImpl* parent_entry;
+  const int kSize = 4096;
+  scoped_refptr<net::IOBuffer> buf = new net::IOBuffer(kSize);
+  CacheTestFillBuffer(buf->data(), kSize, false);
 
-  ASSERT_TRUE(cache_->CreateEntry("parent", &entry));
-  parent_entry = reinterpret_cast<disk_cache::MemEntryImpl*>(entry);
-  EXPECT_EQ(disk_cache::MemEntryImpl::kParentEntry, parent_entry->type());
-  parent_entry->Close();
+  std::string key("the first key");
+  disk_cache::Entry* parent_entry;
+  ASSERT_TRUE(cache_->CreateEntry(key, &parent_entry));
+
+  // Writes to the parent entry.
+  EXPECT_EQ(kSize, parent_entry->WriteSparseData(0, buf, kSize, NULL));
 
-  disk_cache::MemEntryImpl* child_entry =
-      new disk_cache::MemEntryImpl(mem_cache_);
-  // TODO(hclam): we shouldn't create a child entry explicit. Once a parent
-  // entry can be triggered to create a child entry, we should change this
-  // to use another public method to do the creation.
-  EXPECT_TRUE(child_entry->CreateChildEntry(parent_entry));
-  EXPECT_EQ(disk_cache::MemEntryImpl::kChildEntry, child_entry->type());
+  // This write creates a child entry and writes to it.
+  EXPECT_EQ(kSize, parent_entry->WriteSparseData(8192, buf, kSize, NULL));
+
+  parent_entry->Close();
 
   // Perform the enumerations.
   void* iter = NULL;
+  disk_cache::Entry* entry = NULL;
   int count = 0;
   while (cache_->OpenNextEntry(&iter, &entry)) {
     ASSERT_TRUE(entry != NULL);
+    ++count;
     disk_cache::MemEntryImpl* mem_entry =
         reinterpret_cast<disk_cache::MemEntryImpl*>(entry);
     EXPECT_EQ(disk_cache::MemEntryImpl::kParentEntry, mem_entry->type());
-    EXPECT_TRUE(mem_entry == parent_entry);
     mem_entry->Close();
-    ++count;
   }
   EXPECT_EQ(1, count);
-
-  // TODO(hclam): remove this when parent entry can doom child entries
-  // internally. Now we have to doom this child entry manually.
-  child_entry->Doom();
 }
 
 // Writes |buf_1| to offset and reads it back as |buf_2|.
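The enumeration test above relies on the block math this patch introduces in mem_entry_impl.cc (shown further down): sparse data is split into fixed 4 KB children, so the write at offset 8192 creates a child entry while only the parent shows up in the enumeration. The following self-contained sketch restates that offset math; the main() harness and its asserts are illustrative only and are not part of the patch.

#include <cassert>
#include <cstdint>

// Same constants as in the patch: each child covers 1 << 12 = 4096 bytes.
const int kMaxSparseEntryBits = 12;
const int kMaxSparseEntrySize = 1 << kMaxSparseEntryBits;

// The patch declares these with Chromium's int64; int64_t is the standalone
// equivalent used here.
inline int ToChildIndex(int64_t offset) {
  return static_cast<int>(offset >> kMaxSparseEntryBits);
}

inline int ToChildOffset(int64_t offset) {
  return static_cast<int>(offset & (kMaxSparseEntrySize - 1));
}

int main() {
  // Offset 0 belongs to the first block, which the parent entry itself holds.
  assert(ToChildIndex(0) == 0 && ToChildOffset(0) == 0);
  // Offset 8192 starts block 2, so the second write above goes to a child.
  assert(ToChildIndex(8192) == 2 && ToChildOffset(8192) == 0);
  // The misaligned offset 20481 used by MemoryOnlyMisalignedSparseIO lands
  // 1 byte into block 5; an 8192-byte write from there spans blocks 5 to 7,
  // which is the "write that spans 3 entries" the test refers to.
  assert(ToChildIndex(20481) == 5 && ToChildOffset(20481) == 1);
  return 0;
}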
@@ -940,7 +934,7 @@ TEST_F(DiskCacheEntryTest, BasicSparseSyncIO) {
   BasicSparseIO(false);
 }
 
-TEST_F(DiskCacheEntryTest, DISABLED_MemoryOnlyBasicSparseSyncIO) {
+TEST_F(DiskCacheEntryTest, MemoryOnlyBasicSparseSyncIO) {
   SetMemoryOnlyMode();
   InitCache();
   BasicSparseIO(false);
@@ -951,7 +945,7 @@ TEST_F(DiskCacheEntryTest, BasicSparseAsyncIO) {
   BasicSparseIO(true);
 }
 
-TEST_F(DiskCacheEntryTest, DISABLED_MemoryOnlyBasicSparseAsyncIO) {
+TEST_F(DiskCacheEntryTest, MemoryOnlyBasicSparseAsyncIO) {
   SetMemoryOnlyMode();
   InitCache();
   BasicSparseIO(true);
@@ -983,7 +977,7 @@ TEST_F(DiskCacheEntryTest, HugeSparseSyncIO) {
   HugeSparseIO(false);
 }
 
-TEST_F(DiskCacheEntryTest, DISABLED_MemoryOnlyHugeSparseSyncIO) {
+TEST_F(DiskCacheEntryTest, MemoryOnlyHugeSparseSyncIO) {
   SetMemoryOnlyMode();
   InitCache();
   HugeSparseIO(false);
@@ -994,7 +988,7 @@ TEST_F(DiskCacheEntryTest, HugeSparseAsyncIO) {
   HugeSparseIO(true);
 }
 
-TEST_F(DiskCacheEntryTest, DISABLED_MemoryOnlyHugeSparseAsyncIO) {
+TEST_F(DiskCacheEntryTest, MemoryOnlyHugeSparseAsyncIO) {
   SetMemoryOnlyMode();
   InitCache();
   HugeSparseIO(true);
@@ -1047,8 +1041,90 @@ TEST_F(DiskCacheEntryTest, GetAvailableRange) {
   GetAvailableRange();
 }
 
-TEST_F(DiskCacheEntryTest, DISABLED_MemoryOnlyGetAvailableRange) {
+TEST_F(DiskCacheEntryTest, MemoryOnlyGetAvailableRange) {
   SetMemoryOnlyMode();
   InitCache();
   GetAvailableRange();
 }
+
+TEST_F(DiskCacheEntryTest, MemoryOnlyMisalignedSparseIO) {
+  SetMemoryOnlyMode();
+  InitCache();
+
+  const int kSize = 8192;
+  scoped_refptr<net::IOBuffer> buf_1 = new net::IOBuffer(kSize);
+  scoped_refptr<net::IOBuffer> buf_2 = new net::IOBuffer(kSize);
+  CacheTestFillBuffer(buf_1->data(), kSize, false);
+
+  std::string key("the first key");
+  disk_cache::Entry* entry;
+  ASSERT_TRUE(cache_->CreateEntry(key, &entry));
+
+  // This loop writes back-to-back blocks, starting from offsets 0 and 9000.
+  for (int i = 0; i < kSize; i += 1024) {
+    scoped_refptr<net::WrappedIOBuffer> buf_3 =
+        new net::WrappedIOBuffer(buf_1->data() + i);
+    VerifySparseIO(entry, i, buf_3, 1024, false, buf_2);
+    VerifySparseIO(entry, 9000 + i, buf_3, 1024, false, buf_2);
+  }
+
+  // Make sure we have data written.
+  VerifyContentSparseIO(entry, 0, buf_1->data(), kSize, false);
+  VerifyContentSparseIO(entry, 9000, buf_1->data(), kSize, false);
+
+  // This tests a large write that spans 3 entries from a misaligned offset.
+  VerifySparseIO(entry, 20481, buf_1, 8192, false, buf_2);
+
+  entry->Close();
+}
+
+TEST_F(DiskCacheEntryTest, MemoryOnlyMisalignedGetAvailableRange) {
+  SetMemoryOnlyMode();
+  InitCache();
+
+  const int kSize = 8192;
+  scoped_refptr<net::IOBuffer> buf = new net::IOBuffer(kSize);
+  CacheTestFillBuffer(buf->data(), kSize, false);
+
+  disk_cache::Entry* entry;
+  std::string key("the first key");
+  ASSERT_TRUE(cache_->CreateEntry(key, &entry));
+
+  // Writes in the middle of an entry.
+  EXPECT_EQ(1024, entry->WriteSparseData(0, buf, 1024, NULL));
+  EXPECT_EQ(1024, entry->WriteSparseData(5120, buf, 1024, NULL));
+  EXPECT_EQ(1024, entry->WriteSparseData(10000, buf, 1024, NULL));
+
+  // Writes in the middle of an entry and spans 2 child entries.
+  EXPECT_EQ(8192, entry->WriteSparseData(50000, buf, 8192, NULL));
+
+  int64 start;
+  // Test that we stop at a discontinuous child at the second block.
+  EXPECT_EQ(1024, entry->GetAvailableRange(0, 10000, &start));
+  EXPECT_EQ(0, start);
+
+  // Test that the number of bytes is reported correctly when we start from
+  // the middle of a filled region.
+  EXPECT_EQ(512, entry->GetAvailableRange(512, 10000, &start));
+  EXPECT_EQ(512, start);
+
+  // Test that we find bytes in the child of the next block.
+  EXPECT_EQ(1024, entry->GetAvailableRange(1024, 10000, &start));
+  EXPECT_EQ(5120, start);
+
+  // Test that the desired length is respected. It starts within a filled
+  // region.
+  EXPECT_EQ(512, entry->GetAvailableRange(5500, 512, &start));
+  EXPECT_EQ(5500, start);
+
+  // Test that the desired length is respected. It starts before a filled
+  // region.
+  EXPECT_EQ(500, entry->GetAvailableRange(5000, 620, &start));
+  EXPECT_EQ(5120, start);
+
+  // Test that multiple blocks are scanned.
+  EXPECT_EQ(8192, entry->GetAvailableRange(40000, 20000, &start));
+  EXPECT_EQ(50000, start);
+
+  entry->Close();
+}
diff --git a/net/disk_cache/mem_entry_impl.cc b/net/disk_cache/mem_entry_impl.cc
index 4905785..bf6359d 100644
--- a/net/disk_cache/mem_entry_impl.cc
+++ b/net/disk_cache/mem_entry_impl.cc
@@ -11,6 +11,28 @@
 
 using base::Time;
 
+namespace {
+
+const int kSparseData = 1;
+
+// Maximum size of a sparse entry is 2 to the power of this number.
+const int kMaxSparseEntryBits = 12;
+
+// A sparse entry has a maximum size of 4KB.
+const int kMaxSparseEntrySize = 1 << kMaxSparseEntryBits;
+
+// Converts a global offset to a child index.
+inline int ToChildIndex(int64 offset) {
+  return static_cast<int>(offset >> kMaxSparseEntryBits);
+}
+
+// Converts a global offset to an offset in a child entry.
+inline int ToChildOffset(int64 offset) {
+  return static_cast<int>(offset & (kMaxSparseEntrySize - 1));
+}
+
+}  // namespace
+
 namespace disk_cache {
 
 MemEntryImpl::MemEntryImpl(MemBackendImpl* backend) {
@@ -18,6 +40,8 @@ MemEntryImpl::MemEntryImpl(MemBackendImpl* backend) {
   backend_ = backend;
   ref_count_ = 0;
   parent_ = NULL;
+  child_id_ = 0;
+  child_first_pos_ = 0;
   next_ = NULL;
   prev_ = NULL;
   for (int i = 0; i < NUM_STREAMS; i++)
@@ -34,25 +58,14 @@ bool MemEntryImpl::CreateEntry(const std::string& key) {
   key_ = key;
   last_modified_ = Time::Now();
   last_used_ = Time::Now();
-  type_ = kParentEntry;
   Open();
   backend_->ModifyStorageSize(0, static_cast<int32>(key.size()));
   return true;
 }
 
-bool MemEntryImpl::CreateChildEntry(MemEntryImpl* parent) {
-  parent_ = parent;
-  last_modified_ = Time::Now();
-  last_used_ = Time::Now();
-  type_ = kChildEntry;
-  // Insert this to the backend's ranking list.
-  backend_->InsertIntoRankingList(this);
-  return true;
-}
-
 void MemEntryImpl::Close() {
   // Only a parent entry can be closed.
-  DCHECK(type_ == kParentEntry);
+  DCHECK(type() == kParentEntry);
   ref_count_--;
   DCHECK(ref_count_ >= 0);
   if (!ref_count_ && doomed_)
@@ -61,14 +74,16 @@
 
 void MemEntryImpl::Open() {
   // Only a parent entry can be opened.
-  DCHECK(type_ == kParentEntry);
+  // TODO(hclam): make sure it's correct to not apply the concept of ref
+  // counting to child entries.
+  DCHECK(type() == kParentEntry);
   ref_count_++;
   DCHECK(ref_count_ >= 0);
   DCHECK(!doomed_);
 }
 
 bool MemEntryImpl::InUse() {
-  if (type_ == kParentEntry) {
+  if (type() == kParentEntry) {
     return ref_count_ > 0;
   } else {
     // A child entry is always not in use. The consequence is that a child entry
@@ -81,11 +96,11 @@ void MemEntryImpl::Doom() {
   if (doomed_)
     return;
-  if (type_ == kParentEntry) {
+  if (type() == kParentEntry) {
     // Perform internal doom from the backend if this is a parent entry.
     backend_->InternalDoomEntry(this);
   } else {
-    // Manually detach from the parent entry and perform internal doom.
+    // Manually detach from the backend and perform internal doom.
     backend_->RemoveFromRankingList(this);
     InternalDoom();
   }
 }
@@ -94,10 +109,23 @@ void MemEntryImpl::InternalDoom() {
   doomed_ = true;
   if (!ref_count_) {
-    if (type_ == kParentEntry) {
-      // TODO(hclam): doom all child entries associated with this entry.
+    if (type() == kParentEntry) {
+      // If this is a parent entry, we need to doom all the child entries.
+      if (children_.get()) {
+        EntryMap children;
+        children.swap(*children_);
+        for (EntryMap::iterator i = children.begin();
+             i != children.end(); ++i) {
+          // Since a pointer to this object is also saved in the map, avoid
+          // dooming it.
+          if (i->second != this)
+            i->second->Doom();
+        }
+        DCHECK(children_->size() == 0);
+      }
     } else {
-      // TODO(hclam): detach this child entry from the parent entry.
+      // If this is a child entry, detach it from the parent.
+      parent_->DetachChild(child_id_);
     }
     delete this;
   }
@@ -105,7 +133,7 @@
 
 std::string MemEntryImpl::GetKey() const {
   // A child entry doesn't have key so this method should not be called.
-  DCHECK(type_ == kParentEntry);
+  DCHECK(type() == kParentEntry);
   return key_;
 }
@@ -120,16 +148,12 @@ Time MemEntryImpl::GetLastModified() const {
 
 int32 MemEntryImpl::GetDataSize(int index) const {
   if (index < 0 || index >= NUM_STREAMS)
     return 0;
-
-  // TODO(hclam): handle the case when this is a parent entry and has associated
-  // child entries.
   return data_size_[index];
 }
 
 int MemEntryImpl::ReadData(int index, int offset, net::IOBuffer* buf,
     int buf_len, net::CompletionCallback* completion_callback) {
-  // This method can only be called with a parent entry.
-  DCHECK(type_ == kParentEntry);
+  DCHECK(type() == kParentEntry || index == kSparseData);
   if (index < 0 || index >= NUM_STREAMS)
     return net::ERR_INVALID_ARGUMENT;
@@ -152,8 +176,7 @@
 
 int MemEntryImpl::WriteData(int index, int offset, net::IOBuffer* buf,
     int buf_len, net::CompletionCallback* completion_callback, bool truncate) {
-  // This method can only be called with a parent entry.
-  DCHECK(type_ == kParentEntry);
+  DCHECK(type() == kParentEntry || index == kSparseData);
   if (index < 0 || index >= NUM_STREAMS)
     return net::ERR_INVALID_ARGUMENT;
@@ -166,9 +189,6 @@
 
   // offset or buf_len could be negative numbers.
   if (offset > max_file_size || buf_len > max_file_size ||
       offset + buf_len > max_file_size) {
-    int size = offset + buf_len;
-    if (size <= max_file_size)
-      size = kint32max;
     return net::ERR_FAILED;
   }
@@ -198,16 +218,160 @@
 
 int MemEntryImpl::ReadSparseData(int64 offset, net::IOBuffer* buf, int buf_len,
                                  net::CompletionCallback* completion_callback) {
-  return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;
+  DCHECK(type() == kParentEntry);
+
+  if (!InitSparseInfo())
+    return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;
+
+  if (offset < 0 || buf_len < 0 || !buf_len)
+    return net::ERR_INVALID_ARGUMENT;
+
+  // We will keep using this buffer and adjusting its offset as we read.
+  scoped_refptr<net::ReusedIOBuffer> io_buf = new net::ReusedIOBuffer(buf,
+                                                                      buf_len);
+
+  // Counts the number of bytes read.
+  int bytes_read = 0;
+
+  // Iterate until we have read enough.
+  while (bytes_read < buf_len) {
+    MemEntryImpl* child = OpenChild(offset + bytes_read, false);
+
+    // No child present for that offset.
+    if (!child)
+      break;
+
+    // We then need to prepare the child offset and len.
+    int child_offset = ToChildOffset(offset + bytes_read);
+
+    // If we are trying to read from a position where the child entry has no
+    // data, we should stop.
+    if (child_offset < child->child_first_pos_)
+      break;
+    int ret = child->ReadData(kSparseData, child_offset, io_buf,
+                              buf_len - bytes_read, NULL);
+
+    // If we encounter an error in one entry, return immediately.
+    if (ret < 0)
+      return ret;
+    else if (ret == 0)
+      break;
+
+    // Increment the counter by the number of bytes read in the child entry.
+    bytes_read += ret;
+    // And also adjust the buffer's offset.
+    if (bytes_read < buf_len)
+      io_buf->SetOffset(bytes_read);
+  }
+
+  UpdateRank(false);
+
+  return bytes_read;
 }
 
 int MemEntryImpl::WriteSparseData(int64 offset, net::IOBuffer* buf,
     int buf_len, net::CompletionCallback* completion_callback) {
-  return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;
+  DCHECK(type() == kParentEntry);
+
+  if (!InitSparseInfo())
+    return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;
+
+  if (offset < 0 || buf_len < 0)
+    return net::ERR_INVALID_ARGUMENT;
+
+  scoped_refptr<net::ReusedIOBuffer> io_buf = new net::ReusedIOBuffer(buf,
+                                                                      buf_len);
+  // Counts the number of bytes written.
+  int bytes_written = 0;
+
+  // This loop walks through child entries continuously starting from |offset|
+  // and writes blocks of data (of maximum size kMaxSparseEntrySize) into each
+  // child entry until all |buf_len| bytes are written. The write operation can
+  // start in the middle of an entry.
+  while (bytes_written < buf_len) {
+    MemEntryImpl* child = OpenChild(offset + bytes_written, true);
+    int child_offset = ToChildOffset(offset + bytes_written);
+
+    // Find the right amount to write; this evaluates the remaining bytes to
+    // write and the remaining capacity of this child entry.
+    int write_len = std::min(buf_len - bytes_written,
+                             kMaxSparseEntrySize - child_offset);
+
+    // Keep a record of the last byte position (exclusive) in the child.
+    int data_size = child->GetDataSize(kSparseData);
+
+    // Always write to the child entry. This operation may overwrite data
+    // previously written.
+    // TODO(hclam): if there is data in the entry and this write is not
+    // continuous we may want to discard this write.
+    int ret = child->WriteData(kSparseData, child_offset, io_buf, write_len,
+                               NULL, true);
+    if (ret < 0)
+      return ret;
+    else if (ret == 0)
+      break;
+
+    // Keep a record of the first byte position in the child if the write was
+    // neither aligned nor continuous. This is to enable writing to the middle
+    // of an entry and still keep track of data off the aligned edge.
+    if (data_size != child_offset)
+      child->child_first_pos_ = child_offset;
+
+    // Increment the counter.
+    bytes_written += ret;
+
+    // And adjust the offset in the IO buffer.
+    if (bytes_written < buf_len)
+      io_buf->SetOffset(bytes_written);
+  }
+
+  UpdateRank(true);
+
+  return bytes_written;
 }
 
 int MemEntryImpl::GetAvailableRange(int64 offset, int len, int64* start) {
-  return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;
+  DCHECK(type() == kParentEntry);
+  DCHECK(start);
+
+  if (!InitSparseInfo())
+    return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;
+
+  if (offset < 0 || len < 0 || !start)
+    return net::ERR_INVALID_ARGUMENT;
+
+  MemEntryImpl* current_child = NULL;
+
+  // Find the first child and record the number of empty bytes.
+  int empty = FindNextChild(offset, len, &current_child);
+  if (current_child) {
+    *start = offset + empty;
+    len -= empty;
+
+    // Counts the number of continuous bytes.
+    int continuous = 0;
+
+    // This loop scans for continuous bytes.
+    while (len && current_child) {
+      // Number of bytes available in this child.
+      int data_size = current_child->GetDataSize(kSparseData) -
+                      ToChildOffset(*start + continuous);
+      if (data_size > len)
+        data_size = len;
+
+      // We have found more continuous bytes, so increment the count. Also
+      // decrement the length we should scan.
+      continuous += data_size;
+      len -= data_size;
+
+      // If the next child is discontinuous, break the loop.
+      if (FindNextChild(*start + continuous, len, &current_child))
+        break;
+    }
+    return continuous;
+  }
+  *start = offset;
+  return 0;
 }
 
 void MemEntryImpl::PrepareTarget(int index, int offset, int buf_len) {
@@ -238,4 +402,84 @@ void MemEntryImpl::UpdateRank(bool modified) {
   backend_->UpdateRank(this);
 }
 
+bool MemEntryImpl::InitSparseInfo() {
+  DCHECK(type() == kParentEntry);
+
+  if (!children_.get()) {
+    // If we already have some data in the sparse stream but we are being
+    // initialized as a sparse entry, we should fail.
+    if (GetDataSize(kSparseData))
+      return false;
+    children_.reset(new EntryMap());
+
+    // The parent entry stores data for the first block, so save this object to
+    // index 0.
+    (*children_)[0] = this;
+  }
+  return true;
+}
+
+bool MemEntryImpl::InitChildEntry(MemEntryImpl* parent, int child_id) {
+  DCHECK(!parent_);
+  DCHECK(!child_id_);
+  parent_ = parent;
+  child_id_ = child_id;
+  last_modified_ = Time::Now();
+  last_used_ = Time::Now();
+  // Insert this entry into the backend's ranking list.
+  backend_->InsertIntoRankingList(this);
+  return true;
+}
+
+MemEntryImpl* MemEntryImpl::OpenChild(int64 offset, bool create) {
+  DCHECK(type() == kParentEntry);
+  int index = ToChildIndex(offset);
+  EntryMap::iterator i = children_->find(index);
+  if (i != children_->end()) {
+    return i->second;
+  } else if (create) {
+    MemEntryImpl* child = new MemEntryImpl(backend_);
+    child->InitChildEntry(this, index);
+    (*children_)[index] = child;
+    return child;
+  }
+  return NULL;
+}
+
+int MemEntryImpl::FindNextChild(int64 offset, int len, MemEntryImpl** child) {
+  DCHECK(child);
+  *child = NULL;
+  int scanned_len = 0;
+
+  // This loop tries to find the first existing child.
+  while (scanned_len < len) {
+    // This is the current offset within the child.
+    int current_child_offset = ToChildOffset(offset + scanned_len);
+    MemEntryImpl* current_child = OpenChild(offset + scanned_len, false);
+    if (current_child) {
+      int child_first_pos = current_child->child_first_pos_;
+
+      // This is the first byte that we should be reading from; we need to
+      // take care of the filled region and the current offset in the child.
+      int first_pos = std::max(current_child_offset, child_first_pos);
+
+      // If the first byte position we should read from doesn't exceed the
+      // filled region, we have found the first child.
+      if (first_pos < current_child->GetDataSize(kSparseData)) {
+        *child = current_child;
+
+        // We need to advance the scanned length.
+        scanned_len += first_pos - current_child_offset;
+        break;
+      }
+    }
+    scanned_len += kMaxSparseEntrySize - current_child_offset;
+  }
+  return scanned_len;
+}
+
+void MemEntryImpl::DetachChild(int child_id) {
+  children_->erase(child_id);
+}
+
 }  // namespace disk_cache
diff --git a/net/disk_cache/mem_entry_impl.h b/net/disk_cache/mem_entry_impl.h
index c63f928..5f596bb 100644
--- a/net/disk_cache/mem_entry_impl.h
+++ b/net/disk_cache/mem_entry_impl.h
@@ -5,6 +5,8 @@
 #ifndef NET_DISK_CACHE_MEM_ENTRY_IMPL_H_
 #define NET_DISK_CACHE_MEM_ENTRY_IMPL_H_
 
+#include "base/hash_tables.h"
+#include "base/scoped_ptr.h"
 #include "net/disk_cache/disk_cache.h"
 #include "testing/gtest/include/gtest/gtest_prod.h"
@@ -15,17 +17,31 @@ class MemBackendImpl;
 // This class implements the Entry interface for the memory-only cache. An
 // object of this class represents a single entry on the cache. We use two
 // types of entries, parent and child to support sparse caching.
+//
 // A parent entry is non-sparse until a sparse method is invoked (i.e.
 // ReadSparseData, WriteSparseData, GetAvailableRange) when sparse information
 // is initialized. It then manages a list of child entries and delegates the
 // sparse API calls to the child entries. It creates and deletes child entries
 // and updates the list when needed.
+//
 // A child entry is used to carry partial cache content, non-sparse methods like
 // ReadData and WriteData cannot be applied to them. The lifetime of a child
 // entry is managed by the parent entry that created it except that the entry
 // can be evicted independently. A child entry does not have a key and it is not
 // registered in the backend's entry map. It is registered in the backend's
 // ranking list to enable eviction of a partial content.
+//
+// A sparse entry has a fixed maximum size and can be partially filled. There
+// can only be one continuous filled region in a sparse entry, as illustrated
+// by the following example:
+// | xxx ooooo |
+// x = unfilled region
+// o = filled region
+// It is guaranteed that there is at most one unfilled region and one filled
+// region, and the unfilled region (if there is one) is always before the
+// filled region. The bookkeeping for the filled region in a sparse entry is
+// done by using the variable |child_first_pos_| (inclusive).
+
 class MemEntryImpl : public Entry {
  public:
   enum EntryType {
@@ -57,12 +73,6 @@ class MemEntryImpl : public Entry {
   // cache.
   bool CreateEntry(const std::string& key);
 
-  // Performs the initialization of a MemEntryImpl as a child entry.
-  // TODO(hclam): this method should be private. Leave this as public because
-  // this is the only way to create a child entry. Move this method to private
-  // once child entries are created by parent entry.
-  bool CreateChildEntry(MemEntryImpl* parent);
-
   // Permanently destroys this entry.
   void InternalDoom();
@@ -86,13 +96,15 @@ class MemEntryImpl : public Entry {
   EntryType type() const {
-    return type_;
+    return parent_ ? kChildEntry : kParentEntry;
   }
 
  private:
-  enum {
-    NUM_STREAMS = 3
-  };
+  typedef base::hash_map<int, MemEntryImpl*> EntryMap;
+
+  enum {
+    NUM_STREAMS = 3
+  };
 
   ~MemEntryImpl();
@@ -102,19 +114,47 @@ class MemEntryImpl : public Entry {
   // Updates ranking information.
   void UpdateRank(bool modified);
 
+  // Initializes the children map and sparse info. This method is only called
+  // on a parent entry.
+  bool InitSparseInfo();
+
+  // Performs the initialization of a MemEntryImpl as a child entry.
+  // |parent| is the pointer to the parent entry. |child_id| is the ID of
+  // the new child.
+  bool InitChildEntry(MemEntryImpl* parent, int child_id);
+
+  // Returns an entry responsible for |offset|. The returned entry can be a
+  // child entry or this entry itself if |offset| points to the first range.
+  // If such an entry does not exist and |create| is true, a new child entry
+  // is created.
+  MemEntryImpl* OpenChild(int64 offset, bool create);
+
+  // Finds the first child located within the range [|offset|, |offset + len|).
+  // Returns the number of bytes ahead of |offset| to reach the first available
+  // byte in the entry. The first child found is output to |child|.
+  int FindNextChild(int64 offset, int len, MemEntryImpl** child);
+
+  // Removes the child indexed by |child_id| from the children map.
+  void DetachChild(int child_id);
+
   std::string key_;
   std::vector<char> data_[NUM_STREAMS];  // User data.
   int32 data_size_[NUM_STREAMS];
   int ref_count_;
 
-  MemEntryImpl* next_;          // Pointers for the LRU list.
+  MemEntryImpl* next_;           // Pointers for the LRU list.
   MemEntryImpl* prev_;
-  MemEntryImpl* parent_;        // Pointer to the parent entry.
+  MemEntryImpl* parent_;         // Pointer to the parent entry.
+  scoped_ptr<EntryMap> children_;
+
+  int child_id_;                 // The ID of a child entry.
+  int child_first_pos_;          // The position of the first byte in a child
+                                 // entry.
+
   base::Time last_modified_;     // LRU information.
   base::Time last_used_;
   MemBackendImpl* backend_;      // Back pointer to the cache.
   bool doomed_;                  // True if this entry was removed from the cache.
-  EntryType type_;               // The type of this entry.
 
   DISALLOW_EVIL_CONSTRUCTORS(MemEntryImpl);
 };
diff --git a/net/disk_cache/mem_rankings.cc b/net/disk_cache/mem_rankings.cc
index 6ca1bf7..d5f4a65 100644
--- a/net/disk_cache/mem_rankings.cc
+++ b/net/disk_cache/mem_rankings.cc
@@ -4,10 +4,15 @@
 
 #include "net/disk_cache/mem_rankings.h"
 
+#include "base/logging.h"
 #include "net/disk_cache/mem_entry_impl.h"
 
 namespace disk_cache {
 
+MemRankings::~MemRankings() {
+  DCHECK(!head_ && !tail_);
+}
+
 void MemRankings::Insert(MemEntryImpl* node) {
   if (head_)
     head_->set_prev(node);
diff --git a/net/disk_cache/mem_rankings.h b/net/disk_cache/mem_rankings.h
index 9dd1564..680be4c 100644
--- a/net/disk_cache/mem_rankings.h
+++ b/net/disk_cache/mem_rankings.h
@@ -17,7 +17,7 @@ class MemEntryImpl;
 class MemRankings {
  public:
   MemRankings() : head_(NULL), tail_(NULL) {}
-  ~MemRankings() {}
+  ~MemRankings();
 
   // Inserts a given entry at the head of the queue.
   void Insert(MemEntryImpl* node);
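End to end, the patch turns the previously disabled sparse API into working behavior for the memory-only backend. A minimal usage sketch follows; it is not part of the patch, the function name is hypothetical, the Backend pointer is assumed to come from whoever owns the cache, and the header paths reflect the tree at this revision.

#include <cstring>
#include <string>

#include "base/basictypes.h"
#include "base/ref_counted.h"
#include "net/base/io_buffer.h"
#include "net/disk_cache/disk_cache.h"

// Writes two discontinuous sparse ranges into one entry and asks where the
// first filled region is.
bool WriteTwoRanges(disk_cache::Backend* cache) {
  disk_cache::Entry* entry;
  if (!cache->CreateEntry("example key", &entry))
    return false;

  const int kSize = 1024;
  scoped_refptr<net::IOBuffer> buf = new net::IOBuffer(kSize);
  memset(buf->data(), 'x', kSize);

  // The second write is more than 4 KB past the first, so it lands in a
  // separate child entry.
  entry->WriteSparseData(0, buf, kSize, NULL);
  entry->WriteSparseData(10000, buf, kSize, NULL);

  // GetAvailableRange reports the first continuous filled region at or after
  // the given offset: here the 1024 bytes at offset 0, not the data at 10000.
  int64 start;
  int available = entry->GetAvailableRange(0, 20000, &start);
  bool ok = (available == kSize && start == 0);

  entry->Close();
  return ok;
}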