diff options
author | hclam@chromium.org <hclam@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98> | 2009-06-26 17:56:22 +0000 |
---|---|---|
committer | hclam@chromium.org <hclam@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98> | 2009-06-26 17:56:22 +0000 |
commit | b6e97b66c5777bbd1c5fef47a77247f6a3fb319e (patch) | |
tree | e95c0ff12aff46abfe87e028a734498896b2729c /net/disk_cache/mem_entry_impl.cc | |
parent | 8649fb3771f7d6227e5fea231050c7da63fb0e08 (diff) | |
download | chromium_src-b6e97b66c5777bbd1c5fef47a77247f6a3fb319e.zip chromium_src-b6e97b66c5777bbd1c5fef47a77247f6a3fb319e.tar.gz chromium_src-b6e97b66c5777bbd1c5fef47a77247f6a3fb319e.tar.bz2 |
Sparse IO implementation for memory-only cache
Implemented the following methods:
MemEntryImpl::ReadSparseData
MemEntryImpl::WriteSparseData
MemEntryImpl::GetAvailableRange
TEST=DiskCacheEntryTest.Memory*
original CL: http://codereview.chromium.org/140049
Review URL: http://codereview.chromium.org/147217
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@19380 0039d316-1c4b-4281-b951-d872f2087c98
Diffstat (limited to 'net/disk_cache/mem_entry_impl.cc')
-rw-r--r-- | net/disk_cache/mem_entry_impl.cc | 310 |
1 file changed, 277 insertions, 33 deletions
diff --git a/net/disk_cache/mem_entry_impl.cc b/net/disk_cache/mem_entry_impl.cc index 4905785..bf6359d 100644 --- a/net/disk_cache/mem_entry_impl.cc +++ b/net/disk_cache/mem_entry_impl.cc @@ -11,6 +11,28 @@ using base::Time; +namespace { + +const int kSparseData = 1; + +// Maximum size of a sparse entry is 2 to the power of this number. +const int kMaxSparseEntryBits = 12; + +// Sparse entry has maximum size of 4KB. +const int kMaxSparseEntrySize = 1 << kMaxSparseEntryBits; + +// Convert global offset to child index. +inline int ToChildIndex(int64 offset) { + return static_cast<int>(offset >> kMaxSparseEntryBits); +} + +// Convert global offset to offset in child entry. +inline int ToChildOffset(int64 offset) { + return static_cast<int>(offset & (kMaxSparseEntrySize - 1)); +} + +} // namespace + namespace disk_cache { MemEntryImpl::MemEntryImpl(MemBackendImpl* backend) { @@ -18,6 +40,8 @@ MemEntryImpl::MemEntryImpl(MemBackendImpl* backend) { backend_ = backend; ref_count_ = 0; parent_ = NULL; + child_id_ = 0; + child_first_pos_ = 0; next_ = NULL; prev_ = NULL; for (int i = 0; i < NUM_STREAMS; i++) @@ -34,25 +58,14 @@ bool MemEntryImpl::CreateEntry(const std::string& key) { key_ = key; last_modified_ = Time::Now(); last_used_ = Time::Now(); - type_ = kParentEntry; Open(); backend_->ModifyStorageSize(0, static_cast<int32>(key.size())); return true; } -bool MemEntryImpl::CreateChildEntry(MemEntryImpl* parent) { - parent_ = parent; - last_modified_ = Time::Now(); - last_used_ = Time::Now(); - type_ = kChildEntry; - // Insert this to the backend's ranking list. - backend_->InsertIntoRankingList(this); - return true; -} - void MemEntryImpl::Close() { // Only a parent entry can be closed. - DCHECK(type_ == kParentEntry); + DCHECK(type() == kParentEntry); ref_count_--; DCHECK(ref_count_ >= 0); if (!ref_count_ && doomed_) @@ -61,14 +74,16 @@ void MemEntryImpl::Close() { void MemEntryImpl::Open() { // Only a parent entry can be opened. 
- DCHECK(type_ == kParentEntry); + // TODO(hclam): make sure it's correct to not apply the concept of ref + // counting to child entry. + DCHECK(type() == kParentEntry); ref_count_++; DCHECK(ref_count_ >= 0); DCHECK(!doomed_); } bool MemEntryImpl::InUse() { - if (type_ == kParentEntry) { + if (type() == kParentEntry) { return ref_count_ > 0; } else { // A child entry is always not in use. The consequence is that a child entry @@ -81,11 +96,11 @@ bool MemEntryImpl::InUse() { void MemEntryImpl::Doom() { if (doomed_) return; - if (type_ == kParentEntry) { + if (type() == kParentEntry) { // Perform internal doom from the backend if this is a parent entry. backend_->InternalDoomEntry(this); } else { - // Manually detach from the parent entry and perform internal doom. + // Manually detach from the backend and perform internal doom. backend_->RemoveFromRankingList(this); InternalDoom(); } @@ -94,10 +109,23 @@ void MemEntryImpl::Doom() { void MemEntryImpl::InternalDoom() { doomed_ = true; if (!ref_count_) { - if (type_ == kParentEntry) { - // TODO(hclam): doom all child entries associated with this entry. + if (type() == kParentEntry) { + // If this is a parent entry, we need to doom all the child entries. + if (children_.get()) { + EntryMap children; + children.swap(*children_); + for (EntryMap::iterator i = children.begin(); + i != children.end(); ++i) { + // Since a pointer to this object is also saved in the map, avoid + // dooming it. + if (i->second != this) + i->second->Doom(); + } + DCHECK(children_->size() == 0); + } } else { - // TODO(hclam): detach this child entry from the parent entry. + // If this is a child entry, detach it from the parent. + parent_->DetachChild(child_id_); } delete this; } @@ -105,7 +133,7 @@ void MemEntryImpl::InternalDoom() { std::string MemEntryImpl::GetKey() const { // A child entry doesn't have key so this method should not be called. 
- DCHECK(type_ == kParentEntry); + DCHECK(type() == kParentEntry); return key_; } @@ -120,16 +148,12 @@ Time MemEntryImpl::GetLastModified() const { int32 MemEntryImpl::GetDataSize(int index) const { if (index < 0 || index >= NUM_STREAMS) return 0; - - // TODO(hclam): handle the case when this is a parent entry and has associated - // child entries. return data_size_[index]; } int MemEntryImpl::ReadData(int index, int offset, net::IOBuffer* buf, int buf_len, net::CompletionCallback* completion_callback) { - // This method can only be called with a parent entry. - DCHECK(type_ == kParentEntry); + DCHECK(type() == kParentEntry || index == kSparseData); if (index < 0 || index >= NUM_STREAMS) return net::ERR_INVALID_ARGUMENT; @@ -152,8 +176,7 @@ int MemEntryImpl::ReadData(int index, int offset, net::IOBuffer* buf, int MemEntryImpl::WriteData(int index, int offset, net::IOBuffer* buf, int buf_len, net::CompletionCallback* completion_callback, bool truncate) { - // This method can only be called with a parent entry. - DCHECK(type_ == kParentEntry); + DCHECK(type() == kParentEntry || index == kSparseData); if (index < 0 || index >= NUM_STREAMS) return net::ERR_INVALID_ARGUMENT; @@ -166,9 +189,6 @@ int MemEntryImpl::WriteData(int index, int offset, net::IOBuffer* buf, // offset of buf_len could be negative numbers. 
if (offset > max_file_size || buf_len > max_file_size || offset + buf_len > max_file_size) { - int size = offset + buf_len; - if (size <= max_file_size) - size = kint32max; return net::ERR_FAILED; } @@ -198,16 +218,160 @@ int MemEntryImpl::WriteData(int index, int offset, net::IOBuffer* buf, int MemEntryImpl::ReadSparseData(int64 offset, net::IOBuffer* buf, int buf_len, net::CompletionCallback* completion_callback) { - return net::ERR_CACHE_OPERATION_NOT_SUPPORTED; + DCHECK(type() == kParentEntry); + + if (!InitSparseInfo()) + return net::ERR_CACHE_OPERATION_NOT_SUPPORTED; + + if (offset < 0 || buf_len < 0 || !buf_len) + return net::ERR_INVALID_ARGUMENT; + + // We will keep using this buffer and adjust the offset in this buffer. + scoped_refptr<net::ReusedIOBuffer> io_buf = new net::ReusedIOBuffer(buf, + buf_len); + + // Counts the number of bytes read. + int bytes_read = 0; + + // Iterate until we have read enough. + while (bytes_read < buf_len) { + MemEntryImpl* child = OpenChild(offset + bytes_read, false); + + // No child present for that offset. + if (!child) + break; + + // We then need to prepare the child offset and len. + int child_offset = ToChildOffset(offset + bytes_read); + + // If we are trying to read from a position that the child entry has no data + // we should stop. + if (child_offset < child->child_first_pos_) + break; + int ret = child->ReadData(kSparseData, child_offset, io_buf, + buf_len - bytes_read, NULL); + + // If we encounter an error in one entry, return immediately. + if (ret < 0) + return ret; + else if (ret == 0) + break; + + // Increment the counter by number of bytes read in the child entry. + bytes_read += ret; + // And also adjust the buffer's offset. 
+ if (bytes_read < buf_len) + io_buf->SetOffset(bytes_read); + } + + UpdateRank(false); + + return bytes_read; } int MemEntryImpl::WriteSparseData(int64 offset, net::IOBuffer* buf, int buf_len, net::CompletionCallback* completion_callback) { - return net::ERR_CACHE_OPERATION_NOT_SUPPORTED; + DCHECK(type() == kParentEntry); + + if (!InitSparseInfo()) + return net::ERR_CACHE_OPERATION_NOT_SUPPORTED; + + if (offset < 0 || buf_len < 0) + return net::ERR_INVALID_ARGUMENT; + + scoped_refptr<net::ReusedIOBuffer> io_buf = new net::ReusedIOBuffer(buf, + buf_len); + // Counter for amount of bytes written. + int bytes_written = 0; + + // This loop walks through child entries continuously starting from |offset| + // and writes blocks of data (of maximum size kMaxSparseEntrySize) into each + // child entry until all |buf_len| bytes are written. The write operation can + // start in the middle of an entry. + while (bytes_written < buf_len) { + MemEntryImpl* child = OpenChild(offset + bytes_written, true); + int child_offset = ToChildOffset(offset + bytes_written); + + // Find the right amount to write, this evaluates the remaining bytes to + // write and remaining capacity of this child entry. + int write_len = std::min(buf_len - bytes_written, + kMaxSparseEntrySize - child_offset); + + // Keep a record of the last byte position (exclusive) in the child. + int data_size = child->GetDataSize(kSparseData); + + // Always writes to the child entry. This operation may overwrite data + // previously written. + // TODO(hclam): if there is data in the entry and this write is not + // continuous we may want to discard this write. + int ret = child->WriteData(kSparseData, child_offset, io_buf, write_len, + NULL, true); + if (ret < 0) + return ret; + else if (ret == 0) + break; + + // Keep a record of the first byte position in the child if the write was + // not aligned nor continuous. 
This is to enable writing to the middle + of an entry and still keep track of data off the aligned edge. + if (data_size != child_offset) + child->child_first_pos_ = child_offset; + + // Increment the counter. + bytes_written += ret; + + // And adjust the offset in the IO buffer. + if (bytes_written < buf_len) + io_buf->SetOffset(bytes_written); + } + + UpdateRank(true); + + return bytes_written; } int MemEntryImpl::GetAvailableRange(int64 offset, int len, int64* start) { - return net::ERR_CACHE_OPERATION_NOT_SUPPORTED; + DCHECK(type() == kParentEntry); + DCHECK(start); + + if (!InitSparseInfo()) + return net::ERR_CACHE_OPERATION_NOT_SUPPORTED; + + if (offset < 0 || len < 0 || !start) + return net::ERR_INVALID_ARGUMENT; + + MemEntryImpl* current_child = NULL; + + // Find the first child and record the number of empty bytes. + int empty = FindNextChild(offset, len, &current_child); + if (current_child) { + *start = offset + empty; + len -= empty; + + // Counts the number of continuous bytes. + int continuous = 0; + + // This loop scans for continuous bytes. + while (len && current_child) { + // Number of bytes available in this child. + int data_size = current_child->GetDataSize(kSparseData) - + ToChildOffset(*start + continuous); + if (data_size > len) + data_size = len; + + // We have found more continuous bytes so increment the count. Also + // decrement the length we should scan. + continuous += data_size; + len -= data_size; + + // If the next child is discontinuous, break the loop. 
+ if (FindNextChild(*start + continuous, len, &current_child)) + break; + } + return continuous; + } + *start = offset; + return 0; } void MemEntryImpl::PrepareTarget(int index, int offset, int buf_len) { @@ -238,4 +402,84 @@ void MemEntryImpl::UpdateRank(bool modified) { backend_->UpdateRank(this); } +bool MemEntryImpl::InitSparseInfo() { + DCHECK(type() == kParentEntry); + + if (!children_.get()) { + // If we already have some data in sparse stream but we are being + // initialized as a sparse entry, we should fail. + if (GetDataSize(kSparseData)) + return false; + children_.reset(new EntryMap()); + + // The parent entry stores data for the first block, so save this object to + // index 0. + (*children_)[0] = this; + } + return true; +} + +bool MemEntryImpl::InitChildEntry(MemEntryImpl* parent, int child_id) { + DCHECK(!parent_); + DCHECK(!child_id_); + parent_ = parent; + child_id_ = child_id; + last_modified_ = Time::Now(); + last_used_ = Time::Now(); + // Insert this to the backend's ranking list. + backend_->InsertIntoRankingList(this); + return true; +} + +MemEntryImpl* MemEntryImpl::OpenChild(int64 offset, bool create) { + DCHECK(type() == kParentEntry); + int index = ToChildIndex(offset); + EntryMap::iterator i = children_->find(index); + if (i != children_->end()) { + return i->second; + } else if (create) { + MemEntryImpl* child = new MemEntryImpl(backend_); + child->InitChildEntry(this, index); + (*children_)[index] = child; + return child; + } + return NULL; +} + +int MemEntryImpl::FindNextChild(int64 offset, int len, MemEntryImpl** child) { + DCHECK(child); + *child = NULL; + int scanned_len = 0; + + // This loop tries to find the first existing child. + while (scanned_len < len) { + // This points to the current offset in the child. 
+ int current_child_offset = ToChildOffset(offset + scanned_len); + MemEntryImpl* current_child = OpenChild(offset + scanned_len, false); + if (current_child) { + int child_first_pos = current_child->child_first_pos_; + + // This points to the first byte that we should be reading from, we need + // to take care of the filled region and the current offset in the child. + int first_pos = std::max(current_child_offset, child_first_pos); + + // If the first byte position we should read from doesn't exceed the + // filled region, we have found the first child. + if (first_pos < current_child->GetDataSize(kSparseData)) { + *child = current_child; + + // We need to advance the scanned length. + scanned_len += first_pos - current_child_offset; + break; + } + } + scanned_len += kMaxSparseEntrySize - current_child_offset; + } + return scanned_len; +} + +void MemEntryImpl::DetachChild(int child_id) { + children_->erase(child_id); +} + } // namespace disk_cache |