author     gavinp@chromium.org <gavinp@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2014-03-03 16:35:28 +0000
committer  gavinp@chromium.org <gavinp@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2014-03-03 16:35:28 +0000
commit     c2c5cfc4250c95b73539f28c9f435416c8cae240 (patch)
tree       f16752986b98155982ed1d95e7e82c466363fe67 /net/disk_cache/memory
parent     c7c1abc7b7c7ca539de99f0e73d067d80444e2b6 (diff)
Reland re-organization of net/disk_cache/
Originally this landed from https://codereview.chromium.org/121643003/ , but
was reverted due to an iOS build break.

BUG=331062
TBR=mmenke@chromium.org,rvargas@chromium.org

Review URL: https://codereview.chromium.org/185003007

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@254474 0039d316-1c4b-4281-b951-d872f2087c98
Diffstat (limited to 'net/disk_cache/memory')
-rw-r--r--  net/disk_cache/memory/mem_backend_impl.cc   337
-rw-r--r--  net/disk_cache/memory/mem_backend_impl.h    120
-rw-r--r--  net/disk_cache/memory/mem_entry_impl.cc     631
-rw-r--r--  net/disk_cache/memory/mem_entry_impl.h      185
-rw-r--r--  net/disk_cache/memory/mem_rankings.cc        67
-rw-r--r--  net/disk_cache/memory/mem_rankings.h         44
6 files changed, 1384 insertions, 0 deletions
diff --git a/net/disk_cache/memory/mem_backend_impl.cc b/net/disk_cache/memory/mem_backend_impl.cc
new file mode 100644
index 0000000..e69c00e
--- /dev/null
+++ b/net/disk_cache/memory/mem_backend_impl.cc
@@ -0,0 +1,337 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/disk_cache/memory/mem_backend_impl.h"
+
+#include "base/logging.h"
+#include "base/sys_info.h"
+#include "net/base/net_errors.h"
+#include "net/disk_cache/cache_util.h"
+#include "net/disk_cache/memory/mem_entry_impl.h"
+
+using base::Time;
+
+namespace {
+
+const int kDefaultInMemoryCacheSize = 10 * 1024 * 1024;
+const int kCleanUpMargin = 1024 * 1024;
+
+int LowWaterAdjust(int high_water) {
+ if (high_water < kCleanUpMargin)
+ return 0;
+
+ return high_water - kCleanUpMargin;
+}
+
+} // namespace
+
+namespace disk_cache {
+
+MemBackendImpl::MemBackendImpl(net::NetLog* net_log)
+ : max_size_(0), current_size_(0), net_log_(net_log) {}
+
+MemBackendImpl::~MemBackendImpl() {
+ EntryMap::iterator it = entries_.begin();
+ while (it != entries_.end()) {
+ it->second->Doom();
+ it = entries_.begin();
+ }
+ DCHECK(!current_size_);
+}
+
+// Static.
+scoped_ptr<Backend> MemBackendImpl::CreateBackend(int max_bytes,
+ net::NetLog* net_log) {
+ scoped_ptr<MemBackendImpl> cache(new MemBackendImpl(net_log));
+ cache->SetMaxSize(max_bytes);
+ if (cache->Init())
+ return cache.PassAs<Backend>();
+
+ LOG(ERROR) << "Unable to create cache";
+ return scoped_ptr<Backend>();
+}
+
+bool MemBackendImpl::Init() {
+ if (max_size_)
+ return true;
+
+ int64 total_memory = base::SysInfo::AmountOfPhysicalMemory();
+
+ if (total_memory <= 0) {
+ max_size_ = kDefaultInMemoryCacheSize;
+ return true;
+ }
+
+ // We want to use up to 2% of the computer's memory, with a limit of 50 MB,
+ // reached on systems with more than 2.5 GB of RAM.
+ total_memory = total_memory * 2 / 100;
+ if (total_memory > kDefaultInMemoryCacheSize * 5)
+ max_size_ = kDefaultInMemoryCacheSize * 5;
+ else
+ max_size_ = static_cast<int32>(total_memory);
+
+ return true;
+}
+
+bool MemBackendImpl::SetMaxSize(int max_bytes) {
+ COMPILE_ASSERT(sizeof(max_bytes) == sizeof(max_size_), unsupported_int_model);
+ if (max_bytes < 0)
+ return false;
+
+ // Zero size means use the default.
+ if (!max_bytes)
+ return true;
+
+ max_size_ = max_bytes;
+ return true;
+}
+
+void MemBackendImpl::InternalDoomEntry(MemEntryImpl* entry) {
+ // Only parent entries can be passed into this method.
+ DCHECK(entry->type() == MemEntryImpl::kParentEntry);
+
+ rankings_.Remove(entry);
+ EntryMap::iterator it = entries_.find(entry->GetKey());
+ if (it != entries_.end())
+ entries_.erase(it);
+ else
+ NOTREACHED();
+
+ entry->InternalDoom();
+}
+
+void MemBackendImpl::UpdateRank(MemEntryImpl* node) {
+ rankings_.UpdateRank(node);
+}
+
+void MemBackendImpl::ModifyStorageSize(int32 old_size, int32 new_size) {
+ if (old_size >= new_size)
+ SubstractStorageSize(old_size - new_size);
+ else
+ AddStorageSize(new_size - old_size);
+}
+
+int MemBackendImpl::MaxFileSize() const {
+ return max_size_ / 8;
+}
+
+void MemBackendImpl::InsertIntoRankingList(MemEntryImpl* entry) {
+ rankings_.Insert(entry);
+}
+
+void MemBackendImpl::RemoveFromRankingList(MemEntryImpl* entry) {
+ rankings_.Remove(entry);
+}
+
+net::CacheType MemBackendImpl::GetCacheType() const {
+ return net::MEMORY_CACHE;
+}
+
+int32 MemBackendImpl::GetEntryCount() const {
+ return static_cast<int32>(entries_.size());
+}
+
+int MemBackendImpl::OpenEntry(const std::string& key, Entry** entry,
+ const CompletionCallback& callback) {
+ if (OpenEntry(key, entry))
+ return net::OK;
+
+ return net::ERR_FAILED;
+}
+
+int MemBackendImpl::CreateEntry(const std::string& key, Entry** entry,
+ const CompletionCallback& callback) {
+ if (CreateEntry(key, entry))
+ return net::OK;
+
+ return net::ERR_FAILED;
+}
+
+int MemBackendImpl::DoomEntry(const std::string& key,
+ const CompletionCallback& callback) {
+ if (DoomEntry(key))
+ return net::OK;
+
+ return net::ERR_FAILED;
+}
+
+int MemBackendImpl::DoomAllEntries(const CompletionCallback& callback) {
+ if (DoomAllEntries())
+ return net::OK;
+
+ return net::ERR_FAILED;
+}
+
+int MemBackendImpl::DoomEntriesBetween(const base::Time initial_time,
+ const base::Time end_time,
+ const CompletionCallback& callback) {
+ if (DoomEntriesBetween(initial_time, end_time))
+ return net::OK;
+
+ return net::ERR_FAILED;
+}
+
+int MemBackendImpl::DoomEntriesSince(const base::Time initial_time,
+ const CompletionCallback& callback) {
+ if (DoomEntriesSince(initial_time))
+ return net::OK;
+
+ return net::ERR_FAILED;
+}
+
+int MemBackendImpl::OpenNextEntry(void** iter, Entry** next_entry,
+ const CompletionCallback& callback) {
+ if (OpenNextEntry(iter, next_entry))
+ return net::OK;
+
+ return net::ERR_FAILED;
+}
+
+void MemBackendImpl::EndEnumeration(void** iter) {
+ *iter = NULL;
+}
+
+void MemBackendImpl::OnExternalCacheHit(const std::string& key) {
+ EntryMap::iterator it = entries_.find(key);
+ if (it != entries_.end()) {
+ UpdateRank(it->second);
+ }
+}
+
+bool MemBackendImpl::OpenEntry(const std::string& key, Entry** entry) {
+ EntryMap::iterator it = entries_.find(key);
+ if (it == entries_.end())
+ return false;
+
+ it->second->Open();
+
+ *entry = it->second;
+ return true;
+}
+
+bool MemBackendImpl::CreateEntry(const std::string& key, Entry** entry) {
+ EntryMap::iterator it = entries_.find(key);
+ if (it != entries_.end())
+ return false;
+
+ MemEntryImpl* cache_entry = new MemEntryImpl(this);
+ if (!cache_entry->CreateEntry(key, net_log_)) {
+ delete cache_entry;
+ return false;
+ }
+
+ rankings_.Insert(cache_entry);
+ entries_[key] = cache_entry;
+
+ *entry = cache_entry;
+ return true;
+}
+
+bool MemBackendImpl::DoomEntry(const std::string& key) {
+ Entry* entry;
+ if (!OpenEntry(key, &entry))
+ return false;
+
+ entry->Doom();
+ entry->Close();
+ return true;
+}
+
+bool MemBackendImpl::DoomAllEntries() {
+ TrimCache(true);
+ return true;
+}
+
+bool MemBackendImpl::DoomEntriesBetween(const Time initial_time,
+ const Time end_time) {
+ if (end_time.is_null())
+ return DoomEntriesSince(initial_time);
+
+ DCHECK(end_time >= initial_time);
+
+ MemEntryImpl* node = rankings_.GetNext(NULL);
+ // Last valid entry before |node|.
+ // Note that entries after |node| may become invalid while |node| is being
+ // doomed, if they are its child entries. It is guaranteed that a parent
+ // node appears before its children in the ranking list (see
+ // InternalReadSparseData and InternalWriteSparseData).
+ MemEntryImpl* last_valid = NULL;
+
+ // |rankings_| is ordered by last-used time; this loop descends through the
+ // cache, dooming items last used before |end_time|, and stops once it
+ // reaches an item last used before |initial_time|.
+ while (node) {
+ if (node->GetLastUsed() < initial_time)
+ break;
+
+ if (node->GetLastUsed() < end_time)
+ node->Doom();
+ else
+ last_valid = node;
+ node = rankings_.GetNext(last_valid);
+ }
+
+ return true;
+}
+
+bool MemBackendImpl::DoomEntriesSince(const Time initial_time) {
+ for (;;) {
+ // Get the entry in the front.
+ Entry* entry = rankings_.GetNext(NULL);
+
+ // Break the loop when there are no more entries or the entry is too old.
+ if (!entry || entry->GetLastUsed() < initial_time)
+ return true;
+ entry->Doom();
+ }
+}
+
+bool MemBackendImpl::OpenNextEntry(void** iter, Entry** next_entry) {
+ MemEntryImpl* current = reinterpret_cast<MemEntryImpl*>(*iter);
+ MemEntryImpl* node = rankings_.GetNext(current);
+ // We should never return a child entry, so iterate until we hit a parent
+ // entry.
+ while (node && node->type() != MemEntryImpl::kParentEntry) {
+ node = rankings_.GetNext(node);
+ }
+ *next_entry = node;
+ *iter = node;
+
+ if (node)
+ node->Open();
+
+ return NULL != node;
+}
+
+void MemBackendImpl::TrimCache(bool empty) {
+ MemEntryImpl* next = rankings_.GetPrev(NULL);
+ if (!next)
+ return;
+
+ int target_size = empty ? 0 : LowWaterAdjust(max_size_);
+ while (current_size_ > target_size && next) {
+ MemEntryImpl* node = next;
+ next = rankings_.GetPrev(next);
+ if (!node->InUse() || empty) {
+ node->Doom();
+ }
+ }
+
+ return;
+}
+
+void MemBackendImpl::AddStorageSize(int32 bytes) {
+ current_size_ += bytes;
+ DCHECK_GE(current_size_, 0);
+
+ if (current_size_ > max_size_)
+ TrimCache(false);
+}
+
+void MemBackendImpl::SubstractStorageSize(int32 bytes) {
+ current_size_ -= bytes;
+ DCHECK_GE(current_size_, 0);
+}
+
+} // namespace disk_cache
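
The backend above implements the asynchronous Backend interface but completes
every call synchronously, returning net::OK or net::ERR_FAILED directly
instead of through the callback. A minimal usage sketch (illustrative only,
assuming the surrounding Chromium headers; not part of this change):

  // Create a 10 MB in-memory cache and add one entry.
  scoped_ptr<disk_cache::Backend> backend =
      disk_cache::MemBackendImpl::CreateBackend(10 * 1024 * 1024, NULL);
  if (backend) {
    disk_cache::Entry* entry = NULL;
    int rv = backend->CreateEntry("http://example.com/", &entry,
                                  net::CompletionCallback());
    if (rv == net::OK)
      entry->Close();  // Entries must be closed when no longer used.
  }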
diff --git a/net/disk_cache/memory/mem_backend_impl.h b/net/disk_cache/memory/mem_backend_impl.h
new file mode 100644
index 0000000..5f31be5
--- /dev/null
+++ b/net/disk_cache/memory/mem_backend_impl.h
@@ -0,0 +1,120 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// See net/disk_cache/disk_cache.h for the public interface of the cache.
+
+#ifndef NET_DISK_CACHE_MEMORY_MEM_BACKEND_IMPL_H_
+#define NET_DISK_CACHE_MEMORY_MEM_BACKEND_IMPL_H_
+
+#include "base/compiler_specific.h"
+#include "base/containers/hash_tables.h"
+#include "net/disk_cache/disk_cache.h"
+#include "net/disk_cache/memory/mem_rankings.h"
+
+namespace net {
+class NetLog;
+} // namespace net
+
+namespace disk_cache {
+
+class MemEntryImpl;
+
+// This class implements the Backend interface. An object of this class handles
+// the operations of the cache without writing to disk.
+class NET_EXPORT_PRIVATE MemBackendImpl : public Backend {
+ public:
+ explicit MemBackendImpl(net::NetLog* net_log);
+ virtual ~MemBackendImpl();
+
+ // Returns an instance of a Backend implemented only in memory. The returned
+ // object should be deleted when not needed anymore. max_bytes is the maximum
+ // size the cache can grow to. If zero is passed in as max_bytes, the cache
+ // will determine the value to use based on the available memory. The returned
+ // pointer can be NULL if a fatal error is found.
+ static scoped_ptr<Backend> CreateBackend(int max_bytes, net::NetLog* net_log);
+
+ // Performs general initialization for this instance of the cache.
+ bool Init();
+
+ // Sets the maximum size for the total amount of data stored by this instance.
+ bool SetMaxSize(int max_bytes);
+
+ // Permanently deletes an entry.
+ void InternalDoomEntry(MemEntryImpl* entry);
+
+ // Updates the ranking information for an entry.
+ void UpdateRank(MemEntryImpl* node);
+
+ // A user data block is being created, extended or truncated.
+ void ModifyStorageSize(int32 old_size, int32 new_size);
+
+ // Returns the maximum size for a file to reside on the cache.
+ int MaxFileSize() const;
+
+ // Inserts a MemEntryImpl into the ranking list. This method is only called
+ // from MemEntryImpl to insert child entries. The reference can be removed
+ // by calling RemoveFromRankingList(|entry|).
+ void InsertIntoRankingList(MemEntryImpl* entry);
+
+ // Removes |entry| from the ranking list. This method is only called from
+ // MemEntryImpl to remove a child entry from the ranking list.
+ void RemoveFromRankingList(MemEntryImpl* entry);
+
+ // Backend interface.
+ virtual net::CacheType GetCacheType() const OVERRIDE;
+ virtual int32 GetEntryCount() const OVERRIDE;
+ virtual int OpenEntry(const std::string& key, Entry** entry,
+ const CompletionCallback& callback) OVERRIDE;
+ virtual int CreateEntry(const std::string& key, Entry** entry,
+ const CompletionCallback& callback) OVERRIDE;
+ virtual int DoomEntry(const std::string& key,
+ const CompletionCallback& callback) OVERRIDE;
+ virtual int DoomAllEntries(const CompletionCallback& callback) OVERRIDE;
+ virtual int DoomEntriesBetween(base::Time initial_time,
+ base::Time end_time,
+ const CompletionCallback& callback) OVERRIDE;
+ virtual int DoomEntriesSince(base::Time initial_time,
+ const CompletionCallback& callback) OVERRIDE;
+ virtual int OpenNextEntry(void** iter, Entry** next_entry,
+ const CompletionCallback& callback) OVERRIDE;
+ virtual void EndEnumeration(void** iter) OVERRIDE;
+ virtual void GetStats(
+ std::vector<std::pair<std::string, std::string> >* stats) OVERRIDE {}
+ virtual void OnExternalCacheHit(const std::string& key) OVERRIDE;
+
+ private:
+ typedef base::hash_map<std::string, MemEntryImpl*> EntryMap;
+
+ // Old Backend interface.
+ bool OpenEntry(const std::string& key, Entry** entry);
+ bool CreateEntry(const std::string& key, Entry** entry);
+ bool DoomEntry(const std::string& key);
+ bool DoomAllEntries();
+ bool DoomEntriesBetween(const base::Time initial_time,
+ const base::Time end_time);
+ bool DoomEntriesSince(const base::Time initial_time);
+ bool OpenNextEntry(void** iter, Entry** next_entry);
+
+ // Deletes entries from the cache until the current size is below the limit.
+ // If empty is true, the whole cache will be trimmed, regardless of being in
+ // use.
+ void TrimCache(bool empty);
+
+ // Handles the used storage count.
+ void AddStorageSize(int32 bytes);
+ void SubstractStorageSize(int32 bytes);
+
+ EntryMap entries_;
+ MemRankings rankings_; // Rankings to be able to trim the cache.
+ int32 max_size_; // Maximum data size for this instance.
+ int32 current_size_;
+
+ net::NetLog* net_log_;
+
+ DISALLOW_COPY_AND_ASSIGN(MemBackendImpl);
+};
+
+} // namespace disk_cache
+
+#endif // NET_DISK_CACHE_MEMORY_MEM_BACKEND_IMPL_H_
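
Init() in the .cc above sizes the cache at 2% of physical memory, capped at
5 * kDefaultInMemoryCacheSize (50 MB) and falling back to 10 MB when the RAM
size is unknown. A standalone sketch of the same rule (names are illustrative,
not part of this change):

  // Mirrors the sizing logic in MemBackendImpl::Init().
  int ComputeDefaultCacheSize(int64 total_memory_bytes) {
    const int kDefault = 10 * 1024 * 1024;  // kDefaultInMemoryCacheSize.
    if (total_memory_bytes <= 0)
      return kDefault;  // Amount of RAM unknown; use the 10 MB default.
    int64 two_percent = total_memory_bytes * 2 / 100;
    if (two_percent > kDefault * 5)
      return kDefault * 5;  // 50 MB cap, reached at roughly 2.5 GB of RAM.
    return static_cast<int>(two_percent);
  }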
diff --git a/net/disk_cache/memory/mem_entry_impl.cc b/net/disk_cache/memory/mem_entry_impl.cc
new file mode 100644
index 0000000..0b6d858
--- /dev/null
+++ b/net/disk_cache/memory/mem_entry_impl.cc
@@ -0,0 +1,631 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/disk_cache/memory/mem_entry_impl.h"
+
+#include "base/bind.h"
+#include "base/logging.h"
+#include "base/strings/stringprintf.h"
+#include "base/values.h"
+#include "net/base/io_buffer.h"
+#include "net/base/net_errors.h"
+#include "net/disk_cache/memory/mem_backend_impl.h"
+#include "net/disk_cache/net_log_parameters.h"
+
+using base::Time;
+
+namespace {
+
+const int kSparseData = 1;
+
+// The maximum size of a sparse entry is 2 to the power of this number.
+const int kMaxSparseEntryBits = 12;
+
+// A sparse entry has a maximum size of 4 KB.
+const int kMaxSparseEntrySize = 1 << kMaxSparseEntryBits;
+
+// Convert global offset to child index.
+inline int ToChildIndex(int64 offset) {
+ return static_cast<int>(offset >> kMaxSparseEntryBits);
+}
+
+// Convert global offset to offset in child entry.
+inline int ToChildOffset(int64 offset) {
+ return static_cast<int>(offset & (kMaxSparseEntrySize - 1));
+}
+
+// Returns a name for a child entry given the base_name of the parent and the
+// child_id. This name is only used for logging purposes.
+// If the entry is called entry_name, child entries will be named something
+// like Range_entry_name:YYY where YYY is the number of the particular child.
+std::string GenerateChildName(const std::string& base_name, int child_id) {
+ return base::StringPrintf("Range_%s:%i", base_name.c_str(), child_id);
+}
+
+// Returns NetLog parameters for the creation of a child MemEntryImpl. A
+// separate function is needed because child entries don't support GetKey().
+base::Value* NetLogChildEntryCreationCallback(
+ const disk_cache::MemEntryImpl* parent,
+ int child_id,
+ net::NetLog::LogLevel /* log_level */) {
+ base::DictionaryValue* dict = new base::DictionaryValue();
+ dict->SetString("key", GenerateChildName(parent->GetKey(), child_id));
+ dict->SetBoolean("created", true);
+ return dict;
+}
+
+} // namespace
+
+namespace disk_cache {
+
+MemEntryImpl::MemEntryImpl(MemBackendImpl* backend) {
+ doomed_ = false;
+ backend_ = backend;
+ ref_count_ = 0;
+ parent_ = NULL;
+ child_id_ = 0;
+ child_first_pos_ = 0;
+ next_ = NULL;
+ prev_ = NULL;
+ for (int i = 0; i < NUM_STREAMS; i++)
+ data_size_[i] = 0;
+}
+
+// ------------------------------------------------------------------------
+
+bool MemEntryImpl::CreateEntry(const std::string& key, net::NetLog* net_log) {
+ key_ = key;
+ Time current = Time::Now();
+ last_modified_ = current;
+ last_used_ = current;
+
+ net_log_ = net::BoundNetLog::Make(net_log,
+ net::NetLog::SOURCE_MEMORY_CACHE_ENTRY);
+ // Must be called after |key_| is set, so GetKey() works.
+ net_log_.BeginEvent(
+ net::NetLog::TYPE_DISK_CACHE_MEM_ENTRY_IMPL,
+ CreateNetLogEntryCreationCallback(this, true));
+
+ Open();
+ backend_->ModifyStorageSize(0, static_cast<int32>(key.size()));
+ return true;
+}
+
+void MemEntryImpl::InternalDoom() {
+ net_log_.AddEvent(net::NetLog::TYPE_ENTRY_DOOM);
+ doomed_ = true;
+ if (!ref_count_) {
+ if (type() == kParentEntry) {
+ // If this is a parent entry, we need to doom all the child entries.
+ if (children_.get()) {
+ EntryMap children;
+ children.swap(*children_);
+ for (EntryMap::iterator i = children.begin();
+ i != children.end(); ++i) {
+ // Since a pointer to this object is also saved in the map, avoid
+ // dooming it.
+ if (i->second != this)
+ i->second->Doom();
+ }
+ DCHECK(children_->empty());
+ }
+ } else {
+ // If this is a child entry, detach it from the parent.
+ parent_->DetachChild(child_id_);
+ }
+ delete this;
+ }
+}
+
+void MemEntryImpl::Open() {
+ // Only a parent entry can be opened.
+ // TODO(hclam): make sure it's correct to not apply the concept of ref
+ // counting to child entries.
+ DCHECK(type() == kParentEntry);
+ ref_count_++;
+ DCHECK_GE(ref_count_, 0);
+ DCHECK(!doomed_);
+}
+
+bool MemEntryImpl::InUse() {
+ if (type() == kParentEntry) {
+ return ref_count_ > 0;
+ } else {
+ // A child entry is never in use. The consequence is that a child entry can
+ // always be evicted even while the associated parent entry is currently in
+ // use (i.e. opened).
+ }
+}
+
+// ------------------------------------------------------------------------
+
+void MemEntryImpl::Doom() {
+ if (doomed_)
+ return;
+ if (type() == kParentEntry) {
+ // Perform internal doom from the backend if this is a parent entry.
+ backend_->InternalDoomEntry(this);
+ } else {
+ // Manually detach from the backend and perform internal doom.
+ backend_->RemoveFromRankingList(this);
+ InternalDoom();
+ }
+}
+
+void MemEntryImpl::Close() {
+ // Only a parent entry can be closed.
+ DCHECK(type() == kParentEntry);
+ ref_count_--;
+ DCHECK_GE(ref_count_, 0);
+ if (!ref_count_ && doomed_)
+ InternalDoom();
+}
+
+std::string MemEntryImpl::GetKey() const {
+ // A child entry doesn't have a key, so this method should not be called.
+ DCHECK(type() == kParentEntry);
+ return key_;
+}
+
+Time MemEntryImpl::GetLastUsed() const {
+ return last_used_;
+}
+
+Time MemEntryImpl::GetLastModified() const {
+ return last_modified_;
+}
+
+int32 MemEntryImpl::GetDataSize(int index) const {
+ if (index < 0 || index >= NUM_STREAMS)
+ return 0;
+ return data_size_[index];
+}
+
+int MemEntryImpl::ReadData(int index, int offset, IOBuffer* buf, int buf_len,
+ const CompletionCallback& callback) {
+ if (net_log_.IsLoggingAllEvents()) {
+ net_log_.BeginEvent(
+ net::NetLog::TYPE_ENTRY_READ_DATA,
+ CreateNetLogReadWriteDataCallback(index, offset, buf_len, false));
+ }
+
+ int result = InternalReadData(index, offset, buf, buf_len);
+
+ if (net_log_.IsLoggingAllEvents()) {
+ net_log_.EndEvent(
+ net::NetLog::TYPE_ENTRY_READ_DATA,
+ CreateNetLogReadWriteCompleteCallback(result));
+ }
+ return result;
+}
+
+int MemEntryImpl::WriteData(int index, int offset, IOBuffer* buf, int buf_len,
+ const CompletionCallback& callback, bool truncate) {
+ if (net_log_.IsLoggingAllEvents()) {
+ net_log_.BeginEvent(
+ net::NetLog::TYPE_ENTRY_WRITE_DATA,
+ CreateNetLogReadWriteDataCallback(index, offset, buf_len, truncate));
+ }
+
+ int result = InternalWriteData(index, offset, buf, buf_len, truncate);
+
+ if (net_log_.IsLoggingAllEvents()) {
+ net_log_.EndEvent(
+ net::NetLog::TYPE_ENTRY_WRITE_DATA,
+ CreateNetLogReadWriteCompleteCallback(result));
+ }
+ return result;
+}
+
+int MemEntryImpl::ReadSparseData(int64 offset, IOBuffer* buf, int buf_len,
+ const CompletionCallback& callback) {
+ if (net_log_.IsLoggingAllEvents()) {
+ net_log_.BeginEvent(
+ net::NetLog::TYPE_SPARSE_READ,
+ CreateNetLogSparseOperationCallback(offset, buf_len));
+ }
+ int result = InternalReadSparseData(offset, buf, buf_len);
+ if (net_log_.IsLoggingAllEvents())
+ net_log_.EndEvent(net::NetLog::TYPE_SPARSE_READ);
+ return result;
+}
+
+int MemEntryImpl::WriteSparseData(int64 offset, IOBuffer* buf, int buf_len,
+ const CompletionCallback& callback) {
+ if (net_log_.IsLoggingAllEvents()) {
+ net_log_.BeginEvent(
+ net::NetLog::TYPE_SPARSE_WRITE,
+ CreateNetLogSparseOperationCallback(offset, buf_len));
+ }
+ int result = InternalWriteSparseData(offset, buf, buf_len);
+ if (net_log_.IsLoggingAllEvents())
+ net_log_.EndEvent(net::NetLog::TYPE_SPARSE_WRITE);
+ return result;
+}
+
+int MemEntryImpl::GetAvailableRange(int64 offset, int len, int64* start,
+ const CompletionCallback& callback) {
+ if (net_log_.IsLoggingAllEvents()) {
+ net_log_.BeginEvent(
+ net::NetLog::TYPE_SPARSE_GET_RANGE,
+ CreateNetLogSparseOperationCallback(offset, len));
+ }
+ int result = GetAvailableRange(offset, len, start);
+ if (net_log_.IsLoggingAllEvents()) {
+ net_log_.EndEvent(
+ net::NetLog::TYPE_SPARSE_GET_RANGE,
+ CreateNetLogGetAvailableRangeResultCallback(*start, result));
+ }
+ return result;
+}
+
+bool MemEntryImpl::CouldBeSparse() const {
+ DCHECK_EQ(kParentEntry, type());
+ return (children_.get() != NULL);
+}
+
+int MemEntryImpl::ReadyForSparseIO(const CompletionCallback& callback) {
+ return net::OK;
+}
+
+// ------------------------------------------------------------------------
+
+MemEntryImpl::~MemEntryImpl() {
+ for (int i = 0; i < NUM_STREAMS; i++)
+ backend_->ModifyStorageSize(data_size_[i], 0);
+ backend_->ModifyStorageSize(static_cast<int32>(key_.size()), 0);
+ net_log_.EndEvent(net::NetLog::TYPE_DISK_CACHE_MEM_ENTRY_IMPL);
+}
+
+int MemEntryImpl::InternalReadData(int index, int offset, IOBuffer* buf,
+ int buf_len) {
+ DCHECK(type() == kParentEntry || index == kSparseData);
+
+ if (index < 0 || index >= NUM_STREAMS)
+ return net::ERR_INVALID_ARGUMENT;
+
+ int entry_size = GetDataSize(index);
+ if (offset >= entry_size || offset < 0 || !buf_len)
+ return 0;
+
+ if (buf_len < 0)
+ return net::ERR_INVALID_ARGUMENT;
+
+ if (offset + buf_len > entry_size)
+ buf_len = entry_size - offset;
+
+ UpdateRank(false);
+
+ memcpy(buf->data(), &(data_[index])[offset], buf_len);
+ return buf_len;
+}
+
+int MemEntryImpl::InternalWriteData(int index, int offset, IOBuffer* buf,
+ int buf_len, bool truncate) {
+ DCHECK(type() == kParentEntry || index == kSparseData);
+
+ if (index < 0 || index >= NUM_STREAMS)
+ return net::ERR_INVALID_ARGUMENT;
+
+ if (offset < 0 || buf_len < 0)
+ return net::ERR_INVALID_ARGUMENT;
+
+ int max_file_size = backend_->MaxFileSize();
+
+ // Check each operand as well as the sum: |offset + buf_len| could overflow
+ // to a negative number.
+ if (offset > max_file_size || buf_len > max_file_size ||
+ offset + buf_len > max_file_size) {
+ return net::ERR_FAILED;
+ }
+
+ // Read the size at this point.
+ int entry_size = GetDataSize(index);
+
+ PrepareTarget(index, offset, buf_len);
+
+ if (entry_size < offset + buf_len) {
+ backend_->ModifyStorageSize(entry_size, offset + buf_len);
+ data_size_[index] = offset + buf_len;
+ } else if (truncate) {
+ if (entry_size > offset + buf_len) {
+ backend_->ModifyStorageSize(entry_size, offset + buf_len);
+ data_size_[index] = offset + buf_len;
+ }
+ }
+
+ UpdateRank(true);
+
+ if (!buf_len)
+ return 0;
+
+ memcpy(&(data_[index])[offset], buf->data(), buf_len);
+ return buf_len;
+}
+
+int MemEntryImpl::InternalReadSparseData(int64 offset, IOBuffer* buf,
+ int buf_len) {
+ DCHECK(type() == kParentEntry);
+
+ if (!InitSparseInfo())
+ return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;
+
+ if (offset < 0 || buf_len < 0)
+ return net::ERR_INVALID_ARGUMENT;
+
+ // We will keep using this buffer and adjust the offset in this buffer.
+ scoped_refptr<net::DrainableIOBuffer> io_buf(
+ new net::DrainableIOBuffer(buf, buf_len));
+
+ // Iterate until we have read enough.
+ while (io_buf->BytesRemaining()) {
+ MemEntryImpl* child = OpenChild(offset + io_buf->BytesConsumed(), false);
+
+ // No child present for that offset.
+ if (!child)
+ break;
+
+ // We then need to prepare the child offset and len.
+ int child_offset = ToChildOffset(offset + io_buf->BytesConsumed());
+
+ // If we are trying to read from a position where the child entry has no
+ // data, we should stop.
+ if (child_offset < child->child_first_pos_)
+ break;
+ if (net_log_.IsLoggingAllEvents()) {
+ net_log_.BeginEvent(
+ net::NetLog::TYPE_SPARSE_READ_CHILD_DATA,
+ CreateNetLogSparseReadWriteCallback(child->net_log().source(),
+ io_buf->BytesRemaining()));
+ }
+ int ret = child->ReadData(kSparseData, child_offset, io_buf.get(),
+ io_buf->BytesRemaining(), CompletionCallback());
+ if (net_log_.IsLoggingAllEvents()) {
+ net_log_.EndEventWithNetErrorCode(
+ net::NetLog::TYPE_SPARSE_READ_CHILD_DATA, ret);
+ }
+
+ // If we encounter an error in one entry, return immediately.
+ if (ret < 0)
+ return ret;
+ else if (ret == 0)
+ break;
+
+ // Increment the counter by number of bytes read in the child entry.
+ io_buf->DidConsume(ret);
+ }
+
+ UpdateRank(false);
+
+ return io_buf->BytesConsumed();
+}
+
+int MemEntryImpl::InternalWriteSparseData(int64 offset, IOBuffer* buf,
+ int buf_len) {
+ DCHECK(type() == kParentEntry);
+
+ if (!InitSparseInfo())
+ return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;
+
+ if (offset < 0 || buf_len < 0)
+ return net::ERR_INVALID_ARGUMENT;
+
+ scoped_refptr<net::DrainableIOBuffer> io_buf(
+ new net::DrainableIOBuffer(buf, buf_len));
+
+ // This loop walks through child entries continuously starting from |offset|
+ // and writes blocks of data (of maximum size kMaxSparseEntrySize) into each
+ // child entry until all |buf_len| bytes are written. The write operation can
+ // start in the middle of an entry.
+ while (io_buf->BytesRemaining()) {
+ MemEntryImpl* child = OpenChild(offset + io_buf->BytesConsumed(), true);
+ int child_offset = ToChildOffset(offset + io_buf->BytesConsumed());
+
+ // Find the right amount to write: the minimum of the remaining bytes to
+ // write and the remaining capacity of this child entry.
+ int write_len = std::min(static_cast<int>(io_buf->BytesRemaining()),
+ kMaxSparseEntrySize - child_offset);
+
+ // Keep a record of the last byte position (exclusive) in the child.
+ int data_size = child->GetDataSize(kSparseData);
+
+ if (net_log_.IsLoggingAllEvents()) {
+ net_log_.BeginEvent(
+ net::NetLog::TYPE_SPARSE_WRITE_CHILD_DATA,
+ CreateNetLogSparseReadWriteCallback(child->net_log().source(),
+ write_len));
+ }
+
+ // Always writes to the child entry. This operation may overwrite data
+ // previously written.
+ // TODO(hclam): if there is data in the entry and this write is not
+ // continuous we may want to discard this write.
+ int ret = child->WriteData(kSparseData, child_offset, io_buf.get(),
+ write_len, CompletionCallback(), true);
+ if (net_log_.IsLoggingAllEvents()) {
+ net_log_.EndEventWithNetErrorCode(
+ net::NetLog::TYPE_SPARSE_WRITE_CHILD_DATA, ret);
+ }
+ if (ret < 0)
+ return ret;
+ else if (ret == 0)
+ break;
+
+ // Keep a record of the first byte position in the child if the write was
+ // neither aligned nor continuous. This is to enable writing to the middle
+ // of an entry while still keeping track of data off the aligned edge.
+ if (data_size != child_offset)
+ child->child_first_pos_ = child_offset;
+
+ // Adjust the offset in the IO buffer.
+ io_buf->DidConsume(ret);
+ }
+
+ UpdateRank(true);
+
+ return io_buf->BytesConsumed();
+}
+
+int MemEntryImpl::GetAvailableRange(int64 offset, int len, int64* start) {
+ DCHECK(type() == kParentEntry);
+ DCHECK(start);
+
+ if (!InitSparseInfo())
+ return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;
+
+ if (offset < 0 || len < 0 || !start)
+ return net::ERR_INVALID_ARGUMENT;
+
+ MemEntryImpl* current_child = NULL;
+
+ // Find the first child and record the number of empty bytes.
+ int empty = FindNextChild(offset, len, &current_child);
+ if (current_child) {
+ *start = offset + empty;
+ len -= empty;
+
+ // Counts the number of continuous bytes.
+ int continuous = 0;
+
+ // This loop scans for continuous bytes.
+ while (len && current_child) {
+ // Number of bytes available in this child.
+ int data_size = current_child->GetDataSize(kSparseData) -
+ ToChildOffset(*start + continuous);
+ if (data_size > len)
+ data_size = len;
+
+ // We have found more continuous bytes so increment the count. Also
+ // decrement the length we should scan.
+ continuous += data_size;
+ len -= data_size;
+
+ // If the next child is discontinuous, break the loop.
+ if (FindNextChild(*start + continuous, len, &current_child))
+ break;
+ }
+ return continuous;
+ }
+ *start = offset;
+ return 0;
+}
+
+void MemEntryImpl::PrepareTarget(int index, int offset, int buf_len) {
+ int entry_size = GetDataSize(index);
+
+ if (entry_size >= offset + buf_len)
+ return; // Not growing the stored data.
+
+ if (static_cast<int>(data_[index].size()) < offset + buf_len)
+ data_[index].resize(offset + buf_len);
+
+ if (offset <= entry_size)
+ return; // There is no "hole" on the stored data.
+
+ // Clean up the hole not written by the user. The point is to avoid
+ // returning random data later on.
+ memset(&(data_[index])[entry_size], 0, offset - entry_size);
+}
+
+void MemEntryImpl::UpdateRank(bool modified) {
+ Time current = Time::Now();
+ last_used_ = current;
+
+ if (modified)
+ last_modified_ = current;
+
+ if (!doomed_)
+ backend_->UpdateRank(this);
+}
+
+bool MemEntryImpl::InitSparseInfo() {
+ DCHECK(type() == kParentEntry);
+
+ if (!children_.get()) {
+ // If we already have some data in the sparse stream but we are being
+ // initialized as a sparse entry, we should fail.
+ if (GetDataSize(kSparseData))
+ return false;
+ children_.reset(new EntryMap());
+
+ // The parent entry stores data for the first block, so save this object to
+ // index 0.
+ (*children_)[0] = this;
+ }
+ return true;
+}
+
+bool MemEntryImpl::InitChildEntry(MemEntryImpl* parent, int child_id,
+ net::NetLog* net_log) {
+ DCHECK(!parent_);
+ DCHECK(!child_id_);
+
+ net_log_ = net::BoundNetLog::Make(net_log,
+ net::NetLog::SOURCE_MEMORY_CACHE_ENTRY);
+ net_log_.BeginEvent(
+ net::NetLog::TYPE_DISK_CACHE_MEM_ENTRY_IMPL,
+ base::Bind(&NetLogChildEntryCreationCallback, parent, child_id_));
+
+ parent_ = parent;
+ child_id_ = child_id;
+ Time current = Time::Now();
+ last_modified_ = current;
+ last_used_ = current;
+ // Insert this entry into the backend's ranking list.
+ backend_->InsertIntoRankingList(this);
+ return true;
+}
+
+MemEntryImpl* MemEntryImpl::OpenChild(int64 offset, bool create) {
+ DCHECK(type() == kParentEntry);
+ int index = ToChildIndex(offset);
+ EntryMap::iterator i = children_->find(index);
+ if (i != children_->end()) {
+ return i->second;
+ } else if (create) {
+ MemEntryImpl* child = new MemEntryImpl(backend_);
+ child->InitChildEntry(this, index, net_log_.net_log());
+ (*children_)[index] = child;
+ return child;
+ }
+ return NULL;
+}
+
+int MemEntryImpl::FindNextChild(int64 offset, int len, MemEntryImpl** child) {
+ DCHECK(child);
+ *child = NULL;
+ int scanned_len = 0;
+
+ // This loop tries to find the first existing child.
+ while (scanned_len < len) {
+ // This points to the current offset in the child.
+ int current_child_offset = ToChildOffset(offset + scanned_len);
+ MemEntryImpl* current_child = OpenChild(offset + scanned_len, false);
+ if (current_child) {
+ int child_first_pos = current_child->child_first_pos_;
+
+ // This points to the first byte that we should be reading from; we need
+ // to take care of the filled region and the current offset in the child.
+ int first_pos = std::max(current_child_offset, child_first_pos);
+
+ // If the first byte position we should read from doesn't exceed the
+ // filled region, we have found the first child.
+ if (first_pos < current_child->GetDataSize(kSparseData)) {
+ *child = current_child;
+
+ // We need to advance the scanned length.
+ scanned_len += first_pos - current_child_offset;
+ break;
+ }
+ }
+ scanned_len += kMaxSparseEntrySize - current_child_offset;
+ }
+ return scanned_len;
+}
+
+void MemEntryImpl::DetachChild(int child_id) {
+ children_->erase(child_id);
+}
+
+} // namespace disk_cache
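
The sparse code above addresses data in 4 KB children: ToChildIndex() and
ToChildOffset() split a global offset using kMaxSparseEntryBits (12). A worked
example of the child-walking loop in InternalWriteSparseData (arithmetic only):

  // WriteSparseData(offset = 4000, buf_len = 2000), kMaxSparseEntrySize = 4096:
  //   Iteration 1: child index = 4000 >> 12 = 0, child offset = 4000,
  //                write_len = min(2000, 4096 - 4000) = 96 bytes.
  //   Iteration 2: 96 bytes consumed, global offset = 4096, child index = 1,
  //                child offset = 0, write_len = min(1904, 4096) = 1904 bytes.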
diff --git a/net/disk_cache/memory/mem_entry_impl.h b/net/disk_cache/memory/mem_entry_impl.h
new file mode 100644
index 0000000..aec8d22
--- /dev/null
+++ b/net/disk_cache/memory/mem_entry_impl.h
@@ -0,0 +1,185 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef NET_DISK_CACHE_MEMORY_MEM_ENTRY_IMPL_H_
+#define NET_DISK_CACHE_MEMORY_MEM_ENTRY_IMPL_H_
+
+#include "base/containers/hash_tables.h"
+#include "base/gtest_prod_util.h"
+#include "base/memory/scoped_ptr.h"
+#include "net/base/net_log.h"
+#include "net/disk_cache/disk_cache.h"
+
+namespace disk_cache {
+
+class MemBackendImpl;
+
+// This class implements the Entry interface for the memory-only cache. An
+// object of this class represents a single entry in the cache. We use two
+// types of entries, parent and child, to support sparse caching.
+//
+// A parent entry is non-sparse until a sparse method is invoked (i.e.
+// ReadSparseData, WriteSparseData, GetAvailableRange), at which point sparse
+// information is initialized. It then manages a list of child entries and
+// delegates the sparse API calls to the child entries. It creates and deletes
+// child entries and updates the list when needed.
+//
+// A child entry is used to carry partial cache content; non-sparse methods
+// like ReadData and WriteData cannot be applied to it. The lifetime of a
+// child entry is managed by the parent entry that created it, except that
+// the child can be evicted independently. A child entry does not have a key
+// and is not registered in the backend's entry map. It is registered in the
+// backend's ranking list to enable eviction of partial content.
+//
+// A sparse entry has a fixed maximum size and can be partially filled. There
+// can only be one continuous filled region in a sparse entry, as illustrated
+// by the following example:
+// | xxx ooooo |
+// x = unfilled region
+// o = filled region
+// It is guaranteed that there is at most one unfilled region and one filled
+// region, and the unfilled region (if there is one) always comes before the
+// filled region. The bookkeeping for the filled region in a sparse entry is
+// done using the variable |child_first_pos_| (inclusive).
+
+class MemEntryImpl : public Entry {
+ public:
+ enum EntryType {
+ kParentEntry,
+ kChildEntry,
+ };
+
+ explicit MemEntryImpl(MemBackendImpl* backend);
+
+ // Performs the initialization of a MemEntryImpl that will be added to the
+ // cache.
+ bool CreateEntry(const std::string& key, net::NetLog* net_log);
+
+ // Permanently destroys this entry.
+ void InternalDoom();
+
+ void Open();
+ bool InUse();
+
+ MemEntryImpl* next() const {
+ return next_;
+ }
+
+ MemEntryImpl* prev() const {
+ return prev_;
+ }
+
+ void set_next(MemEntryImpl* next) {
+ next_ = next;
+ }
+
+ void set_prev(MemEntryImpl* prev) {
+ prev_ = prev;
+ }
+
+ EntryType type() const {
+ return parent_ ? kChildEntry : kParentEntry;
+ }
+
+ const net::BoundNetLog& net_log() {
+ return net_log_;
+ }
+
+ // Entry interface.
+ virtual void Doom() OVERRIDE;
+ virtual void Close() OVERRIDE;
+ virtual std::string GetKey() const OVERRIDE;
+ virtual base::Time GetLastUsed() const OVERRIDE;
+ virtual base::Time GetLastModified() const OVERRIDE;
+ virtual int32 GetDataSize(int index) const OVERRIDE;
+ virtual int ReadData(int index, int offset, IOBuffer* buf, int buf_len,
+ const CompletionCallback& callback) OVERRIDE;
+ virtual int WriteData(int index, int offset, IOBuffer* buf, int buf_len,
+ const CompletionCallback& callback,
+ bool truncate) OVERRIDE;
+ virtual int ReadSparseData(int64 offset, IOBuffer* buf, int buf_len,
+ const CompletionCallback& callback) OVERRIDE;
+ virtual int WriteSparseData(int64 offset, IOBuffer* buf, int buf_len,
+ const CompletionCallback& callback) OVERRIDE;
+ virtual int GetAvailableRange(int64 offset, int len, int64* start,
+ const CompletionCallback& callback) OVERRIDE;
+ virtual bool CouldBeSparse() const OVERRIDE;
+ virtual void CancelSparseIO() OVERRIDE {}
+ virtual int ReadyForSparseIO(const CompletionCallback& callback) OVERRIDE;
+
+ private:
+ typedef base::hash_map<int, MemEntryImpl*> EntryMap;
+
+ enum {
+ NUM_STREAMS = 3
+ };
+
+ virtual ~MemEntryImpl();
+
+ // Do all the work for corresponding public functions. Implemented as
+ // separate functions to make logging of results simpler.
+ int InternalReadData(int index, int offset, IOBuffer* buf, int buf_len);
+ int InternalWriteData(int index, int offset, IOBuffer* buf, int buf_len,
+ bool truncate);
+ int InternalReadSparseData(int64 offset, IOBuffer* buf, int buf_len);
+ int InternalWriteSparseData(int64 offset, IOBuffer* buf, int buf_len);
+
+ // Old Entry interface.
+ int GetAvailableRange(int64 offset, int len, int64* start);
+
+ // Grows and cleans up the data buffer.
+ void PrepareTarget(int index, int offset, int buf_len);
+
+ // Updates ranking information.
+ void UpdateRank(bool modified);
+
+ // Initializes the children map and sparse info. This method is only called
+ // on a parent entry.
+ bool InitSparseInfo();
+
+ // Performs the initialization of a MemEntryImpl as a child entry.
+ // |parent| is the pointer to the parent entry. |child_id| is the ID of
+ // the new child.
+ bool InitChildEntry(MemEntryImpl* parent, int child_id, net::NetLog* net_log);
+
+ // Returns an entry responsible for |offset|. The returned entry can be a
+ // child entry or this entry itself if |offset| points to the first range.
+ // If such an entry does not exist and |create| is true, a new child entry
+ // is created.
+ MemEntryImpl* OpenChild(int64 offset, bool create);
+
+ // Finds the first child located within the range [|offset|, |offset + len|).
+ // Returns the number of bytes ahead of |offset| to reach the first available
+ // bytes in the entry. The first child found is output to |child|.
+ int FindNextChild(int64 offset, int len, MemEntryImpl** child);
+
+ // Removes child indexed by |child_id| from the children map.
+ void DetachChild(int child_id);
+
+ std::string key_;
+ std::vector<char> data_[NUM_STREAMS]; // User data.
+ int32 data_size_[NUM_STREAMS];
+ int ref_count_;
+
+ int child_id_; // The ID of a child entry.
+ int child_first_pos_; // The position of the first byte in a child
+ // entry.
+ MemEntryImpl* next_; // Pointers for the LRU list.
+ MemEntryImpl* prev_;
+ MemEntryImpl* parent_; // Pointer to the parent entry.
+ scoped_ptr<EntryMap> children_;
+
+ base::Time last_modified_; // LRU information.
+ base::Time last_used_;
+ MemBackendImpl* backend_; // Back pointer to the cache.
+ bool doomed_; // True if this entry was removed from the cache.
+
+ net::BoundNetLog net_log_;
+
+ DISALLOW_COPY_AND_ASSIGN(MemEntryImpl);
+};
+
+} // namespace disk_cache
+
+#endif // NET_DISK_CACHE_MEMORY_MEM_ENTRY_IMPL_H_
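
A sketch of driving the sparse interface above, assuming |entry| is an open
parent entry (illustrative only; the memory backend never returns
net::ERR_IO_PENDING, so the results are available immediately):

  // Write 1 KB at offset 8192 (the start of child 2), then query the range.
  scoped_refptr<net::IOBuffer> buf(new net::IOBuffer(1024));
  memset(buf->data(), 'x', 1024);
  int written = entry->WriteSparseData(8192, buf.get(), 1024,
                                       net::CompletionCallback());
  int64 start = 0;
  int avail = entry->GetAvailableRange(8192, 1024, &start,
                                       net::CompletionCallback());
  // Expect: written == 1024, start == 8192, avail == 1024.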
diff --git a/net/disk_cache/memory/mem_rankings.cc b/net/disk_cache/memory/mem_rankings.cc
new file mode 100644
index 0000000..ba5e00b
--- /dev/null
+++ b/net/disk_cache/memory/mem_rankings.cc
@@ -0,0 +1,67 @@
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/disk_cache/memory/mem_rankings.h"
+
+#include "base/logging.h"
+#include "net/disk_cache/memory/mem_entry_impl.h"
+
+namespace disk_cache {
+
+MemRankings::~MemRankings() {
+ DCHECK(!head_ && !tail_);
+}
+
+void MemRankings::Insert(MemEntryImpl* node) {
+ if (head_)
+ head_->set_prev(node);
+
+ if (!tail_)
+ tail_ = node;
+
+ node->set_prev(NULL);
+ node->set_next(head_);
+ head_ = node;
+}
+
+void MemRankings::Remove(MemEntryImpl* node) {
+ MemEntryImpl* prev = node->prev();
+ MemEntryImpl* next = node->next();
+
+ if (head_ == node)
+ head_ = next;
+
+ if (tail_ == node)
+ tail_ = prev;
+
+ if (prev)
+ prev->set_next(next);
+
+ if (next)
+ next->set_prev(prev);
+
+ node->set_next(NULL);
+ node->set_prev(NULL);
+}
+
+void MemRankings::UpdateRank(MemEntryImpl* node) {
+ Remove(node);
+ Insert(node);
+}
+
+MemEntryImpl* MemRankings::GetNext(MemEntryImpl* node) {
+ if (!node)
+ return head_;
+
+ return node->next();
+}
+
+MemEntryImpl* MemRankings::GetPrev(MemEntryImpl* node) {
+ if (!node)
+ return tail_;
+
+ return node->prev();
+}
+
+} // namespace disk_cache
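
MemRankings keeps the most recently used entry at |head_|, so eviction (see
MemBackendImpl::TrimCache in mem_backend_impl.cc) walks backwards from the
tail. A minimal sketch of that traversal (illustrative only):

  // Visit entries from least to most recently used.
  MemEntryImpl* node = rankings->GetPrev(NULL);  // NULL yields the tail.
  while (node) {
    MemEntryImpl* prev = rankings->GetPrev(node);  // Fetch before dooming.
    if (!node->InUse())
      node->Doom();  // Doom() ultimately removes |node| from the list.
    node = prev;
  }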
diff --git a/net/disk_cache/memory/mem_rankings.h b/net/disk_cache/memory/mem_rankings.h
new file mode 100644
index 0000000..b75bfc1
--- /dev/null
+++ b/net/disk_cache/memory/mem_rankings.h
@@ -0,0 +1,44 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// See net/disk_cache/disk_cache.h for the public interface.
+
+#ifndef NET_DISK_CACHE_MEMORY_MEM_RANKINGS_H_
+#define NET_DISK_CACHE_MEMORY_MEM_RANKINGS_H_
+
+#include "base/basictypes.h"
+
+namespace disk_cache {
+
+class MemEntryImpl;
+
+// This class handles the ranking information for the memory-only cache.
+class MemRankings {
+ public:
+ MemRankings() : head_(NULL), tail_(NULL) {}
+ ~MemRankings();
+
+ // Inserts a given entry at the head of the queue.
+ void Insert(MemEntryImpl* node);
+
+ // Removes a given entry from the LRU list.
+ void Remove(MemEntryImpl* node);
+
+ // Moves a given entry to the head.
+ void UpdateRank(MemEntryImpl* node);
+
+ // Iterates through the list.
+ MemEntryImpl* GetNext(MemEntryImpl* node);
+ MemEntryImpl* GetPrev(MemEntryImpl* node);
+
+ private:
+ MemEntryImpl* head_;
+ MemEntryImpl* tail_;
+
+ DISALLOW_COPY_AND_ASSIGN(MemRankings);
+};
+
+} // namespace disk_cache
+
+#endif // NET_DISK_CACHE_MEMORY_MEM_RANKINGS_H_