summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authoramineer <amineer@chromium.org>2016-02-12 17:19:43 -0800
committerCommit bot <commit-bot@chromium.org>2016-02-13 01:20:55 +0000
commite53d9c39108b08a8f5f329e0eb70b5c8c3626b3b (patch)
tree9c2633723f5e1a93982406e106a4c8b026f03fe5
parentbd95f3ca29d7e11a5c11def6b2a46ee0a23084de (diff)
downloadchromium_src-e53d9c39108b08a8f5f329e0eb70b5c8c3626b3b.zip
chromium_src-e53d9c39108b08a8f5f329e0eb70b5c8c3626b3b.tar.gz
chromium_src-e53d9c39108b08a8f5f329e0eb70b5c8c3626b3b.tar.bz2
Revert of Refactor and shorten in-memory cache. (patchset #37 id:611001 of https://codereview.chromium.org/23602008/ )
Reason for revert: Causing crashes, see bug 586440 Original issue's description: > Refactor and shorten in-memory cache. > > Previously this code was maintaining a complex doubly-linked list > manually, rather than using base::linked_list, which does the same thing. > > A number of uses of vector were strictly speaking violating the C++98 > specification, also fixed. > > Too many functions were short redirectors into other functions. Now fixed. > > R=mmenke > BUG=581791 TBR=mmenke@chromium.org,gavinp@chromium.org # Not skipping CQ checks because original CL landed more than 1 days ago. BUG=581791 Review URL: https://codereview.chromium.org/1692003004 Cr-Commit-Position: refs/heads/master@{#375313}
-rw-r--r--net/disk_cache/entry_unittest.cc2
-rw-r--r--net/disk_cache/memory/mem_backend_impl.cc311
-rw-r--r--net/disk_cache/memory/mem_backend_impl.h66
-rw-r--r--net/disk_cache/memory/mem_entry_impl.cc364
-rw-r--r--net/disk_cache/memory/mem_entry_impl.h126
-rw-r--r--net/disk_cache/memory/mem_rankings.cc67
-rw-r--r--net/disk_cache/memory/mem_rankings.h44
-rw-r--r--net/net.gypi2
8 files changed, 619 insertions, 363 deletions
diff --git a/net/disk_cache/entry_unittest.cc b/net/disk_cache/entry_unittest.cc
index 22f793b..ea58049 100644
--- a/net/disk_cache/entry_unittest.cc
+++ b/net/disk_cache/entry_unittest.cc
@@ -1633,7 +1633,7 @@ TEST_F(DiskCacheEntryTest, MemoryOnlyEnumerationWithSparseEntries) {
++count;
disk_cache::MemEntryImpl* mem_entry =
reinterpret_cast<disk_cache::MemEntryImpl*>(entry);
- EXPECT_EQ(disk_cache::MemEntryImpl::PARENT_ENTRY, mem_entry->type());
+ EXPECT_EQ(disk_cache::MemEntryImpl::kParentEntry, mem_entry->type());
mem_entry->Close();
}
EXPECT_EQ(1, count);
diff --git a/net/disk_cache/memory/mem_backend_impl.cc b/net/disk_cache/memory/mem_backend_impl.cc
index 6cabf34..5b37ca9 100644
--- a/net/disk_cache/memory/mem_backend_impl.cc
+++ b/net/disk_cache/memory/mem_backend_impl.cc
@@ -4,8 +4,6 @@
#include "net/disk_cache/memory/mem_backend_impl.h"
-#include <algorithm>
-#include <functional>
#include <utility>
#include "base/logging.h"
@@ -16,39 +14,36 @@
using base::Time;
-namespace disk_cache {
-
namespace {
const int kDefaultInMemoryCacheSize = 10 * 1024 * 1024;
-const int kDefaultEvictionSize = kDefaultInMemoryCacheSize / 10;
-
-bool CheckLRUListOrder(const base::LinkedList<MemEntryImpl>& lru_list) {
- // TODO(gavinp): Check MemBackendImpl::current_size_ here as well.
- base::Time previous_last_use_time;
- for (base::LinkNode<MemEntryImpl>* node = lru_list.head();
- node != lru_list.end(); node = node->next()) {
- if (node->value()->GetLastUsed() < previous_last_use_time)
- return false;
- previous_last_use_time = node->value()->GetLastUsed();
- }
- return true;
+const int kCleanUpMargin = 1024 * 1024;
+
+int LowWaterAdjust(int high_water) {
+ if (high_water < kCleanUpMargin)
+ return 0;
+
+ return high_water - kCleanUpMargin;
}
} // namespace
+namespace disk_cache {
+
MemBackendImpl::MemBackendImpl(net::NetLog* net_log)
: max_size_(0), current_size_(0), net_log_(net_log), weak_factory_(this) {
}
MemBackendImpl::~MemBackendImpl() {
- DCHECK(CheckLRUListOrder(lru_list_));
- while (!entries_.empty())
- entries_.begin()->second->Doom();
+ EntryMap::iterator it = entries_.begin();
+ while (it != entries_.end()) {
+ it->second->Doom();
+ it = entries_.begin();
+ }
DCHECK(!current_size_);
}
-// static
+// Static.
scoped_ptr<Backend> MemBackendImpl::CreateBackend(int max_bytes,
net::NetLog* net_log) {
scoped_ptr<MemBackendImpl> cache(new MemBackendImpl(net_log));
@@ -72,7 +67,7 @@ bool MemBackendImpl::Init() {
}
// We want to use up to 2% of the computer's memory, with a limit of 50 MB,
- // reached on system with more than 2.5 GB of RAM.
+ // reached on systemd with more than 2.5 GB of RAM.
total_memory = total_memory * 2 / 100;
if (total_memory > kDefaultInMemoryCacheSize * 5)
max_size_ = kDefaultInMemoryCacheSize * 5;
@@ -96,32 +91,41 @@ bool MemBackendImpl::SetMaxSize(int max_bytes) {
return true;
}
-int MemBackendImpl::MaxFileSize() const {
- return max_size_ / 8;
+void MemBackendImpl::InternalDoomEntry(MemEntryImpl* entry) {
+ // Only parent entries can be passed into this method.
+ DCHECK(entry->type() == MemEntryImpl::kParentEntry);
+
+ rankings_.Remove(entry);
+ EntryMap::iterator it = entries_.find(entry->GetKey());
+ if (it != entries_.end())
+ entries_.erase(it);
+ else
+ NOTREACHED();
+
+ entry->InternalDoom();
+}
+
+void MemBackendImpl::UpdateRank(MemEntryImpl* node) {
+ rankings_.UpdateRank(node);
}
-void MemBackendImpl::OnEntryInserted(MemEntryImpl* entry) {
- lru_list_.Append(entry);
+void MemBackendImpl::ModifyStorageSize(int32_t old_size, int32_t new_size) {
+ if (old_size >= new_size)
+ SubstractStorageSize(old_size - new_size);
+ else
+ AddStorageSize(new_size - old_size);
}
-void MemBackendImpl::OnEntryUpdated(MemEntryImpl* entry) {
- DCHECK(CheckLRUListOrder(lru_list_));
- // LinkedList<>::RemoveFromList() removes |entry| from |lru_list_|.
- entry->RemoveFromList();
- lru_list_.Append(entry);
+int MemBackendImpl::MaxFileSize() const {
+ return max_size_ / 8;
}
-void MemBackendImpl::OnEntryDoomed(MemEntryImpl* entry) {
- DCHECK(CheckLRUListOrder(lru_list_));
- if (entry->type() == MemEntryImpl::PARENT_ENTRY)
- entries_.erase(entry->key());
- // LinkedList<>::RemoveFromList() removes |entry| from |lru_list_|.
- entry->RemoveFromList();
+void MemBackendImpl::InsertIntoRankingList(MemEntryImpl* entry) {
+ rankings_.Insert(entry);
}
-void MemBackendImpl::ModifyStorageSize(int32_t delta) {
- current_size_ += delta;
- EvictIfNeeded();
+void MemBackendImpl::RemoveFromRankingList(MemEntryImpl* entry) {
+ rankings_.Remove(entry);
}
net::CacheType MemBackendImpl::GetCacheType() const {
@@ -132,71 +136,52 @@ int32_t MemBackendImpl::GetEntryCount() const {
return static_cast<int32_t>(entries_.size());
}
-int MemBackendImpl::OpenEntry(const std::string& key,
- Entry** entry,
+int MemBackendImpl::OpenEntry(const std::string& key, Entry** entry,
const CompletionCallback& callback) {
- EntryMap::iterator it = entries_.find(key);
- if (it == entries_.end())
- return net::ERR_FAILED;
-
- it->second->Open();
+ if (OpenEntry(key, entry))
+ return net::OK;
- *entry = it->second;
- return net::OK;
+ return net::ERR_FAILED;
}
-int MemBackendImpl::CreateEntry(const std::string& key,
- Entry** entry,
+int MemBackendImpl::CreateEntry(const std::string& key, Entry** entry,
const CompletionCallback& callback) {
- std::pair<EntryMap::iterator, bool> create_result =
- entries_.insert(EntryMap::value_type(key, nullptr));
- const bool did_insert = create_result.second;
- if (!did_insert)
- return net::ERR_FAILED;
+ if (CreateEntry(key, entry))
+ return net::OK;
- MemEntryImpl* cache_entry = new MemEntryImpl(this, key, net_log_);
- create_result.first->second = cache_entry;
- *entry = cache_entry;
- return net::OK;
+ return net::ERR_FAILED;
}
int MemBackendImpl::DoomEntry(const std::string& key,
const CompletionCallback& callback) {
- EntryMap::iterator it = entries_.find(key);
- if (it == entries_.end())
- return net::ERR_FAILED;
+ if (DoomEntry(key))
+ return net::OK;
- it->second->Doom();
- return net::OK;
+ return net::ERR_FAILED;
}
int MemBackendImpl::DoomAllEntries(const CompletionCallback& callback) {
- return DoomEntriesBetween(Time(), Time(), callback);
+ if (DoomAllEntries())
+ return net::OK;
+
+ return net::ERR_FAILED;
}
-int MemBackendImpl::DoomEntriesBetween(Time initial_time,
- Time end_time,
+int MemBackendImpl::DoomEntriesBetween(const base::Time initial_time,
+ const base::Time end_time,
const CompletionCallback& callback) {
- if (end_time.is_null())
- end_time = Time::Max();
-
- DCHECK_GE(end_time, initial_time);
-
- base::LinkNode<MemEntryImpl>* node = lru_list_.head();
- while (node != lru_list_.end() && node->value()->GetLastUsed() < initial_time)
- node = node->next();
- while (node != lru_list_.end() && node->value()->GetLastUsed() < end_time) {
- MemEntryImpl* to_doom = node->value();
- node = node->next();
- to_doom->Doom();
- }
+ if (DoomEntriesBetween(initial_time, end_time))
+ return net::OK;
- return net::OK;
+ return net::ERR_FAILED;
}
-int MemBackendImpl::DoomEntriesSince(Time initial_time,
+int MemBackendImpl::DoomEntriesSince(const base::Time initial_time,
const CompletionCallback& callback) {
- return DoomEntriesBetween(initial_time, Time::Max(), callback);
+ if (DoomEntriesSince(initial_time))
+ return net::OK;
+
+ return net::ERR_FAILED;
}
int MemBackendImpl::CalculateSizeOfAllEntries(
@@ -204,41 +189,35 @@ int MemBackendImpl::CalculateSizeOfAllEntries(
return current_size_;
}
-class MemBackendImpl::MemIterator final : public Backend::Iterator {
+class MemBackendImpl::MemIterator : public Backend::Iterator {
public:
explicit MemIterator(base::WeakPtr<MemBackendImpl> backend)
- : backend_(backend), current_(nullptr) {}
+ : backend_(backend), current_(NULL) {
+ }
int OpenNextEntry(Entry** next_entry,
const CompletionCallback& callback) override {
if (!backend_)
return net::ERR_FAILED;
- // Iterate using |lru_list_|, from most recently used to least recently
- // used, for compatibility with the unit tests that assume this behaviour.
- // Consider the last element if we are beginning an iteration, otherwise
- // progressively move earlier in the LRU list.
- current_ = current_ ? current_->previous() : backend_->lru_list_.tail();
-
+ MemEntryImpl* node = backend_->rankings_.GetNext(current_);
// We should never return a child entry so iterate until we hit a parent
// entry.
- while (current_ != backend_->lru_list_.end() &&
- current_->value()->type() != MemEntryImpl::PARENT_ENTRY) {
- current_ = current_->previous();
- }
- if (current_ == backend_->lru_list_.end()) {
- *next_entry = nullptr;
- return net::ERR_FAILED;
+ while (node && node->type() != MemEntryImpl::kParentEntry)
+ node = backend_->rankings_.GetNext(node);
+ *next_entry = node;
+ current_ = node;
+
+ if (node) {
+ node->Open();
+ return net::OK;
}
-
- current_->value()->Open();
- *next_entry = current_->value();
- return net::OK;
+ return net::ERR_FAILED;
}
private:
base::WeakPtr<MemBackendImpl> backend_;
- base::LinkNode<MemEntryImpl>* current_;
+ MemEntryImpl* current_;
};
scoped_ptr<Backend::Iterator> MemBackendImpl::CreateIterator() {
@@ -248,23 +227,127 @@ scoped_ptr<Backend::Iterator> MemBackendImpl::CreateIterator() {
void MemBackendImpl::OnExternalCacheHit(const std::string& key) {
EntryMap::iterator it = entries_.find(key);
+ if (it != entries_.end()) {
+ UpdateRank(it->second);
+ }
+}
+
+bool MemBackendImpl::OpenEntry(const std::string& key, Entry** entry) {
+ EntryMap::iterator it = entries_.find(key);
+ if (it == entries_.end())
+ return false;
+
+ it->second->Open();
+
+ *entry = it->second;
+ return true;
+}
+
+bool MemBackendImpl::CreateEntry(const std::string& key, Entry** entry) {
+ EntryMap::iterator it = entries_.find(key);
if (it != entries_.end())
- it->second->UpdateStateOnUse(MemEntryImpl::ENTRY_WAS_NOT_MODIFIED);
+ return false;
+
+ MemEntryImpl* cache_entry = new MemEntryImpl(this);
+ if (!cache_entry->CreateEntry(key, net_log_)) {
+ delete entry;
+ return false;
+ }
+
+ rankings_.Insert(cache_entry);
+ entries_[key] = cache_entry;
+
+ *entry = cache_entry;
+ return true;
}
-void MemBackendImpl::EvictIfNeeded() {
- if (current_size_ <= max_size_)
- return;
+bool MemBackendImpl::DoomEntry(const std::string& key) {
+ Entry* entry;
+ if (!OpenEntry(key, &entry))
+ return false;
- int target_size = std::max(0, max_size_ - kDefaultEvictionSize);
+ entry->Doom();
+ entry->Close();
+ return true;
+}
+
+bool MemBackendImpl::DoomAllEntries() {
+ TrimCache(true);
+ return true;
+}
+
+bool MemBackendImpl::DoomEntriesBetween(const Time initial_time,
+ const Time end_time) {
+ if (end_time.is_null())
+ return DoomEntriesSince(initial_time);
+
+ DCHECK(end_time >= initial_time);
+
+ MemEntryImpl* node = rankings_.GetNext(NULL);
+ // Last valid entry before |node|.
+ // Note, that entries after |node| may become invalid during |node| doom in
+ // case when they are child entries of it. It is guaranteed that
+ // parent node will go prior to it childs in ranking list (see
+ // InternalReadSparseData and InternalWriteSparseData).
+ MemEntryImpl* last_valid = NULL;
+
+ // rankings_ is ordered by last used, this will descend through the cache
+ // and start dooming items before the end_time, and will stop once it reaches
+ // an item used before the initial time.
+ while (node) {
+ if (node->GetLastUsed() < initial_time)
+ break;
+
+ if (node->GetLastUsed() < end_time)
+ node->Doom();
+ else
+ last_valid = node;
+ node = rankings_.GetNext(last_valid);
+ }
+
+ return true;
+}
+
+bool MemBackendImpl::DoomEntriesSince(const Time initial_time) {
+ for (;;) {
+ // Get the entry in the front.
+ Entry* entry = rankings_.GetNext(NULL);
- base::LinkNode<MemEntryImpl>* entry = lru_list_.head();
- while (current_size_ > target_size && entry != lru_list_.end()) {
- MemEntryImpl* to_doom = entry->value();
- entry = entry->next();
- if (!to_doom->InUse())
- to_doom->Doom();
+ // Break the loop when there are no more entries or the entry is too old.
+ if (!entry || entry->GetLastUsed() < initial_time)
+ return true;
+ entry->Doom();
}
}
+void MemBackendImpl::TrimCache(bool empty) {
+ MemEntryImpl* next = rankings_.GetPrev(NULL);
+ if (!next)
+ return;
+
+ int target_size = empty ? 0 : LowWaterAdjust(max_size_);
+ while (current_size_ > target_size && next) {
+ MemEntryImpl* node = next;
+ next = rankings_.GetPrev(next);
+ if (!node->InUse() || empty) {
+ node->Doom();
+ }
+ }
+
+ return;
+}
+
+void MemBackendImpl::AddStorageSize(int32_t bytes) {
+ current_size_ += bytes;
+ DCHECK_GE(current_size_, 0);
+
+ if (current_size_ > max_size_)
+ TrimCache(false);
+}
+
+void MemBackendImpl::SubstractStorageSize(int32_t bytes) {
+ current_size_ -= bytes;
+ DCHECK_GE(current_size_, 0);
+}
+
} // namespace disk_cache
diff --git a/net/disk_cache/memory/mem_backend_impl.h b/net/disk_cache/memory/mem_backend_impl.h
index 8abcca5..e2a9f1a 100644
--- a/net/disk_cache/memory/mem_backend_impl.h
+++ b/net/disk_cache/memory/mem_backend_impl.h
@@ -9,17 +9,13 @@
#include <stdint.h>
-#include <string>
-
#include "base/compiler_specific.h"
#include "base/containers/hash_tables.h"
-#include "base/containers/linked_list.h"
#include "base/macros.h"
#include "base/memory/weak_ptr.h"
#include "base/strings/string_split.h"
-#include "base/time/time.h"
#include "net/disk_cache/disk_cache.h"
-#include "net/disk_cache/memory/mem_entry_impl.h"
+#include "net/disk_cache/memory/mem_rankings.h"
namespace net {
class NetLog;
@@ -27,9 +23,11 @@ class NetLog;
namespace disk_cache {
+class MemEntryImpl;
+
// This class implements the Backend interface. An object of this class handles
// the operations of the cache without writing to disk.
-class NET_EXPORT_PRIVATE MemBackendImpl final : public Backend {
+class NET_EXPORT_PRIVATE MemBackendImpl : public Backend {
public:
explicit MemBackendImpl(net::NetLog* net_log);
~MemBackendImpl() override;
@@ -47,29 +45,26 @@ class NET_EXPORT_PRIVATE MemBackendImpl final : public Backend {
// Sets the maximum size for the total amount of data stored by this instance.
bool SetMaxSize(int max_bytes);
- // Returns the maximum size for a file to reside on the cache.
- int MaxFileSize() const;
+ // Permanently deletes an entry.
+ void InternalDoomEntry(MemEntryImpl* entry);
- // These next methods (before the implementation of the Backend interface) are
- // called by MemEntryImpl to update the state of the backend during the entry
- // lifecycle.
+ // Updates the ranking information for an entry.
+ void UpdateRank(MemEntryImpl* node);
- // Signals that new entry has been created, and should be placed in
- // |lru_list_| so that it is eligable for eviction.
- void OnEntryInserted(MemEntryImpl* entry);
+ // A user data block is being created, extended or truncated.
+ void ModifyStorageSize(int32_t old_size, int32_t new_size);
- // Signals that an entry has been updated, and thus should be moved to the end
- // of |lru_list_|.
- void OnEntryUpdated(MemEntryImpl* entry);
+ // Returns the maximum size for a file to reside on the cache.
+ int MaxFileSize() const;
- // Signals that an entry has been doomed, and so it should be removed from the
- // list of active entries as appropriate, as well as removed from the
- // |lru_list_|.
- void OnEntryDoomed(MemEntryImpl* entry);
+ // Insert an MemEntryImpl into the ranking list. This method is only called
+ // from MemEntryImpl to insert child entries. The reference can be removed
+ // by calling RemoveFromRankingList(|entry|).
+ void InsertIntoRankingList(MemEntryImpl* entry);
- // Adjust the current size of this backend by |delta|. This is used to
- // determine if eviction is neccessary and when eviction is finished.
- void ModifyStorageSize(int32_t delta);
+ // Remove |entry| from ranking list. This method is only called from
+ // MemEntryImpl to remove a child entry from the ranking list.
+ void RemoveFromRankingList(MemEntryImpl* entry);
// Backend interface.
net::CacheType GetCacheType() const override;
@@ -99,15 +94,26 @@ class NET_EXPORT_PRIVATE MemBackendImpl final : public Backend {
typedef base::hash_map<std::string, MemEntryImpl*> EntryMap;
- // Deletes entries from the cache until the current size is below the limit.
- void EvictIfNeeded();
+ // Old Backend interface.
+ bool OpenEntry(const std::string& key, Entry** entry);
+ bool CreateEntry(const std::string& key, Entry** entry);
+ bool DoomEntry(const std::string& key);
+ bool DoomAllEntries();
+ bool DoomEntriesBetween(const base::Time initial_time,
+ const base::Time end_time);
+ bool DoomEntriesSince(const base::Time initial_time);
- EntryMap entries_;
+ // Deletes entries from the cache until the current size is below the limit.
+ // If empty is true, the whole cache will be trimmed, regardless of being in
+ // use.
+ void TrimCache(bool empty);
- // Stored in increasing order of last use time, from least recently used to
- // most recently used.
- base::LinkedList<MemEntryImpl> lru_list_;
+ // Handles the used storage count.
+ void AddStorageSize(int32_t bytes);
+ void SubstractStorageSize(int32_t bytes);
+ EntryMap entries_;
+ MemRankings rankings_; // Rankings to be able to trim the cache.
int32_t max_size_; // Maximum data size for this instance.
int32_t current_size_;
diff --git a/net/disk_cache/memory/mem_entry_impl.cc b/net/disk_cache/memory/mem_entry_impl.cc
index c28a7fa..6925518 100644
--- a/net/disk_cache/memory/mem_entry_impl.cc
+++ b/net/disk_cache/memory/mem_entry_impl.cc
@@ -4,7 +4,6 @@
#include "net/disk_cache/memory/mem_entry_impl.h"
-#include <algorithm>
#include <utility>
#include "base/bind.h"
@@ -18,8 +17,6 @@
using base::Time;
-namespace disk_cache {
-
namespace {
const int kSparseData = 1;
@@ -31,12 +28,12 @@ const int kMaxSparseEntryBits = 12;
const int kMaxSparseEntrySize = 1 << kMaxSparseEntryBits;
// Convert global offset to child index.
-int ToChildIndex(int64_t offset) {
+inline int ToChildIndex(int64_t offset) {
return static_cast<int>(offset >> kMaxSparseEntryBits);
}
// Convert global offset to offset in child entry.
-int ToChildOffset(int64_t offset) {
+inline int ToChildOffset(int64_t offset) {
return static_cast<int>(offset & (kMaxSparseEntrySize - 1));
}
@@ -48,110 +45,129 @@ std::string GenerateChildName(const std::string& base_name, int child_id) {
return base::StringPrintf("Range_%s:%i", base_name.c_str(), child_id);
}
-// Returns NetLog parameters for the creation of a MemEntryImpl. A separate
-// function is needed because child entries don't store their key().
-scoped_ptr<base::Value> NetLogEntryCreationCallback(
- const MemEntryImpl* entry,
+// Returns NetLog parameters for the creation of a child MemEntryImpl. Separate
+// function needed because child entries don't suppport GetKey().
+scoped_ptr<base::Value> NetLogChildEntryCreationCallback(
+ const disk_cache::MemEntryImpl* parent,
+ int child_id,
net::NetLogCaptureMode /* capture_mode */) {
scoped_ptr<base::DictionaryValue> dict(new base::DictionaryValue());
- std::string key;
- switch (entry->type()) {
- case MemEntryImpl::PARENT_ENTRY:
- key = entry->key();
- break;
- case MemEntryImpl::CHILD_ENTRY:
- key = GenerateChildName(entry->parent()->key(), entry->child_id());
- break;
- }
- dict->SetString("key", key);
+ dict->SetString("key", GenerateChildName(parent->GetKey(), child_id));
dict->SetBoolean("created", true);
return std::move(dict);
}
} // namespace
-MemEntryImpl::MemEntryImpl(MemBackendImpl* backend,
- const std::string& key,
- net::NetLog* net_log)
- : MemEntryImpl(backend,
- key,
- 0, // child_id
- nullptr, // parent
- net_log) {
+namespace disk_cache {
+
+MemEntryImpl::MemEntryImpl(MemBackendImpl* backend) {
+ doomed_ = false;
+ backend_ = backend;
+ ref_count_ = 0;
+ parent_ = NULL;
+ child_id_ = 0;
+ child_first_pos_ = 0;
+ next_ = NULL;
+ prev_ = NULL;
+ for (int i = 0; i < NUM_STREAMS; i++)
+ data_size_[i] = 0;
+}
+
+// ------------------------------------------------------------------------
+
+bool MemEntryImpl::CreateEntry(const std::string& key, net::NetLog* net_log) {
+ key_ = key;
+ Time current = Time::Now();
+ last_modified_ = current;
+ last_used_ = current;
+
+ net_log_ = net::BoundNetLog::Make(net_log,
+ net::NetLog::SOURCE_MEMORY_CACHE_ENTRY);
+ // Must be called after |key_| is set, so GetKey() works.
+ net_log_.BeginEvent(
+ net::NetLog::TYPE_DISK_CACHE_MEM_ENTRY_IMPL,
+ CreateNetLogEntryCreationCallback(this, true));
+
Open();
- backend_->ModifyStorageSize(GetStorageSize());
+ backend_->ModifyStorageSize(0, static_cast<int32_t>(key.size()));
+ return true;
}
-MemEntryImpl::MemEntryImpl(MemBackendImpl* backend,
- int child_id,
- MemEntryImpl* parent,
- net::NetLog* net_log)
- : MemEntryImpl(backend,
- std::string(), // key
- child_id,
- parent,
- net_log) {
- (*parent_->children_)[child_id] = this;
+void MemEntryImpl::InternalDoom() {
+ net_log_.AddEvent(net::NetLog::TYPE_ENTRY_DOOM);
+ doomed_ = true;
+ if (!ref_count_) {
+ if (type() == kParentEntry) {
+ // If this is a parent entry, we need to doom all the child entries.
+ if (children_.get()) {
+ EntryMap children;
+ children.swap(*children_);
+ for (EntryMap::iterator i = children.begin();
+ i != children.end(); ++i) {
+ // Since a pointer to this object is also saved in the map, avoid
+ // dooming it.
+ if (i->second != this)
+ i->second->Doom();
+ }
+ DCHECK(children_->empty());
+ }
+ } else {
+ // If this is a child entry, detach it from the parent.
+ parent_->DetachChild(child_id_);
+ }
+ delete this;
+ }
}
void MemEntryImpl::Open() {
// Only a parent entry can be opened.
- DCHECK_EQ(PARENT_ENTRY, type());
- ++ref_count_;
- DCHECK_GE(ref_count_, 1);
+ // TODO(hclam): make sure it's correct to not apply the concept of ref
+ // counting to child entry.
+ DCHECK(type() == kParentEntry);
+ ref_count_++;
+ DCHECK_GE(ref_count_, 0);
DCHECK(!doomed_);
}
-bool MemEntryImpl::InUse() const {
- if (type() == PARENT_ENTRY) {
+bool MemEntryImpl::InUse() {
+ if (type() == kParentEntry) {
return ref_count_ > 0;
} else {
- // TODO(gavinp): Can't this just be a DCHECK? How would ref_count_ not be
- // zero?
-
- // A child entry is never in use. Thus one can always be evicted, even while
- // its parent entry is open and in use.
+ // A child entry is always not in use. The consequence is that a child entry
+ // can always be evicted while the associated parent entry is currently in
+ // used (i.e. opened).
return false;
}
}
-int MemEntryImpl::GetStorageSize() const {
- int storage_size = static_cast<int32_t>(key_.size());
- for (const auto& i : data_)
- storage_size += i.size();
- return storage_size;
-}
-
-void MemEntryImpl::UpdateStateOnUse(EntryModified modified_enum) {
- if (!doomed_)
- backend_->OnEntryUpdated(this);
-
- last_used_ = Time::Now();
- if (modified_enum == ENTRY_WAS_MODIFIED)
- last_modified_ = last_used_;
-}
+// ------------------------------------------------------------------------
void MemEntryImpl::Doom() {
- if (!doomed_) {
- doomed_ = true;
- backend_->OnEntryDoomed(this);
- net_log_.AddEvent(net::NetLog::TYPE_ENTRY_DOOM);
+ if (doomed_)
+ return;
+ if (type() == kParentEntry) {
+ // Perform internal doom from the backend if this is a parent entry.
+ backend_->InternalDoomEntry(this);
+ } else {
+ // Manually detach from the backend and perform internal doom.
+ backend_->RemoveFromRankingList(this);
+ InternalDoom();
}
- if (!ref_count_)
- delete this;
}
void MemEntryImpl::Close() {
- DCHECK_EQ(PARENT_ENTRY, type());
- --ref_count_;
+ // Only a parent entry can be closed.
+ DCHECK(type() == kParentEntry);
+ ref_count_--;
DCHECK_GE(ref_count_, 0);
if (!ref_count_ && doomed_)
- delete this;
+ InternalDoom();
}
std::string MemEntryImpl::GetKey() const {
// A child entry doesn't have key so this method should not be called.
- DCHECK_EQ(PARENT_ENTRY, type());
+ DCHECK(type() == kParentEntry);
return key_;
}
@@ -164,9 +180,9 @@ Time MemEntryImpl::GetLastModified() const {
}
int32_t MemEntryImpl::GetDataSize(int index) const {
- if (index < 0 || index >= kNumStreams)
+ if (index < 0 || index >= NUM_STREAMS)
return 0;
- return data_[index].size();
+ return data_size_[index];
}
int MemEntryImpl::ReadData(int index, int offset, IOBuffer* buf, int buf_len,
@@ -244,7 +260,7 @@ int MemEntryImpl::GetAvailableRange(int64_t offset,
net::NetLog::TYPE_SPARSE_GET_RANGE,
CreateNetLogSparseOperationCallback(offset, len));
}
- int result = InternalGetAvailableRange(offset, len, start);
+ int result = GetAvailableRange(offset, len, start);
if (net_log_.IsCapturing()) {
net_log_.EndEvent(
net::NetLog::TYPE_SPARSE_GET_RANGE,
@@ -254,8 +270,8 @@ int MemEntryImpl::GetAvailableRange(int64_t offset,
}
bool MemEntryImpl::CouldBeSparse() const {
- DCHECK_EQ(PARENT_ENTRY, type());
- return (children_.get() != nullptr);
+ DCHECK_EQ(kParentEntry, type());
+ return (children_.get() != NULL);
}
int MemEntryImpl::ReadyForSparseIO(const CompletionCallback& callback) {
@@ -264,73 +280,41 @@ int MemEntryImpl::ReadyForSparseIO(const CompletionCallback& callback) {
// ------------------------------------------------------------------------
-MemEntryImpl::MemEntryImpl(MemBackendImpl* backend,
- const ::std::string& key,
- int child_id,
- MemEntryImpl* parent,
- net::NetLog* net_log)
- : key_(key),
- ref_count_(0),
- child_id_(child_id),
- child_first_pos_(0),
- parent_(parent),
- last_modified_(Time::Now()),
- last_used_(last_modified_),
- backend_(backend),
- doomed_(false) {
- backend_->OnEntryInserted(this);
- net_log_ =
- net::BoundNetLog::Make(net_log, net::NetLog::SOURCE_MEMORY_CACHE_ENTRY);
- net_log_.BeginEvent(net::NetLog::TYPE_DISK_CACHE_MEM_ENTRY_IMPL,
- base::Bind(&NetLogEntryCreationCallback, this));
-}
-
MemEntryImpl::~MemEntryImpl() {
- backend_->ModifyStorageSize(-GetStorageSize());
-
- if (type() == PARENT_ENTRY) {
- if (children_) {
- EntryMap children;
- children_->swap(children);
-
- for (auto& it : children) {
- // Since |this| is stored in the map, it should be guarded against
- // double dooming, which will result in double destruction.
- if (it.second != this)
- it.second->Doom();
- }
- }
- } else {
- parent_->children_->erase(child_id_);
- }
+ for (int i = 0; i < NUM_STREAMS; i++)
+ backend_->ModifyStorageSize(data_size_[i], 0);
+ backend_->ModifyStorageSize(static_cast<int32_t>(key_.size()), 0);
net_log_.EndEvent(net::NetLog::TYPE_DISK_CACHE_MEM_ENTRY_IMPL);
}
int MemEntryImpl::InternalReadData(int index, int offset, IOBuffer* buf,
int buf_len) {
- DCHECK(type() == PARENT_ENTRY || index == kSparseData);
+ DCHECK(type() == kParentEntry || index == kSparseData);
- if (index < 0 || index >= kNumStreams || buf_len < 0)
+ if (index < 0 || index >= NUM_STREAMS)
return net::ERR_INVALID_ARGUMENT;
- int entry_size = data_[index].size();
+ int entry_size = GetDataSize(index);
if (offset >= entry_size || offset < 0 || !buf_len)
return 0;
+ if (buf_len < 0)
+ return net::ERR_INVALID_ARGUMENT;
+
if (offset + buf_len > entry_size)
buf_len = entry_size - offset;
- UpdateStateOnUse(ENTRY_WAS_NOT_MODIFIED);
- std::copy(data_[index].begin() + offset,
- data_[index].begin() + offset + buf_len, buf->data());
+ UpdateRank(false);
+
+ memcpy(buf->data(), &(data_[index])[offset], buf_len);
return buf_len;
}
int MemEntryImpl::InternalWriteData(int index, int offset, IOBuffer* buf,
int buf_len, bool truncate) {
- DCHECK(type() == PARENT_ENTRY || index == kSparseData);
+ DCHECK(type() == kParentEntry || index == kSparseData);
- if (index < 0 || index >= kNumStreams)
+ if (index < 0 || index >= NUM_STREAMS)
return net::ERR_INVALID_ARGUMENT;
if (offset < 0 || buf_len < 0)
@@ -344,32 +328,34 @@ int MemEntryImpl::InternalWriteData(int index, int offset, IOBuffer* buf,
return net::ERR_FAILED;
}
- int old_data_size = data_[index].size();
- if (truncate || old_data_size < offset + buf_len) {
- data_[index].resize(offset + buf_len);
+ // Read the size at this point.
+ int entry_size = GetDataSize(index);
- // Zero fill any hole.
- if (old_data_size < offset) {
- std::fill(data_[index].begin() + old_data_size,
- data_[index].begin() + offset, 0);
- }
+ PrepareTarget(index, offset, buf_len);
- backend_->ModifyStorageSize(data_[index].size() - old_data_size);
+ if (entry_size < offset + buf_len) {
+ backend_->ModifyStorageSize(entry_size, offset + buf_len);
+ data_size_[index] = offset + buf_len;
+ } else if (truncate) {
+ if (entry_size > offset + buf_len) {
+ backend_->ModifyStorageSize(entry_size, offset + buf_len);
+ data_size_[index] = offset + buf_len;
+ }
}
- UpdateStateOnUse(ENTRY_WAS_MODIFIED);
+ UpdateRank(true);
if (!buf_len)
return 0;
- std::copy(buf->data(), buf->data() + buf_len, data_[index].begin() + offset);
+ memcpy(&(data_[index])[offset], buf->data(), buf_len);
return buf_len;
}
int MemEntryImpl::InternalReadSparseData(int64_t offset,
IOBuffer* buf,
int buf_len) {
- DCHECK_EQ(PARENT_ENTRY, type());
+ DCHECK(type() == kParentEntry);
if (!InitSparseInfo())
return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;
@@ -383,7 +369,7 @@ int MemEntryImpl::InternalReadSparseData(int64_t offset,
// Iterate until we have read enough.
while (io_buf->BytesRemaining()) {
- MemEntryImpl* child = GetChild(offset + io_buf->BytesConsumed(), false);
+ MemEntryImpl* child = OpenChild(offset + io_buf->BytesConsumed(), false);
// No child present for that offset.
if (!child)
@@ -399,7 +385,7 @@ int MemEntryImpl::InternalReadSparseData(int64_t offset,
if (net_log_.IsCapturing()) {
net_log_.BeginEvent(
net::NetLog::TYPE_SPARSE_READ_CHILD_DATA,
- CreateNetLogSparseReadWriteCallback(child->net_log_.source(),
+ CreateNetLogSparseReadWriteCallback(child->net_log().source(),
io_buf->BytesRemaining()));
}
int ret = child->ReadData(kSparseData, child_offset, io_buf.get(),
@@ -419,14 +405,15 @@ int MemEntryImpl::InternalReadSparseData(int64_t offset,
io_buf->DidConsume(ret);
}
- UpdateStateOnUse(ENTRY_WAS_NOT_MODIFIED);
+ UpdateRank(false);
+
return io_buf->BytesConsumed();
}
int MemEntryImpl::InternalWriteSparseData(int64_t offset,
IOBuffer* buf,
int buf_len) {
- DCHECK_EQ(PARENT_ENTRY, type());
+ DCHECK(type() == kParentEntry);
if (!InitSparseInfo())
return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;
@@ -442,7 +429,7 @@ int MemEntryImpl::InternalWriteSparseData(int64_t offset,
// child entry until all |buf_len| bytes are written. The write operation can
// start in the middle of an entry.
while (io_buf->BytesRemaining()) {
- MemEntryImpl* child = GetChild(offset + io_buf->BytesConsumed(), true);
+ MemEntryImpl* child = OpenChild(offset + io_buf->BytesConsumed(), true);
int child_offset = ToChildOffset(offset + io_buf->BytesConsumed());
// Find the right amount to write, this evaluates the remaining bytes to
@@ -454,9 +441,10 @@ int MemEntryImpl::InternalWriteSparseData(int64_t offset,
int data_size = child->GetDataSize(kSparseData);
if (net_log_.IsCapturing()) {
- net_log_.BeginEvent(net::NetLog::TYPE_SPARSE_WRITE_CHILD_DATA,
- CreateNetLogSparseReadWriteCallback(
- child->net_log_.source(), write_len));
+ net_log_.BeginEvent(
+ net::NetLog::TYPE_SPARSE_WRITE_CHILD_DATA,
+ CreateNetLogSparseReadWriteCallback(child->net_log().source(),
+ write_len));
}
// Always writes to the child entry. This operation may overwrite data
@@ -484,14 +472,13 @@ int MemEntryImpl::InternalWriteSparseData(int64_t offset,
io_buf->DidConsume(ret);
}
- UpdateStateOnUse(ENTRY_WAS_MODIFIED);
+ UpdateRank(true);
+
return io_buf->BytesConsumed();
}
-int MemEntryImpl::InternalGetAvailableRange(int64_t offset,
- int len,
- int64_t* start) {
- DCHECK_EQ(PARENT_ENTRY, type());
+int MemEntryImpl::GetAvailableRange(int64_t offset, int len, int64_t* start) {
+ DCHECK(type() == kParentEntry);
DCHECK(start);
if (!InitSparseInfo())
@@ -500,7 +487,7 @@ int MemEntryImpl::InternalGetAvailableRange(int64_t offset,
if (offset < 0 || len < 0 || !start)
return net::ERR_INVALID_ARGUMENT;
- MemEntryImpl* current_child = nullptr;
+ MemEntryImpl* current_child = NULL;
// Find the first child and record the number of empty bytes.
int empty = FindNextChild(offset, len, &current_child);
@@ -534,10 +521,38 @@ int MemEntryImpl::InternalGetAvailableRange(int64_t offset,
return 0;
}
+void MemEntryImpl::PrepareTarget(int index, int offset, int buf_len) {
+ int entry_size = GetDataSize(index);
+
+ if (entry_size >= offset + buf_len)
+ return; // Not growing the stored data.
+
+ if (static_cast<int>(data_[index].size()) < offset + buf_len)
+ data_[index].resize(offset + buf_len);
+
+ if (offset <= entry_size)
+ return; // There is no "hole" on the stored data.
+
+ // Cleanup the hole not written by the user. The point is to avoid returning
+ // random stuff later on.
+ memset(&(data_[index])[entry_size], 0, offset - entry_size);
+}
+
+void MemEntryImpl::UpdateRank(bool modified) {
+ Time current = Time::Now();
+ last_used_ = current;
+
+ if (modified)
+ last_modified_ = current;
+
+ if (!doomed_)
+ backend_->UpdateRank(this);
+}
+
bool MemEntryImpl::InitSparseInfo() {
- DCHECK_EQ(PARENT_ENTRY, type());
+ DCHECK(type() == kParentEntry);
- if (!children_) {
+ if (!children_.get()) {
// If we already have some data in sparse stream but we are being
// initialized as a sparse entry, we should fail.
if (GetDataSize(kSparseData))
@@ -551,27 +566,52 @@ bool MemEntryImpl::InitSparseInfo() {
return true;
}
-MemEntryImpl* MemEntryImpl::GetChild(int64_t offset, bool create) {
- DCHECK_EQ(PARENT_ENTRY, type());
+bool MemEntryImpl::InitChildEntry(MemEntryImpl* parent, int child_id,
+ net::NetLog* net_log) {
+ DCHECK(!parent_);
+ DCHECK(!child_id_);
+
+ net_log_ = net::BoundNetLog::Make(net_log,
+ net::NetLog::SOURCE_MEMORY_CACHE_ENTRY);
+ net_log_.BeginEvent(
+ net::NetLog::TYPE_DISK_CACHE_MEM_ENTRY_IMPL,
+ base::Bind(&NetLogChildEntryCreationCallback, parent, child_id_));
+
+ parent_ = parent;
+ child_id_ = child_id;
+ Time current = Time::Now();
+ last_modified_ = current;
+ last_used_ = current;
+ // Insert this to the backend's ranking list.
+ backend_->InsertIntoRankingList(this);
+ return true;
+}
+
+MemEntryImpl* MemEntryImpl::OpenChild(int64_t offset, bool create) {
+ DCHECK(type() == kParentEntry);
int index = ToChildIndex(offset);
EntryMap::iterator i = children_->find(index);
- if (i != children_->end())
+ if (i != children_->end()) {
return i->second;
- if (create)
- return new MemEntryImpl(backend_, index, this, net_log_.net_log());
- return nullptr;
+ } else if (create) {
+ MemEntryImpl* child = new MemEntryImpl(backend_);
+ child->InitChildEntry(this, index, net_log_.net_log());
+ (*children_)[index] = child;
+ return child;
+ }
+ return NULL;
}
int MemEntryImpl::FindNextChild(int64_t offset, int len, MemEntryImpl** child) {
DCHECK(child);
- *child = nullptr;
+ *child = NULL;
int scanned_len = 0;
// This loop tries to find the first existing child.
while (scanned_len < len) {
// This points to the current offset in the child.
int current_child_offset = ToChildOffset(offset + scanned_len);
- MemEntryImpl* current_child = GetChild(offset + scanned_len, false);
+ MemEntryImpl* current_child = OpenChild(offset + scanned_len, false);
if (current_child) {
int child_first_pos = current_child->child_first_pos_;
@@ -594,4 +634,8 @@ int MemEntryImpl::FindNextChild(int64_t offset, int len, MemEntryImpl** child) {
return scanned_len;
}
+void MemEntryImpl::DetachChild(int child_id) {
+ children_->erase(child_id);
+}
+
} // namespace disk_cache
diff --git a/net/disk_cache/memory/mem_entry_impl.h b/net/disk_cache/memory/mem_entry_impl.h
index 11634e6..52a1696 100644
--- a/net/disk_cache/memory/mem_entry_impl.h
+++ b/net/disk_cache/memory/mem_entry_impl.h
@@ -7,15 +7,9 @@
#include <stdint.h>
-#include <string>
-#include <vector>
-
#include "base/containers/hash_tables.h"
-#include "base/containers/linked_list.h"
-#include "base/gtest_prod_util.h"
#include "base/macros.h"
#include "base/memory/scoped_ptr.h"
-#include "base/time/time.h"
#include "net/disk_cache/disk_cache.h"
#include "net/log/net_log.h"
@@ -24,8 +18,8 @@ namespace disk_cache {
class MemBackendImpl;
// This class implements the Entry interface for the memory-only cache. An
-// object of this class represents a single entry on the cache. We use two types
-// of entries, parent and child to support sparse caching.
+// object of this class represents a single entry on the cache. We use two
+// types of entries, parent and child to support sparse caching.
//
// A parent entry is non-sparse until a sparse method is invoked (i.e.
// ReadSparseData, WriteSparseData, GetAvailableRange) when sparse information
@@ -37,62 +31,64 @@ class MemBackendImpl;
// ReadData and WriteData cannot be applied to them. The lifetime of a child
// entry is managed by the parent entry that created it except that the entry
// can be evicted independently. A child entry does not have a key and it is not
-// registered in the backend's entry map.
+// registered in the backend's entry map. It is registered in the backend's
+// ranking list to enable eviction of a partial content.
//
-// A sparse child entry has a fixed maximum size and can be partially
-// filled. There can only be one continous filled region in a sparse entry, as
-// illustrated by the following example:
+// A sparse entry has a fixed maximum size and can be partially filled. There
+// can only be one continuous filled region in a sparse entry, as illustrated by
+// the following example:
// | xxx ooooo |
// x = unfilled region
// o = filled region
-// It is guaranteed that there is at most one unfilled region and one filled
+// It is guaranteed that there is at most one unfilled region and one filled
// region, and the unfilled region (if there is one) is always before the filled
// region. The book keeping for filled region in a sparse entry is done by using
-// the variable |child_first_pos_|.
+// the variable |child_first_pos_| (inclusive).
-class NET_EXPORT_PRIVATE MemEntryImpl final
- : public Entry,
- public base::LinkNode<MemEntryImpl> {
+class MemEntryImpl : public Entry {
public:
enum EntryType {
- PARENT_ENTRY,
- CHILD_ENTRY,
+ kParentEntry,
+ kChildEntry,
};
- // Provided to better document calls to |UpdateStateOnUse()|.
- enum EntryModified {
- ENTRY_WAS_NOT_MODIFIED,
- ENTRY_WAS_MODIFIED,
- };
+ explicit MemEntryImpl(MemBackendImpl* backend);
- // Constructor for parent entries.
- MemEntryImpl(MemBackendImpl* backend,
- const std::string& key,
- net::NetLog* net_log);
+ // Performs the initialization of a EntryImpl that will be added to the
+ // cache.
+ bool CreateEntry(const std::string& key, net::NetLog* net_log);
- // Constructor for child entries.
- MemEntryImpl(MemBackendImpl* backend,
- int child_id,
- MemEntryImpl* parent,
- net::NetLog* net_log);
+ // Permanently destroys this entry.
+ void InternalDoom();
void Open();
- bool InUse() const;
+ bool InUse();
+
+ MemEntryImpl* next() const {
+ return next_;
+ }
- EntryType type() const { return parent_ ? CHILD_ENTRY : PARENT_ENTRY; }
- const std::string& key() const { return key_; }
- const MemEntryImpl* parent() const { return parent_; }
- int child_id() const { return child_id_; }
- base::Time last_used() const { return last_used_; }
+ MemEntryImpl* prev() const {
+ return prev_;
+ }
- // The in-memory size of this entry to use for the purposes of eviction.
- int GetStorageSize() const;
+ void set_next(MemEntryImpl* next) {
+ next_ = next;
+ }
- // Update an entry's position in the backend LRU list and set |last_used_|. If
- // the entry was modified, also update |last_modified_|.
- void UpdateStateOnUse(EntryModified modified_enum);
+ void set_prev(MemEntryImpl* prev) {
+ prev_ = prev;
+ }
- // From disk_cache::Entry:
+ EntryType type() const {
+ return parent_ ? kChildEntry : kParentEntry;
+ }
+
+ const net::BoundNetLog& net_log() {
+ return net_log_;
+ }
+
+ // Entry interface.
void Doom() override;
void Close() override;
std::string GetKey() const override;
@@ -127,15 +123,11 @@ class NET_EXPORT_PRIVATE MemEntryImpl final
int ReadyForSparseIO(const CompletionCallback& callback) override;
private:
- MemEntryImpl(MemBackendImpl* backend,
- const std::string& key,
- int child_id,
- MemEntryImpl* parent,
- net::NetLog* net_log);
-
typedef base::hash_map<int, MemEntryImpl*> EntryMap;
- static const int kNumStreams = 3;
+ enum {
+ NUM_STREAMS = 3
+ };
~MemEntryImpl() override;
@@ -146,35 +138,53 @@ class NET_EXPORT_PRIVATE MemEntryImpl final
bool truncate);
int InternalReadSparseData(int64_t offset, IOBuffer* buf, int buf_len);
int InternalWriteSparseData(int64_t offset, IOBuffer* buf, int buf_len);
- int InternalGetAvailableRange(int64_t offset, int len, int64_t* start);
+
+ // Old Entry interface.
+ int GetAvailableRange(int64_t offset, int len, int64_t* start);
+
+ // Grows and cleans up the data buffer.
+ void PrepareTarget(int index, int offset, int buf_len);
+
+ // Updates ranking information.
+ void UpdateRank(bool modified);
// Initializes the children map and sparse info. This method is only called
// on a parent entry.
bool InitSparseInfo();
+ // Performs the initialization of a MemEntryImpl as a child entry.
+ // |parent| is the pointer to the parent entry. |child_id| is the ID of
+ // the new child.
+ bool InitChildEntry(MemEntryImpl* parent, int child_id, net::NetLog* net_log);
+
// Returns an entry responsible for |offset|. The returned entry can be a
// child entry or this entry itself if |offset| points to the first range.
// If such entry does not exist and |create| is true, a new child entry is
// created.
- MemEntryImpl* GetChild(int64_t offset, bool create);
+ MemEntryImpl* OpenChild(int64_t offset, bool create);
// Finds the first child located within the range [|offset|, |offset + len|).
// Returns the number of bytes ahead of |offset| to reach the first available
// bytes in the entry. The first child found is output to |child|.
int FindNextChild(int64_t offset, int len, MemEntryImpl** child);
+ // Removes child indexed by |child_id| from the children map.
+ void DetachChild(int child_id);
+
std::string key_;
- std::vector<char> data_[kNumStreams]; // User data.
+ std::vector<char> data_[NUM_STREAMS]; // User data.
+ int32_t data_size_[NUM_STREAMS];
int ref_count_;
int child_id_; // The ID of a child entry.
int child_first_pos_; // The position of the first byte in a child
// entry.
- // Pointer to the parent entry, or nullptr if this entry is a parent entry.
- MemEntryImpl* parent_;
+ MemEntryImpl* next_; // Pointers for the LRU list.
+ MemEntryImpl* prev_;
+ MemEntryImpl* parent_; // Pointer to the parent entry.
scoped_ptr<EntryMap> children_;
- base::Time last_modified_;
+ base::Time last_modified_; // LRU information.
base::Time last_used_;
MemBackendImpl* backend_; // Back pointer to the cache.
bool doomed_; // True if this entry was removed from the cache.
diff --git a/net/disk_cache/memory/mem_rankings.cc b/net/disk_cache/memory/mem_rankings.cc
new file mode 100644
index 0000000..ba5e00b
--- /dev/null
+++ b/net/disk_cache/memory/mem_rankings.cc
@@ -0,0 +1,67 @@
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/disk_cache/memory/mem_rankings.h"
+
+#include "base/logging.h"
+#include "net/disk_cache/memory/mem_entry_impl.h"
+
+namespace disk_cache {
+
+MemRankings::~MemRankings() {
+ DCHECK(!head_ && !tail_);
+}
+
+void MemRankings::Insert(MemEntryImpl* node) {
+ if (head_)
+ head_->set_prev(node);
+
+ if (!tail_)
+ tail_ = node;
+
+ node->set_prev(NULL);
+ node->set_next(head_);
+ head_ = node;
+}
+
+void MemRankings::Remove(MemEntryImpl* node) {
+ MemEntryImpl* prev = node->prev();
+ MemEntryImpl* next = node->next();
+
+ if (head_ == node)
+ head_ = next;
+
+ if (tail_ == node)
+ tail_ = prev;
+
+ if (prev)
+ prev->set_next(next);
+
+ if (next)
+ next->set_prev(prev);
+
+ node->set_next(NULL);
+ node->set_prev(NULL);
+}
+
+void MemRankings::UpdateRank(MemEntryImpl* node) {
+ Remove(node);
+ Insert(node);
+}
+
+MemEntryImpl* MemRankings::GetNext(MemEntryImpl* node) {
+ if (!node)
+ return head_;
+
+ return node->next();
+}
+
+MemEntryImpl* MemRankings::GetPrev(MemEntryImpl* node) {
+ if (!node)
+ return tail_;
+
+ return node->prev();
+}
+
+} // namespace disk_cache
diff --git a/net/disk_cache/memory/mem_rankings.h b/net/disk_cache/memory/mem_rankings.h
new file mode 100644
index 0000000..d660cab
--- /dev/null
+++ b/net/disk_cache/memory/mem_rankings.h
@@ -0,0 +1,44 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// See net/disk_cache/disk_cache.h for the public interface.
+
+#ifndef NET_DISK_CACHE_MEMORY_MEM_RANKINGS_H_
+#define NET_DISK_CACHE_MEMORY_MEM_RANKINGS_H_
+
+#include "base/macros.h"
+
+namespace disk_cache {
+
+class MemEntryImpl;
+
+// This class handles the ranking information for the memory-only cache.
+class MemRankings {
+ public:
+ MemRankings() : head_(NULL), tail_(NULL) {}
+ ~MemRankings();
+
+ // Inserts a given entry at the head of the queue.
+ void Insert(MemEntryImpl* node);
+
+ // Removes a given entry from the LRU list.
+ void Remove(MemEntryImpl* node);
+
+ // Moves a given entry to the head.
+ void UpdateRank(MemEntryImpl* node);
+
+ // Iterates through the list.
+ MemEntryImpl* GetNext(MemEntryImpl* node);
+ MemEntryImpl* GetPrev(MemEntryImpl* node);
+
+ private:
+ MemEntryImpl* head_;
+ MemEntryImpl* tail_;
+
+ DISALLOW_COPY_AND_ASSIGN(MemRankings);
+};
+
+} // namespace disk_cache
+
+#endif // NET_DISK_CACHE_MEMORY_MEM_RANKINGS_H_
diff --git a/net/net.gypi b/net/net.gypi
index 2bb39df..994a613 100644
--- a/net/net.gypi
+++ b/net/net.gypi
@@ -715,6 +715,8 @@
'disk_cache/memory/mem_backend_impl.h',
'disk_cache/memory/mem_entry_impl.cc',
'disk_cache/memory/mem_entry_impl.h',
+ 'disk_cache/memory/mem_rankings.cc',
+ 'disk_cache/memory/mem_rankings.h',
'disk_cache/net_log_parameters.cc',
'disk_cache/net_log_parameters.h',
'disk_cache/simple/simple_backend_impl.cc',