summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--net/disk_cache/backend_impl.cc281
-rw-r--r--net/disk_cache/backend_impl.h41
-rw-r--r--net/disk_cache/backend_unittest.cc8
-rw-r--r--net/disk_cache/disk_cache.h8
-rw-r--r--net/disk_cache/disk_format.h12
-rw-r--r--net/disk_cache/entry_impl.cc55
-rw-r--r--net/disk_cache/entry_impl.h12
-rw-r--r--net/disk_cache/eviction.cc259
-rw-r--r--net/disk_cache/eviction.h17
-rw-r--r--net/disk_cache/rankings.cc24
-rw-r--r--net/disk_cache/rankings.h29
-rw-r--r--net/disk_cache/stats.cc1
-rw-r--r--net/disk_cache/stats.h1
13 files changed, 638 insertions, 110 deletions
diff --git a/net/disk_cache/backend_impl.cc b/net/disk_cache/backend_impl.cc
index 52d8f65..9f560cc 100644
--- a/net/disk_cache/backend_impl.cc
+++ b/net/disk_cache/backend_impl.cc
@@ -17,6 +17,9 @@
#include "net/disk_cache/hash.h"
#include "net/disk_cache/file.h"
+// Uncomment this to use the new eviction algorithm.
+// #define USE_NEW_EVICTION
+
using base::Time;
using base::TimeDelta;
@@ -184,6 +187,10 @@ bool BackendImpl::Init() {
if (init_)
return false;
+#ifdef USE_NEW_EVICTION
+ new_eviction_ = true;
+#endif
+
bool create_files = false;
if (!InitBackingStore(&create_files)) {
ReportError(ERR_STORAGE_ERROR);
@@ -231,7 +238,7 @@ bool BackendImpl::Init() {
if (!stats_.Init(this, &data_->header.stats))
return false;
- disabled_ = !rankings_.Init(this);
+ disabled_ = !rankings_.Init(this, new_eviction_);
eviction_.Init(this);
return !disabled_;
@@ -256,7 +263,16 @@ BackendImpl::~BackendImpl() {
int32 BackendImpl::GetEntryCount() const {
if (!index_)
return 0;
- return data_->header.num_entries;
+ // num_entries includes entries already evicted.
+ int32 not_deleted = data_->header.num_entries -
+ data_->header.lru.sizes[Rankings::DELETED];
+
+ if (not_deleted < 0) {
+ NOTREACHED();
+ not_deleted = 0;
+ }
+
+ return not_deleted;
}
bool BackendImpl::OpenEntry(const std::string& key, Entry** entry) {
@@ -272,6 +288,13 @@ bool BackendImpl::OpenEntry(const std::string& key, Entry** entry) {
return false;
}
+ if (ENTRY_NORMAL != cache_entry->entry()->Data()->state) {
+ // The entry was already evicted.
+ cache_entry->Release();
+ stats_.OnEvent(Stats::OPEN_MISS);
+ return false;
+ }
+
eviction_.OnOpenEntry(cache_entry);
DCHECK(entry);
*entry = cache_entry;
@@ -285,16 +308,24 @@ bool BackendImpl::CreateEntry(const std::string& key, Entry** entry) {
if (disabled_ || key.empty())
return false;
+ DCHECK(entry);
+ *entry = NULL;
+
Time start = Time::Now();
uint32 hash = Hash(key);
scoped_refptr<EntryImpl> parent;
Addr entry_address(data_->table[hash & mask_]);
if (entry_address.is_initialized()) {
+ // We have an entry already. It could be the one we are looking for, or just
+ // a hash conflict.
+ EntryImpl* old_entry = MatchEntry(key, hash, false);
+ if (old_entry)
+ return ResurrectEntry(old_entry, entry);
+
EntryImpl* parent_entry = MatchEntry(key, hash, true);
if (!parent_entry) {
- stats_.OnEvent(Stats::CREATE_MISS);
- Trace("create entry miss ");
+ NOTREACHED();
return false;
}
parent.swap(&parent_entry);
@@ -339,14 +370,11 @@ bool BackendImpl::CreateEntry(const std::string& key, Entry** entry) {
block_files_.GetFile(entry_address)->Store(cache_entry->entry());
block_files_.GetFile(node_address)->Store(cache_entry->rankings());
- data_->header.num_entries++;
- DCHECK(data_->header.num_entries > 0);
+ IncreaseNumEntries();
eviction_.OnCreateEntry(cache_entry);
if (!parent.get())
data_->table[hash & mask_] = entry_address.value();
- DCHECK(entry);
- *entry = NULL;
cache_entry.swap(reinterpret_cast<EntryImpl**>(entry));
UMA_HISTOGRAM_TIMES("DiskCache.CreateTime", Time::Now() - start);
@@ -451,8 +479,8 @@ bool BackendImpl::OpenNextEntry(void** iter, Entry** next_entry) {
}
void BackendImpl::EndEnumeration(void** iter) {
- Rankings::ScopedRankingsBlock rankings(&rankings_,
- reinterpret_cast<CacheRankingsBlock*>(*iter));
+ scoped_ptr<Rankings::Iterator> iterator(
+ reinterpret_cast<Rankings::Iterator*>(*iter));
*iter = NULL;
}
@@ -600,11 +628,26 @@ void BackendImpl::InternalDoomEntry(EntryImpl* entry) {
data_->table[hash & mask_] = child;
}
- data_->header.num_entries--;
- DCHECK(data_->header.num_entries >= 0);
+ if (!new_eviction_) {
+ DecreaseNumEntries();
+ }
+
stats_.OnEvent(Stats::DOOM_ENTRY);
}
+// An entry may be linked on the DELETED list for a while after being doomed.
+// This function is called when we want to remove it.
+void BackendImpl::RemoveEntry(EntryImpl* entry) {
+ if (!new_eviction_)
+ return;
+
+ DCHECK(ENTRY_DOOMED == entry->entry()->Data()->state);
+
+ Trace("Remove entry 0x%p", entry);
+ eviction_.OnDestroyEntry(entry);
+ DecreaseNumEntries();
+}
+
void BackendImpl::CacheEntryDestroyed() {
DecreaseNumRefs();
}
@@ -742,6 +785,10 @@ bool BackendImpl::CreateBackingStore(disk_cache::File* file) {
IndexHeader header;
header.table_len = DesiredIndexTableLen(max_size_);
+ // We need file version 2.1 for the new eviction algorithm.
+ if (new_eviction_)
+ header.version = 0x20001;
+
if (!file->Write(&header, sizeof(header), 0))
return false;
@@ -962,49 +1009,153 @@ bool BackendImpl::OpenFollowingEntry(bool forward, void** iter,
if (disabled_)
return false;
- Rankings::ScopedRankingsBlock rankings(&rankings_,
- reinterpret_cast<CacheRankingsBlock*>(*iter));
- CacheRankingsBlock* next_block = forward ?
- rankings_.GetNext(rankings.get(), Rankings::NO_USE) :
- rankings_.GetPrev(rankings.get(), Rankings::NO_USE);
- Rankings::ScopedRankingsBlock next(&rankings_, next_block);
+ DCHECK(iter);
+ DCHECK(next_entry);
*next_entry = NULL;
+
+ const int kListsToSearch = 3;
+ scoped_refptr<EntryImpl> entries[kListsToSearch];
+ scoped_ptr<Rankings::Iterator> iterator(
+ reinterpret_cast<Rankings::Iterator*>(*iter));
*iter = NULL;
- if (!next.get())
+
+ if (!iterator.get()) {
+ iterator.reset(new Rankings::Iterator(&rankings_));
+ bool ret = false;
+
+ // Get an entry from each list.
+ for (int i = 0; i < kListsToSearch; i++) {
+ EntryImpl* temp = NULL;
+ ret |= OpenFollowingEntryFromList(forward, static_cast<Rankings::List>(i),
+ &iterator->nodes[i], &temp);
+ entries[i].swap(&temp); // The entry was already addref'd.
+ }
+ if (!ret)
+ return false;
+ } else {
+ // Get the next entry from the last list, and the actual entries for the
+ // elements on the other lists.
+ for (int i = 0; i < kListsToSearch; i++) {
+ EntryImpl* temp = NULL;
+ if (iterator->list == i) {
+ OpenFollowingEntryFromList(forward, iterator->list,
+ &iterator->nodes[i], &temp);
+ } else {
+ temp = GetEnumeratedEntry(iterator->nodes[i]);
+ }
+
+ entries[i].swap(&temp); // The entry was already addref'd.
+ }
+ }
+
+ int newest = -1;
+ int oldest = -1;
+ Time access_times[kListsToSearch];
+ for (int i = 0; i < kListsToSearch; i++) {
+ if (entries[i].get()) {
+ access_times[i] = entries[i]->GetLastUsed();
+ if (newest < 0) {
+ DCHECK(oldest < 0);
+ newest = oldest = i;
+ continue;
+ }
+ if (access_times[i] > access_times[newest])
+ newest = i;
+ if (access_times[i] < access_times[oldest])
+ oldest = i;
+ }
+ }
+
+ if (newest < 0 || oldest < 0)
+ return false;
+
+ if (forward) {
+ entries[newest].swap(reinterpret_cast<EntryImpl**>(next_entry));
+ iterator->list = static_cast<Rankings::List>(newest);
+ } else {
+ entries[oldest].swap(reinterpret_cast<EntryImpl**>(next_entry));
+ iterator->list = static_cast<Rankings::List>(oldest);
+ }
+
+ *iter = iterator.release();
+ return true;
+}
+
+bool BackendImpl::OpenFollowingEntryFromList(bool forward, Rankings::List list,
+ CacheRankingsBlock** from_entry,
+ EntryImpl** next_entry) {
+ if (disabled_)
+ return false;
+
+ if (!new_eviction_ && Rankings::NO_USE != list)
+ return false;
+
+ Rankings::ScopedRankingsBlock rankings(&rankings_, *from_entry);
+ CacheRankingsBlock* next_block = forward ?
+ rankings_.GetNext(rankings.get(), list) :
+ rankings_.GetPrev(rankings.get(), list);
+ Rankings::ScopedRankingsBlock next(&rankings_, next_block);
+ *from_entry = NULL;
+
+ *next_entry = GetEnumeratedEntry(next.get());
+ if (!*next_entry)
return false;
+ *from_entry = next.release();
+ return true;
+}
+
+EntryImpl* BackendImpl::GetEnumeratedEntry(CacheRankingsBlock* next) {
+ if (!next)
+ return NULL;
+
scoped_refptr<EntryImpl> entry;
+ EntryImpl* temp = NULL;
+
if (next->Data()->pointer) {
entry = reinterpret_cast<EntryImpl*>(next->Data()->pointer);
- } else {
- bool dirty;
- EntryImpl* temp = NULL;
- if (NewEntry(Addr(next->Data()->contents), &temp, &dirty))
- return false;
entry.swap(&temp);
+ return temp;
+ }
- if (dirty) {
- // We cannot trust this entry. Call MatchEntry to go through the regular
- // path and take the appropriate action.
- std::string key = entry->GetKey();
- uint32 hash = entry->GetHash();
- entry = NULL; // Release the entry.
- temp = MatchEntry(key, hash, false);
- if (temp)
- temp->Release();
+ bool dirty;
+ if (NewEntry(Addr(next->Data()->contents), &temp, &dirty))
+ return NULL;
+ entry.swap(&temp);
+
+ if (dirty) {
+ // We cannot trust this entry. Call MatchEntry to go through the regular
+ // path and take the appropriate action.
+ std::string key = entry->GetKey();
+ uint32 hash = entry->GetHash();
+ entry = NULL; // Release the entry.
+ temp = MatchEntry(key, hash, false);
+ if (temp)
+ temp->Release();
- return false;
- }
+ return NULL;
+ }
- entry.swap(&temp);
- temp = EntryImpl::Update(temp); // Update returns an adref'd entry.
- entry.swap(&temp);
- if (!entry.get())
- return false;
+ entry.swap(&temp);
+ return EntryImpl::Update(temp); // Update returns an adref'd entry.
+}
+
+bool BackendImpl::ResurrectEntry(EntryImpl* deleted_entry, Entry** entry) {
+ if (ENTRY_NORMAL == deleted_entry->entry()->Data()->state) {
+ deleted_entry->Release();
+ stats_.OnEvent(Stats::CREATE_MISS);
+ Trace("create entry miss ");
+ return false;
}
- entry.swap(reinterpret_cast<EntryImpl**>(next_entry));
- *iter = next.release();
+ // We are attempting to create an entry and found out that the entry was
+ // previously deleted.
+
+ eviction_.OnCreateEntry(deleted_entry);
+ *entry = deleted_entry;
+
+ stats_.OnEvent(Stats::CREATE_HIT);
+ Trace("Resurrect entry hit ");
return true;
}
@@ -1017,8 +1168,8 @@ void BackendImpl::DestroyInvalidEntry(Addr address, EntryImpl* entry) {
eviction_.OnDoomEntry(entry);
entry->InternalDoom();
- data_->header.num_entries--;
- DCHECK(data_->header.num_entries >= 0);
+ if (!new_eviction_)
+ DecreaseNumEntries();
stats_.OnEvent(Stats::INVALID_ENTRY);
}
@@ -1049,6 +1200,19 @@ void BackendImpl::DecreaseNumRefs() {
RestartCache();
}
+void BackendImpl::IncreaseNumEntries() {
+ data_->header.num_entries++;
+ DCHECK(data_->header.num_entries > 0);
+}
+
+void BackendImpl::DecreaseNumEntries() {
+ data_->header.num_entries--;
+ if (data_->header.num_entries < 0) {
+ NOTREACHED();
+ data_->header.num_entries = 0;
+ }
+}
+
void BackendImpl::LogStats() {
StatsItems stats;
GetStats(&stats);
@@ -1058,6 +1222,14 @@ void BackendImpl::LogStats() {
}
}
+void BackendImpl::UpgradeTo2_1() {
+ // 2.1 is basically the same as 2.0, except that new fields are actually
+ // updated by the new eviction algorithm.
+ DCHECK(0x20000 == data_->header.version);
+ data_->header.version = 0x20001;
+ data_->header.lru.sizes[Rankings::NO_USE] = data_->header.num_entries;
+}
+
bool BackendImpl::CheckIndex() {
if (!data_) {
LOG(ERROR) << "Unable to map Index file";
@@ -1070,10 +1242,23 @@ bool BackendImpl::CheckIndex() {
return false;
}
- if (kIndexMagic != data_->header.magic ||
- kCurrentVersion != data_->header.version) {
- LOG(ERROR) << "Invalid file version or magic";
- return false;
+ if (new_eviction_) {
+ // We support versions 2.0 and 2.1, upgrading 2.0 to 2.1.
+ if (kIndexMagic != data_->header.magic ||
+ kCurrentVersion >> 16 != data_->header.version >> 16) {
+ LOG(ERROR) << "Invalid file version or magic";
+ return false;
+ }
+ if (kCurrentVersion == data_->header.version) {
+ // We need file version 2.1 for the new eviction algorithm.
+ UpgradeTo2_1();
+ }
+ } else {
+ if (kIndexMagic != data_->header.magic ||
+ kCurrentVersion != data_->header.version) {
+ LOG(ERROR) << "Invalid file version or magic";
+ return false;
+ }
}
if (!data_->header.table_len) {
diff --git a/net/disk_cache/backend_impl.h b/net/disk_cache/backend_impl.h
index 89be795..f2ec98d 100644
--- a/net/disk_cache/backend_impl.h
+++ b/net/disk_cache/backend_impl.h
@@ -4,8 +4,8 @@
// See net/disk_cache/disk_cache.h for the public interface of the cache.
-#ifndef NET_DISK_CACHE_BACKEND_IMPL_H__
-#define NET_DISK_CACHE_BACKEND_IMPL_H__
+#ifndef NET_DISK_CACHE_BACKEND_IMPL_H_
+#define NET_DISK_CACHE_BACKEND_IMPL_H_
#include "base/timer.h"
#include "net/disk_cache/block_files.h"
@@ -24,11 +24,13 @@ class BackendImpl : public Backend {
public:
explicit BackendImpl(const std::wstring& path)
: path_(path), block_files_(path), mask_(0), max_size_(0), init_(false),
- restarted_(false), unit_test_(false), read_only_(false) {}
+ restarted_(false), unit_test_(false), read_only_(false),
+ new_eviction_(false) {}
// mask can be used to limit the usable size of the hash table, for testing.
BackendImpl(const std::wstring& path, uint32 mask)
: path_(path), block_files_(path), mask_(mask), max_size_(0),
- init_(false), restarted_(false), unit_test_(false), read_only_(false) {}
+ init_(false), restarted_(false), unit_test_(false), read_only_(false),
+ new_eviction_(false) {}
~BackendImpl();
// Performs general initialization for this current instance of the cache.
@@ -77,9 +79,12 @@ class BackendImpl : public Backend {
// method checks it and takes the appropriate action.
void RecoveredEntry(CacheRankingsBlock* rankings);
- // Permanently deletes an entry.
+ // Permanently deletes an entry, but still keeps track of it.
void InternalDoomEntry(EntryImpl* entry);
+ // Removes all references to this entry.
+ void RemoveEntry(EntryImpl* entry);
+
// This method must be called whenever an entry is released for the last time.
void CacheEntryDestroyed();
@@ -144,12 +149,24 @@ class BackendImpl : public Backend {
// Returns a given entry from the cache. The entry to match is determined by
// key and hash, and the returned entry may be the matched one or it's parent
// on the list of entries with the same hash (or bucket).
- EntryImpl* MatchEntry(const std::string& key, uint32 hash,
- bool find_parent);
+ EntryImpl* MatchEntry(const std::string& key, uint32 hash, bool find_parent);
// Opens the next or previous entry on a cache iteration.
bool OpenFollowingEntry(bool forward, void** iter, Entry** next_entry);
+ // Opens the next or previous entry on a single list. If successful,
+ // |from_entry| will be updated to point to the new entry, otherwise it will
+ // be set to NULL; in other words, it is used as an explicit iterator.
+ bool OpenFollowingEntryFromList(bool forward, Rankings::List list,
+ CacheRankingsBlock** from_entry,
+ EntryImpl** next_entry);
+
+ // Returns the entry that is pointed by |next|.
+ EntryImpl* GetEnumeratedEntry(CacheRankingsBlock* next);
+
+ // Re-opens an entry that was previously deleted.
+ bool ResurrectEntry(EntryImpl* deleted_entry, Entry** entry);
+
void DestroyInvalidEntry(Addr address, EntryImpl* entry);
// Handles the used storage count.
@@ -159,10 +176,15 @@ class BackendImpl : public Backend {
// Update the number of referenced cache entries.
void IncreaseNumRefs();
void DecreaseNumRefs();
+ void IncreaseNumEntries();
+ void DecreaseNumEntries();
// Dumps current cache statistics to the log.
void LogStats();
+ // Upgrades the index file to version 2.1.
+ void UpgradeTo2_1();
+
// Performs basic checks on the index file. Returns false on failure.
bool CheckIndex();
@@ -181,13 +203,14 @@ class BackendImpl : public Backend {
int32 max_size_; // Maximum data size for this instance.
Eviction eviction_; // Handler of the eviction algorithm.
int num_refs_; // Number of referenced cache entries.
- int max_refs_; // Max number of eferenced cache entries.
+ int max_refs_; // Max number of referenced cache entries.
int num_pending_io_; // Number of pending IO operations;
bool init_; // controls the initialization of the system.
bool restarted_;
bool unit_test_;
bool read_only_; // Prevents updates of the rankings data (used by tools).
bool disabled_;
+ bool new_eviction_; // What eviction algorithm should be used.
Stats stats_; // Usage statistcs.
base::RepeatingTimer<BackendImpl> timer_; // Usage timer.
@@ -198,5 +221,5 @@ class BackendImpl : public Backend {
} // namespace disk_cache
-#endif // NET_DISK_CACHE_BACKEND_IMPL_H__
+#endif // NET_DISK_CACHE_BACKEND_IMPL_H_
diff --git a/net/disk_cache/backend_unittest.cc b/net/disk_cache/backend_unittest.cc
index 3c9b166..5e09a63 100644
--- a/net/disk_cache/backend_unittest.cc
+++ b/net/disk_cache/backend_unittest.cc
@@ -259,11 +259,7 @@ void DiskCacheBackendTest::BackendSetSize() {
SetMaxSize(cache_size);
- // Verify that the cache is 95% full.
- ASSERT_TRUE(cache_->OpenEntry(first, &entry));
- EXPECT_EQ(cache_size * 3 / 4, entry->GetDataSize(0));
- EXPECT_EQ(cache_size / 5, entry->GetDataSize(1));
- entry->Close();
+ // The cache is 95% full.
ASSERT_TRUE(cache_->CreateEntry(second, &entry));
EXPECT_EQ(cache_size / 10, entry->WriteData(0, 0, buffer, cache_size / 10,
@@ -692,6 +688,8 @@ void DiskCacheBackendTest::BackendDoomBetween() {
ASSERT_TRUE(cache_->CreateEntry("fourth", &entry));
entry->Close();
+ ASSERT_TRUE(cache_->OpenEntry("fourth", &entry));
+ entry->Close();
PlatformThread::Sleep(20);
Time final = Time::Now();
diff --git a/net/disk_cache/disk_cache.h b/net/disk_cache/disk_cache.h
index 7d6e41a..5173ab9 100644
--- a/net/disk_cache/disk_cache.h
+++ b/net/disk_cache/disk_cache.h
@@ -3,10 +3,10 @@
// found in the LICENSE file.
// Defines the public interface of the disk cache. For more details see
-// http://wiki/Main/ChromeDiskCacheBackend
+// http://dev.chromium.org/developers/design-documents/disk-cache
-#ifndef NET_DISK_CACHE_DISK_CACHE_H__
-#define NET_DISK_CACHE_DISK_CACHE_H__
+#ifndef NET_DISK_CACHE_DISK_CACHE_H_
+#define NET_DISK_CACHE_DISK_CACHE_H_
#include <string>
#include <vector>
@@ -183,5 +183,5 @@ class Entry {
} // namespace disk_cache
-#endif // NET_DISK_CACHE_DISK_CACHE_H__
+#endif // NET_DISK_CACHE_DISK_CACHE_H_
diff --git a/net/disk_cache/disk_format.h b/net/disk_cache/disk_format.h
index 7dc2e7c..f089d7b 100644
--- a/net/disk_cache/disk_format.h
+++ b/net/disk_cache/disk_format.h
@@ -52,8 +52,8 @@
// being currently used, it means that the entry was not properly closed on a
// previous run, so it is discarded.
-#ifndef NET_DISK_CACHE_DISK_FORMAT_H__
-#define NET_DISK_CACHE_DISK_FORMAT_H__
+#ifndef NET_DISK_CACHE_DISK_FORMAT_H_
+#define NET_DISK_CACHE_DISK_FORMAT_H_
#include "base/basictypes.h"
@@ -66,12 +66,14 @@ const uint32 kIndexMagic = 0xC103CAC3;
const uint32 kCurrentVersion = 0x20000; // Version 2.0.
struct LruData {
+ int32 pad1[3];
+ int32 sizes[5];
CacheAddr heads[5];
CacheAddr tails[5];
CacheAddr transaction; // In-flight operation target.
int32 operation; // Actual in-flight operation.
int32 operation_list; // In-flight operation list.
- int32 pad[7];
+ int32 pad2[7];
};
// Header for the master index file.
@@ -86,7 +88,7 @@ struct IndexHeader {
int32 table_len; // Actual size of the table (0 == kIndexTablesize).
int32 crash; // Signals a previous crash.
int32 experiment; // Id of an ongoing test.
- int32 pad[62];
+ int32 pad[54];
LruData lru; // Eviction control data.
IndexHeader() {
memset(this, 0, sizeof(*this));
@@ -188,5 +190,5 @@ COMPILE_ASSERT(sizeof(BlockFileHeader) == kBlockHeaderSize, bad_header);
} // namespace disk_cache
-#endif // NET_DISK_CACHE_DISK_FORMAT_H__
+#endif // NET_DISK_CACHE_DISK_FORMAT_H_
diff --git a/net/disk_cache/entry_impl.cc b/net/disk_cache/entry_impl.cc
index 9db5647..361a7ee 100644
--- a/net/disk_cache/entry_impl.cc
+++ b/net/disk_cache/entry_impl.cc
@@ -92,27 +92,7 @@ EntryImpl::EntryImpl(BackendImpl* backend, Addr address)
// written before).
EntryImpl::~EntryImpl() {
if (doomed_) {
- UMA_HISTOGRAM_COUNTS("DiskCache.DeleteHeader", GetDataSize(0));
- UMA_HISTOGRAM_COUNTS("DiskCache.DeleteData", GetDataSize(1));
- for (int index = 0; index < NUM_STREAMS; index++) {
- Addr address(entry_.Data()->data_addr[index]);
- if (address.is_initialized()) {
- DeleteData(address, index);
- backend_->ModifyStorageSize(entry_.Data()->data_size[index] -
- unreported_size_[index], 0);
- }
- }
- Addr address(entry_.Data()->long_key);
- DeleteData(address, kKeyFileIndex);
- backend_->ModifyStorageSize(entry_.Data()->key_len, 0);
-
- memset(node_.buffer(), 0, node_.size());
- memset(entry_.buffer(), 0, entry_.size());
- node_.Store();
- entry_.Store();
-
- backend_->DeleteBlock(node_.address(), false);
- backend_->DeleteBlock(entry_.address(), false);
+ DeleteEntryData(true);
} else {
bool ret = true;
for (int index = 0; index < NUM_STREAMS; index++) {
@@ -474,6 +454,39 @@ void EntryImpl::InternalDoom() {
doomed_ = true;
}
+void EntryImpl::DeleteEntryData(bool everything) {
+ DCHECK(doomed_ || !everything);
+
+ UMA_HISTOGRAM_COUNTS("DiskCache.DeleteHeader", GetDataSize(0));
+ UMA_HISTOGRAM_COUNTS("DiskCache.DeleteData", GetDataSize(1));
+ for (int index = 0; index < NUM_STREAMS; index++) {
+ Addr address(entry_.Data()->data_addr[index]);
+ if (address.is_initialized()) {
+ DeleteData(address, index);
+ backend_->ModifyStorageSize(entry_.Data()->data_size[index] -
+ unreported_size_[index], 0);
+ }
+ }
+
+ if (!everything)
+ return;
+
+ // Remove all traces of this entry.
+ backend_->RemoveEntry(this);
+
+ Addr address(entry_.Data()->long_key);
+ DeleteData(address, kKeyFileIndex);
+ backend_->ModifyStorageSize(entry_.Data()->key_len, 0);
+
+ memset(node_.buffer(), 0, node_.size());
+ memset(entry_.buffer(), 0, entry_.size());
+ node_.Store();
+ entry_.Store();
+
+ backend_->DeleteBlock(node_.address(), false);
+ backend_->DeleteBlock(entry_.address(), false);
+}
+
CacheAddr EntryImpl::GetNextAddress() {
return entry_.Data()->next;
}
diff --git a/net/disk_cache/entry_impl.h b/net/disk_cache/entry_impl.h
index 6d3d614..5a467e4 100644
--- a/net/disk_cache/entry_impl.h
+++ b/net/disk_cache/entry_impl.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef NET_DISK_CACHE_ENTRY_IMPL_H__
-#define NET_DISK_CACHE_ENTRY_IMPL_H__
+#ifndef NET_DISK_CACHE_ENTRY_IMPL_H_
+#define NET_DISK_CACHE_ENTRY_IMPL_H_
#include "net/disk_cache/disk_cache.h"
#include "net/disk_cache/storage_block.h"
@@ -52,9 +52,13 @@ class EntryImpl : public Entry, public base::RefCounted<EntryImpl> {
// Returns true if this entry matches the lookup arguments.
bool IsSameEntry(const std::string& key, uint32 hash);
- // Permamently destroys this entry
+ // Permanently destroys this entry.
void InternalDoom();
+ // Deletes this entry from disk. If |everything| is false, only the user data
+ // will be removed, leaving the key and control data intact.
+ void DeleteEntryData(bool everything);
+
// Returns the address of the next entry on the list of entries with the same
// hash.
CacheAddr GetNextAddress();
@@ -148,5 +152,5 @@ class EntryImpl : public Entry, public base::RefCounted<EntryImpl> {
} // namespace disk_cache
-#endif // NET_DISK_CACHE_ENTRY_IMPL_H__
+#endif // NET_DISK_CACHE_ENTRY_IMPL_H_
diff --git a/net/disk_cache/eviction.cc b/net/disk_cache/eviction.cc
index 11bce72..a7eb9fc 100644
--- a/net/disk_cache/eviction.cc
+++ b/net/disk_cache/eviction.cc
@@ -2,6 +2,30 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+// The eviction policy is a very simple pure LRU, so the elements at the end of
+// the list are evicted until kCleanUpMargin free space is available. There is
+// only one list in use (Rankings::NO_USE), and elements are sent to the front
+// of the list whenever they are accessed.
+
+// The new (in-development) eviction policy adds re-use as a factor to evict
+// an entry. The story so far:
+
+// Entries are linked on separate lists depending on how often they are used.
+// When we see an element for the first time, it goes to the NO_USE list; if
+// the object is reused later on, we move it to the LOW_USE list, until it is
+// used kHighUse times, at which point it is moved to the HIGH_USE list.
+// Whenever an element is evicted, we move it to the DELETED list so that if the
+// element is accessed again, we remember the fact that it was already stored
+// and maybe in the future we don't evict that element.
+
+// When we have to evict an element, first we try to use the last element from
+// the NO_USE list, then we move to the LOW_USE and only then we evict an entry
+// from the HIGH_USE. We attempt to keep entries on the cache for at least
+// kTargetTime hours (with frequently accessed items stored for longer periods),
+// but if we cannot do that, we fall back to keep each list roughly the same
+// size so that we have a chance to see an element again and move it to another
+// list.
+
#include "net/disk_cache/eviction.h"
#include "base/logging.h"
@@ -17,6 +41,8 @@ using base::Time;
namespace {
const int kCleanUpMargin = 1024 * 1024;
+const int kHighUse = 10; // Reuse count to be on the HIGH_USE list.
+const int kTargetTime = 24 * 7; // Time to be evicted (hours since last use).
int LowWaterAdjust(int high_water) {
if (high_water < kCleanUpMargin)
@@ -36,9 +62,13 @@ void Eviction::Init(BackendImpl* backend) {
rankings_ = &backend->rankings_;
header_ = &backend_->data_->header;
max_size_ = LowWaterAdjust(backend_->max_size_);
+ new_eviction_ = backend->new_eviction_;
}
void Eviction::TrimCache(bool empty) {
+ if (new_eviction_)
+ return TrimCacheV2(empty);
+
Trace("*** Trim Cache ***");
if (backend_->disabled_)
return;
@@ -55,19 +85,9 @@ void Eviction::TrimCache(bool empty) {
next.reset(rankings_->GetPrev(node.get(), Rankings::NO_USE));
if (!node->Data()->pointer || empty) {
// This entry is not being used by anybody.
- EntryImpl* entry;
- bool dirty;
- if (backend_->NewEntry(Addr(node->Data()->contents), &entry, &dirty)) {
- Trace("NewEntry failed on Trim 0x%x", node->address().value());
+ if (!EvictEntry(node.get(), empty))
continue;
- }
- if (node->Data()->pointer) {
- entry = EntryImpl::Update(entry);
- }
- ReportTrimTimes(entry);
- entry->Doom();
- entry->Release();
if (!empty)
backend_->OnEvent(Stats::TRIM_ENTRY);
if (++deleted == 4 && !empty) {
@@ -86,21 +106,34 @@ void Eviction::TrimCache(bool empty) {
}
void Eviction::UpdateRank(EntryImpl* entry, bool modified) {
+ if (new_eviction_)
+ return UpdateRankV2(entry, modified);
+
rankings_->UpdateRank(entry->rankings(), modified, GetListForEntry(entry));
}
void Eviction::OnOpenEntry(EntryImpl* entry) {
+ if (new_eviction_)
+ return OnOpenEntryV2(entry);
}
void Eviction::OnCreateEntry(EntryImpl* entry) {
+ if (new_eviction_)
+ return OnCreateEntryV2(entry);
+
rankings_->Insert(entry->rankings(), true, GetListForEntry(entry));
}
void Eviction::OnDoomEntry(EntryImpl* entry) {
+ if (new_eviction_)
+ return OnDoomEntryV2(entry);
+
rankings_->Remove(entry->rankings(), GetListForEntry(entry));
}
void Eviction::OnDestroyEntry(EntryImpl* entry) {
+ if (new_eviction_)
+ return OnDestroyEntryV2(entry);
}
void Eviction::ReportTrimTimes(EntryImpl* entry) {
@@ -119,4 +152,208 @@ Rankings::List Eviction::GetListForEntry(EntryImpl* entry) {
return Rankings::NO_USE;
}
+bool Eviction::EvictEntry(CacheRankingsBlock* node, bool empty) {
+ EntryImpl* entry;
+ bool dirty;
+ if (backend_->NewEntry(Addr(node->Data()->contents), &entry, &dirty)) {
+ Trace("NewEntry failed on Trim 0x%x", node->address().value());
+ return false;
+ }
+
+ if (node->Data()->pointer) {
+ entry = EntryImpl::Update(entry);
+ }
+ ReportTrimTimes(entry);
+ if (empty || !new_eviction_) {
+ entry->Doom();
+ } else {
+ entry->DeleteEntryData(false);
+ EntryStore* info = entry->entry()->Data();
+ DCHECK(ENTRY_NORMAL == info->state);
+
+ rankings_->Remove(entry->rankings(), GetListForEntryV2(entry));
+ info->state = ENTRY_EVICTED;
+ entry->entry()->Store();
+ rankings_->Insert(entry->rankings(), true, Rankings::DELETED);
+ backend_->OnEvent(Stats::TRIM_ENTRY);
+ }
+ entry->Release();
+
+ return true;
+}
+
+// -----------------------------------------------------------------------
+
+void Eviction::TrimCacheV2(bool empty) {
+ Trace("*** Trim Cache ***");
+ if (backend_->disabled_)
+ return;
+
+ Time start = Time::Now();
+
+ const int kListsToSearch = 3;
+ Rankings::ScopedRankingsBlock next[kListsToSearch];
+ int list = Rankings::LAST_ELEMENT;
+
+ // Get a node from each list.
+ for (int i = 0; i < kListsToSearch; i++) {
+ bool done = false;
+ next[i].set_rankings(rankings_);
+ if (done)
+ continue;
+ next[i].reset(rankings_->GetPrev(NULL, static_cast<Rankings::List>(i)));
+ if (!empty && NodeIsOldEnough(next[i].get(), i)) {
+ list = static_cast<Rankings::List>(i);
+ done = true;
+ }
+ }
+
+ // If we are not meeting the time targets let's move on to list length.
+ if (!empty && Rankings::LAST_ELEMENT == list)
+ list = SelectListByLenght();
+
+ if (empty)
+ list = 0;
+
+ Rankings::ScopedRankingsBlock node(rankings_);
+
+ int target_size = empty ? 0 : max_size_;
+ int deleted = 0;
+ for (; list < kListsToSearch; list++) {
+ while (header_->num_bytes > target_size && next[list].get()) {
+ node.reset(next[list].release());
+ next[list].reset(rankings_->GetPrev(node.get(),
+ static_cast<Rankings::List>(list)));
+ if (!node->Data()->pointer || empty) {
+ // This entry is not being used by anybody.
+ if (!EvictEntry(node.get(), empty))
+ continue;
+
+ if (++deleted == 4 && !empty) {
+ MessageLoop::current()->PostTask(FROM_HERE,
+ factory_.NewRunnableMethod(&Eviction::TrimCache, false));
+ break;
+ }
+ }
+ }
+ if (!empty)
+ list = kListsToSearch;
+ }
+
+ if (empty || header_->lru.sizes[Rankings::DELETED] > header_->num_entries / 4)
+ MessageLoop::current()->PostTask(FROM_HERE,
+ factory_.NewRunnableMethod(&Eviction::TrimDeleted, empty));
+
+ UMA_HISTOGRAM_TIMES("DiskCache.TotalTrimTime", Time::Now() - start);
+ Trace("*** Trim Cache end ***");
+ return;
+}
+
+void Eviction::UpdateRankV2(EntryImpl* entry, bool modified) {
+ rankings_->UpdateRank(entry->rankings(), modified, GetListForEntryV2(entry));
+}
+
+void Eviction::OnOpenEntryV2(EntryImpl* entry) {
+ EntryStore* info = entry->entry()->Data();
+ DCHECK(ENTRY_NORMAL == info->state);
+
+ if (info->reuse_count < kint32max) {
+ info->reuse_count++;
+ entry->entry()->set_modified();
+
+ // We may need to move this to a new list.
+ if (1 == info->reuse_count) {
+ rankings_->Remove(entry->rankings(), Rankings::NO_USE);
+ rankings_->Insert(entry->rankings(), false, Rankings::LOW_USE);
+ entry->entry()->Store();
+ } else if (kHighUse == info->reuse_count) {
+ rankings_->Remove(entry->rankings(), Rankings::LOW_USE);
+ rankings_->Insert(entry->rankings(), false, Rankings::HIGH_USE);
+ entry->entry()->Store();
+ }
+ }
+}
+
+void Eviction::OnCreateEntryV2(EntryImpl* entry) {
+ EntryStore* info = entry->entry()->Data();
+ switch (info->state) {
+ case ENTRY_NORMAL: {
+ DCHECK(!info->reuse_count);
+ DCHECK(!info->refetch_count);
+ break;
+ };
+ case ENTRY_EVICTED: {
+ if (info->refetch_count < kint32max)
+ info->refetch_count++;
+
+ if (info->refetch_count > kHighUse && info->reuse_count < kHighUse) {
+ info->reuse_count = kHighUse;
+ } else {
+ info->reuse_count++;
+ }
+ info->state = ENTRY_NORMAL;
+ entry->entry()->Store();
+ rankings_->Remove(entry->rankings(), Rankings::DELETED);
+ break;
+ };
+ default:
+ NOTREACHED();
+ }
+
+ rankings_->Insert(entry->rankings(), true, GetListForEntryV2(entry));
+}
+
+void Eviction::OnDoomEntryV2(EntryImpl* entry) {
+ EntryStore* info = entry->entry()->Data();
+ if (ENTRY_NORMAL != info->state)
+ return;
+
+ rankings_->Remove(entry->rankings(), GetListForEntryV2(entry));
+
+ info->state = ENTRY_DOOMED;
+ entry->entry()->Store();
+ rankings_->Insert(entry->rankings(), true, Rankings::DELETED);
+}
+
+void Eviction::OnDestroyEntryV2(EntryImpl* entry) {
+ rankings_->Remove(entry->rankings(), Rankings::DELETED);
+}
+
+Rankings::List Eviction::GetListForEntryV2(EntryImpl* entry) {
+ EntryStore* info = entry->entry()->Data();
+ DCHECK(ENTRY_NORMAL == info->state);
+
+ if (!info->reuse_count)
+ return Rankings::NO_USE;
+
+ if (info->reuse_count < kHighUse)
+ return Rankings::LOW_USE;
+
+ return Rankings::HIGH_USE;
+}
+
+void Eviction::TrimDeleted(bool empty) {
+}
+
+bool Eviction::NodeIsOldEnough(CacheRankingsBlock* node, int list) {
+ if (!node)
+ return false;
+
+ // If possible, we want to keep entries on each list at least kTargetTime
+ // hours. Each successive list on the enumeration has 2x the target time of
+ // the previous list.
+ Time used = Time::FromInternalValue(node->Data()->last_used);
+ int multiplier = 1 << list;
+ return (Time::Now() - used).InHours() > kTargetTime * multiplier;
+}
+
+int Eviction::SelectListByLenght() {
+ // Start by having each list to be roughly the same size.
+ if (header_->lru.sizes[0] > header_->num_entries / 4)
+ return 0;
+ if (header_->lru.sizes[1] > header_->num_entries / 4)
+ return 1;
+ return 2;
+}
+
} // namespace disk_cache
diff --git a/net/disk_cache/eviction.h b/net/disk_cache/eviction.h
index dd4e467..2496357 100644
--- a/net/disk_cache/eviction.h
+++ b/net/disk_cache/eviction.h
@@ -42,11 +42,28 @@ class Eviction {
private:
void ReportTrimTimes(EntryImpl* entry);
Rankings::List GetListForEntry(EntryImpl* entry);
+ bool EvictEntry(CacheRankingsBlock* node, bool empty);
+
+ // For now, keep a separate set of methods that implement the new
+ // eviction algorithm. This code will replace the original methods
+ // when it is finished.
+ void TrimCacheV2(bool empty);
+ void UpdateRankV2(EntryImpl* entry, bool modified);
+ void OnOpenEntryV2(EntryImpl* entry);
+ void OnCreateEntryV2(EntryImpl* entry);
+ void OnDoomEntryV2(EntryImpl* entry);
+ void OnDestroyEntryV2(EntryImpl* entry);
+ Rankings::List GetListForEntryV2(EntryImpl* entry);
+ void TrimDeleted(bool empty);
+
+ bool NodeIsOldEnough(CacheRankingsBlock* node, int list);
+ int SelectListByLenght();
BackendImpl* backend_;
Rankings* rankings_;
IndexHeader* header_;
int max_size_;
+ bool new_eviction_;
ScopedRunnableMethodFactory<Eviction> factory_;
DISALLOW_COPY_AND_ASSIGN(Eviction);
diff --git a/net/disk_cache/rankings.cc b/net/disk_cache/rankings.cc
index 01a7222..962563b 100644
--- a/net/disk_cache/rankings.cc
+++ b/net/disk_cache/rankings.cc
@@ -174,14 +174,14 @@ void GenerateCrash(CrashLocation location) {
namespace disk_cache {
-bool Rankings::Init(BackendImpl* backend) {
+bool Rankings::Init(BackendImpl* backend, bool count_lists) {
DCHECK(!init_);
if (init_)
return false;
backend_ = backend;
-
control_data_ = backend_->GetLruData();
+ count_lists_ = count_lists;
ReadHeads();
ReadTails();
@@ -281,6 +281,7 @@ void Rankings::Insert(CacheRankingsBlock* node, bool modified, List list) {
// The last thing to do is move our head to point to a node already stored.
WriteHead(list);
+ IncrementCounter(list);
GenerateCrash(ON_INSERT_4);
}
@@ -377,6 +378,7 @@ void Rankings::Remove(CacheRankingsBlock* node, List list) {
prev.Store();
GenerateCrash(ON_REMOVE_8);
node->Store();
+ DecrementCounter(list);
UpdateIterators(&next);
UpdateIterators(&prev);
}
@@ -747,4 +749,22 @@ void Rankings::UpdateIterators(CacheRankingsBlock* node) {
}
}
+void Rankings::IncrementCounter(List list) {
+ if (!count_lists_)
+ return;
+
+ DCHECK(control_data_->sizes[list] < kint32max);
+ if (control_data_->sizes[list] < kint32max)
+ control_data_->sizes[list]++;
+}
+
+void Rankings::DecrementCounter(List list) {
+ if (!count_lists_)
+ return;
+
+ DCHECK(control_data_->sizes[list] > 0);
+ if (control_data_->sizes[list] > 0)
+ control_data_->sizes[list]--;
+}
+
} // namespace disk_cache
diff --git a/net/disk_cache/rankings.h b/net/disk_cache/rankings.h
index d4c10afd..906a5b7 100644
--- a/net/disk_cache/rankings.h
+++ b/net/disk_cache/rankings.h
@@ -54,6 +54,7 @@ class Rankings {
NO_USE = 0, // List of entries that have not been reused.
LOW_USE, // List of entries with low reuse.
HIGH_USE, // List of entries with high reuse.
+ RESERVED, // Reserved for future use.
DELETED, // List of recently deleted or doomed entries.
LAST_ELEMENT
};
@@ -63,6 +64,7 @@ class Rankings {
// iterators that may go stale.
class ScopedRankingsBlock : public scoped_ptr<CacheRankingsBlock> {
public:
+ ScopedRankingsBlock() : rankings_(NULL) {}
explicit ScopedRankingsBlock(Rankings* rankings) : rankings_(rankings) {}
ScopedRankingsBlock(Rankings* rankings, CacheRankingsBlock* node)
: scoped_ptr<CacheRankingsBlock>(node), rankings_(rankings) {}
@@ -71,6 +73,10 @@ class Rankings {
rankings_->FreeRankingsBlock(get());
}
+ void set_rankings(Rankings* rankings) {
+ rankings_ = rankings;
+ }
+
// scoped_ptr::reset will delete the object.
void reset(CacheRankingsBlock* p = NULL) {
if (p != get())
@@ -83,10 +89,26 @@ class Rankings {
DISALLOW_EVIL_CONSTRUCTORS(ScopedRankingsBlock);
};
+ // If we have multiple lists, we have to iterate through all of them at once.
+ // This structure keeps track of where we are on the iteration.
+ struct Iterator {
+ List list; // Which entry was returned to the user.
+ CacheRankingsBlock* nodes[3]; // Nodes on the first three lists.
+ Rankings* my_rankings;
+ Iterator(Rankings* rankings) {
+ memset(this, 0, sizeof(Iterator));
+ my_rankings = rankings;
+ }
+ ~Iterator() {
+ for (int i = 0; i < 3; i++)
+ ScopedRankingsBlock(my_rankings, nodes[i]);
+ }
+ };
+
Rankings() : init_(false) {}
~Rankings() {}
- bool Init(BackendImpl* backend);
+ bool Init(BackendImpl* backend, bool count_lists);
// Restores original state, leaving the object ready for initialization.
void Reset();
@@ -155,7 +177,12 @@ class Rankings {
// Updates the iterators whenever node is being changed.
void UpdateIterators(CacheRankingsBlock* node);
+ // Keeps track of the number of entries on each list.
+ void IncrementCounter(List list);
+ void DecrementCounter(List list);
+
bool init_;
+ bool count_lists_;
Addr heads_[LAST_ELEMENT];
Addr tails_[LAST_ELEMENT];
BackendImpl* backend_;
diff --git a/net/disk_cache/stats.cc b/net/disk_cache/stats.cc
index a6733cb..6047c5c 100644
--- a/net/disk_cache/stats.cc
+++ b/net/disk_cache/stats.cc
@@ -40,6 +40,7 @@ static const char* kCounterNames[] = {
"Open hit",
"Create miss",
"Create hit",
+ "Resurrect hit",
"Create error",
"Trim entry",
"Doom entry",
diff --git a/net/disk_cache/stats.h b/net/disk_cache/stats.h
index a811e3f..6d8043e 100644
--- a/net/disk_cache/stats.h
+++ b/net/disk_cache/stats.h
@@ -28,6 +28,7 @@ class Stats {
OPEN_HIT,
CREATE_MISS,
CREATE_HIT,
+ RESURRECT_HIT,
CREATE_ERROR,
TRIM_ENTRY,
DOOM_ENTRY,