summaryrefslogtreecommitdiffstats
path: root/net/disk_cache
diff options
context:
space:
mode:
authorrvargas@google.com <rvargas@google.com@0039d316-1c4b-4281-b951-d872f2087c98>2008-12-04 23:03:33 +0000
committerrvargas@google.com <rvargas@google.com@0039d316-1c4b-4281-b951-d872f2087c98>2008-12-04 23:03:33 +0000
commit62cdf1eb96a4c410b503652a5656e0b197b1b9eb (patch)
treeda020c20415d39f79eab88c4a818934e1df2fede /net/disk_cache
parenta100d13626ad0ddeb4b0a7bb81eb9a736acc4d11 (diff)
downloadchromium_src-62cdf1eb96a4c410b503652a5656e0b197b1b9eb.zip
chromium_src-62cdf1eb96a4c410b503652a5656e0b197b1b9eb.tar.gz
chromium_src-62cdf1eb96a4c410b503652a5656e0b197b1b9eb.tar.bz2
Disk cache: Add support for an extra data stream for each cache entry.
This is the first step to allow the http cache to store additional metadata for certain entries. The cache file format changes to version 2.0 so an effect of this cl is that the browser will discard the old cache files. Review URL: http://codereview.chromium.org/12880 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@6392 0039d316-1c4b-4281-b951-d872f2087c98
Diffstat (limited to 'net/disk_cache')
-rw-r--r--net/disk_cache/backend_impl.cc4
-rw-r--r--net/disk_cache/backend_impl.h3
-rw-r--r--net/disk_cache/disk_format.h34
-rw-r--r--net/disk_cache/entry_impl.cc24
-rw-r--r--net/disk_cache/entry_impl.h14
-rw-r--r--net/disk_cache/entry_unittest.cc36
-rw-r--r--net/disk_cache/mem_entry_impl.cc13
-rw-r--r--net/disk_cache/mem_entry_impl.h8
-rw-r--r--net/disk_cache/rankings.cc76
-rw-r--r--net/disk_cache/rankings.h10
10 files changed, 152 insertions, 70 deletions
diff --git a/net/disk_cache/backend_impl.cc b/net/disk_cache/backend_impl.cc
index 7aa1e78..d9fe0cc 100644
--- a/net/disk_cache/backend_impl.cc
+++ b/net/disk_cache/backend_impl.cc
@@ -540,6 +540,10 @@ void BackendImpl::DeleteBlock(Addr block_address, bool deep) {
block_files_.DeleteBlock(block_address, deep);
}
+LruData* BackendImpl::GetLruData() {
+ return &data_->header.lru;
+}
+
void BackendImpl::UpdateRank(CacheRankingsBlock* node, bool modified) {
if (!read_only_)
rankings_.UpdateRank(node, modified);
diff --git a/net/disk_cache/backend_impl.h b/net/disk_cache/backend_impl.h
index 5c88682..7b747de 100644
--- a/net/disk_cache/backend_impl.h
+++ b/net/disk_cache/backend_impl.h
@@ -68,6 +68,9 @@ class BackendImpl : public Backend {
// the related storage in addition of releasing the related block.
void DeleteBlock(Addr block_address, bool deep);
+ // Retrieves a pointer to the lru-related data.
+ LruData* GetLruData();
+
// Updates the ranking information for an entry.
void UpdateRank(CacheRankingsBlock* node, bool modified);
diff --git a/net/disk_cache/disk_format.h b/net/disk_cache/disk_format.h
index d874359..2e839a1 100644
--- a/net/disk_cache/disk_format.h
+++ b/net/disk_cache/disk_format.h
@@ -36,7 +36,7 @@
// entry keeps track of all the information related to the same cache entry,
// such as the key, hash value, data pointers etc. A rankings node keeps track
// of the information that is updated frequently for a given entry, such as its
-// location on the LRU list, last access time etc.
+// location on the LRU lists, last access time etc.
//
// The files that store internal information for the cache (blocks and index)
// are at least partially memory mapped. They have a location that is signaled
@@ -63,7 +63,16 @@ typedef uint32 CacheAddr;
const int kIndexTablesize = 0x10000;
const uint32 kIndexMagic = 0xC103CAC3;
-const uint32 kCurrentVersion = 0x10003; // Version 1.3.
+const uint32 kCurrentVersion = 0x20000; // Version 2.0.
+
+struct LruData {
+ CacheAddr heads[5];
+ CacheAddr tails[5];
+ CacheAddr transaction; // In-flight operation target.
+ int32 operation; // Actual in-flight operation.
+ int32 operation_list; // In-flight operation list.
+ int32 pad[7];
+};
// Header for the master index file.
struct IndexHeader {
@@ -75,7 +84,8 @@ struct IndexHeader {
int32 this_id; // Id for all entries being changed (dirty flag).
CacheAddr stats; // Storage for usage data.
int32 table_len; // Actual size of the table (0 == kIndexTablesize).
- int32 pad[8];
+ int32 pad[64];
+ LruData lru; // Eviction control data.
IndexHeader() {
memset(this, 0, sizeof(*this));
magic = kIndexMagic;
@@ -99,17 +109,29 @@ struct EntryStore {
uint32 hash; // Full hash of the key.
CacheAddr next; // Next entry with the same hash or bucket.
CacheAddr rankings_node; // Rankings node for this entry.
+ int32 reuse_count; // How often is this entry used.
+ int32 refetch_count; // How often is this fetched from the net.
+ int32 state; // Current state.
+ uint64 creation_time;
int32 key_len;
CacheAddr long_key; // Optional address of a long key.
- int32 data_size[2]; // We can store up to 2 data chunks for each
- CacheAddr data_addr[2]; // entry.
- char key[256 - 9 * 4]; // null terminated
+ int32 data_size[4]; // We can store up to 4 data streams for each
+ CacheAddr data_addr[4]; // entry.
+ int32 pad[6];
+ char key[256 - 24 * 4]; // null terminated
};
COMPILE_ASSERT(sizeof(EntryStore) == 256, bad_EntyStore);
const int kMaxInternalKeyLength = 4 * sizeof(EntryStore) -
offsetof(EntryStore, key) - 1;
+// Possible states for a given entry.
+enum EntryState {
+ ENTRY_NORMAL = 0,
+ ENTRY_EVICTED, // The entry was recently evicted from the cache.
+ ENTRY_DOOMED // The entry was doomed.
+};
+
#pragma pack(push, old, 4)
// Rankings information for a given entry.
struct RankingsNode {
diff --git a/net/disk_cache/entry_impl.cc b/net/disk_cache/entry_impl.cc
index 2e7b8c46..cc2b07d 100644
--- a/net/disk_cache/entry_impl.cc
+++ b/net/disk_cache/entry_impl.cc
@@ -16,6 +16,9 @@ using base::TimeDelta;
namespace {
+// Index for the file used to store the key, if any (files_[kKeyFileIndex]).
+const int kKeyFileIndex = 3;
+
// This class implements FileIOCallback to buffer the callback from a file IO
// operation from the actual net class.
class SyncCallback: public disk_cache::FileIOCallback {
@@ -72,7 +75,8 @@ EntryImpl::EntryImpl(BackendImpl* backend, Addr address)
entry_.LazyInit(backend->File(address), address);
doomed_ = false;
backend_ = backend;
- unreported_size_[0] = unreported_size_[1] = 0;
+ for (int i = 0; i < NUM_STREAMS; i++)
+ unreported_size_[i] = 0;
}
// When an entry is deleted from the cache, we clean up all the data associated
@@ -85,7 +89,7 @@ EntryImpl::~EntryImpl() {
if (doomed_) {
UMA_HISTOGRAM_COUNTS(L"DiskCache.DeleteHeader", GetDataSize(0));
UMA_HISTOGRAM_COUNTS(L"DiskCache.DeleteData", GetDataSize(1));
- for (int index = 0; index < kKeyFileIndex; index++) {
+ for (int index = 0; index < NUM_STREAMS; index++) {
Addr address(entry_.Data()->data_addr[index]);
if (address.is_initialized()) {
DeleteData(address, index);
@@ -106,7 +110,7 @@ EntryImpl::~EntryImpl() {
backend_->DeleteBlock(entry_.address(), false);
} else {
bool ret = true;
- for (int index = 0; index < kKeyFileIndex; index++) {
+ for (int index = 0; index < NUM_STREAMS; index++) {
if (user_buffers_[index].get()) {
if (!(ret = Flush(index, entry_.Data()->data_size[index], false)))
LOG(ERROR) << "Failed to save user data";
@@ -154,6 +158,7 @@ std::string EntryImpl::GetKey() const {
if (entry->Data()->key_len > kMaxInternalKeyLength) {
Addr address(entry->Data()->long_key);
DCHECK(address.is_initialized());
+ COMPILE_ASSERT(NUM_STREAMS == kKeyFileIndex, invalid_key_index);
File* file = const_cast<EntryImpl*>(this)->GetBackingFile(address,
kKeyFileIndex);
@@ -182,7 +187,7 @@ Time EntryImpl::GetLastModified() const {
}
int32 EntryImpl::GetDataSize(int index) const {
- if (index < 0 || index > 1)
+ if (index < 0 || index >= NUM_STREAMS)
return 0;
CacheEntryBlock* entry = const_cast<CacheEntryBlock*>(&entry_);
@@ -192,7 +197,7 @@ int32 EntryImpl::GetDataSize(int index) const {
int EntryImpl::ReadData(int index, int offset, char* buf, int buf_len,
net::CompletionCallback* completion_callback) {
DCHECK(node_.Data()->dirty);
- if (index < 0 || index > 1)
+ if (index < 0 || index >= NUM_STREAMS)
return net::ERR_INVALID_ARGUMENT;
int entry_size = entry_.Data()->data_size[index];
@@ -258,7 +263,7 @@ int EntryImpl::WriteData(int index, int offset, const char* buf, int buf_len,
net::CompletionCallback* completion_callback,
bool truncate) {
DCHECK(node_.Data()->dirty);
- if (index < 0 || index > 1)
+ if (index < 0 || index >= NUM_STREAMS)
return net::ERR_INVALID_ARGUMENT;
if (offset < 0 || buf_len < 0)
@@ -372,6 +377,7 @@ bool EntryImpl::CreateEntry(Addr node_address, const std::string& key,
node->pointer = this;
entry_store->hash = hash;
+ entry_store->creation_time = Time::Now().ToInternalValue();
entry_store->key_len = static_cast<int32>(key.size());
if (entry_store->key_len > kMaxInternalKeyLength) {
Addr address(0);
@@ -507,7 +513,7 @@ void EntryImpl::SetTimes(base::Time last_used, base::Time last_modified) {
bool EntryImpl::CreateDataBlock(int index, int size) {
Addr address(entry_.Data()->data_addr[index]);
- DCHECK(0 == index || 1 == index);
+ DCHECK(index >= 0 && index < NUM_STREAMS);
if (!CreateBlock(size, &address))
return false;
@@ -577,10 +583,10 @@ File* EntryImpl::GetBackingFile(Addr address, int index) {
}
File* EntryImpl::GetExternalFile(Addr address, int index) {
- DCHECK(index >= 0 && index <= 2);
+ DCHECK(index >= 0 && index <= kKeyFileIndex);
if (!files_[index].get()) {
// For a key file, use mixed mode IO.
- scoped_refptr<File> file(new File(2 == index));
+ scoped_refptr<File> file(new File(kKeyFileIndex == index));
if (file->Init(backend_->GetFileName(address)))
files_[index].swap(file);
}
diff --git a/net/disk_cache/entry_impl.h b/net/disk_cache/entry_impl.h
index b504b0bd..9b26c80 100644
--- a/net/disk_cache/entry_impl.h
+++ b/net/disk_cache/entry_impl.h
@@ -89,10 +89,11 @@ class EntryImpl : public Entry, public base::RefCounted<EntryImpl> {
void SetTimes(base::Time last_used, base::Time last_modified);
private:
- ~EntryImpl();
+ enum {
+ NUM_STREAMS = 3
+ };
- // Index for the file used to store the key, if any (files_[kKeyFileIndex]).
- static const int kKeyFileIndex = 2;
+ ~EntryImpl();
// Initializes the storage for an internal or external data block.
bool CreateDataBlock(int index, int size);
@@ -134,9 +135,10 @@ class EntryImpl : public Entry, public base::RefCounted<EntryImpl> {
CacheEntryBlock entry_; // Key related information for this entry.
CacheRankingsBlock node_; // Rankings related information for this entry.
BackendImpl* backend_; // Back pointer to the cache.
- scoped_array<char> user_buffers_[2]; // Store user data.
- scoped_refptr<File> files_[3]; // Files to store external user data and key.
- int unreported_size_[2]; // Bytes not reported yet to the backend.
+ scoped_array<char> user_buffers_[NUM_STREAMS]; // Store user data.
+ scoped_refptr<File> files_[NUM_STREAMS + 1]; // Files to store external user
+ // data and key.
+ int unreported_size_[NUM_STREAMS]; // Bytes not reported yet to the backend.
bool doomed_; // True if this entry was removed from the cache.
DISALLOW_EVIL_CONSTRUCTORS(EntryImpl);
diff --git a/net/disk_cache/entry_unittest.cc b/net/disk_cache/entry_unittest.cc
index 9793649..abd1008 100644
--- a/net/disk_cache/entry_unittest.cc
+++ b/net/disk_cache/entry_unittest.cc
@@ -24,6 +24,7 @@ class DiskCacheEntryTest : public DiskCacheTestWithCache {
void InternalAsyncIO();
void ExternalSyncIO();
void ExternalAsyncIO();
+ void StreamAccess();
void GetKey();
void GrowData();
void TruncateData();
@@ -373,6 +374,41 @@ TEST_F(DiskCacheEntryTest, MemoryOnlyExternalAsyncIO) {
ExternalAsyncIO();
}
+void DiskCacheEntryTest::StreamAccess() {
+ disk_cache::Entry *entry = NULL;
+ ASSERT_TRUE(cache_->CreateEntry("the first key", &entry));
+ ASSERT_TRUE(NULL != entry);
+
+ const int kBufferSize = 1024;
+ char buffer1[kBufferSize];
+ char buffer2[kBufferSize];
+
+ const int kNumStreams = 3;
+ for (int i = 0; i < kNumStreams; i++) {
+ CacheTestFillBuffer(buffer1, kBufferSize, false);
+ EXPECT_EQ(kBufferSize, entry->WriteData(i, 0, buffer1, kBufferSize, NULL,
+ false));
+ memset(buffer2, 0, kBufferSize);
+ EXPECT_EQ(kBufferSize, entry->ReadData(i, 0, buffer2, kBufferSize, NULL));
+ EXPECT_EQ(0, memcmp(buffer1, buffer2, kBufferSize));
+ }
+
+ EXPECT_EQ(net::ERR_INVALID_ARGUMENT,
+ entry->ReadData(kNumStreams, 0, buffer1, kBufferSize, NULL));
+ entry->Close();
+}
+
+TEST_F(DiskCacheEntryTest, StreamAccess) {
+ InitCache();
+ StreamAccess();
+}
+
+TEST_F(DiskCacheEntryTest, MemoryOnlyStreamAccess) {
+ SetMemoryOnlyMode();
+ InitCache();
+ StreamAccess();
+}
+
void DiskCacheEntryTest::GetKey() {
std::string key1("the first key");
disk_cache::Entry *entry1;
diff --git a/net/disk_cache/mem_entry_impl.cc b/net/disk_cache/mem_entry_impl.cc
index 2bbf4a5..6bc7b6b 100644
--- a/net/disk_cache/mem_entry_impl.cc
+++ b/net/disk_cache/mem_entry_impl.cc
@@ -15,12 +15,13 @@ MemEntryImpl::MemEntryImpl(MemBackendImpl* backend) {
doomed_ = false;
backend_ = backend;
ref_count_ = 0;
- data_size_[0] = data_size_[1] = 0;
+ for (int i = 0; i < NUM_STREAMS; i++)
+ data_size_[i] = 0;
}
MemEntryImpl::~MemEntryImpl() {
- backend_->ModifyStorageSize(data_size_[0], 0);
- backend_->ModifyStorageSize(data_size_[1], 0);
+ for (int i = 0; i < NUM_STREAMS; i++)
+ backend_->ModifyStorageSize(data_size_[i], 0);
backend_->ModifyStorageSize(static_cast<int32>(key_.size()), 0);
}
@@ -75,7 +76,7 @@ Time MemEntryImpl::GetLastModified() const {
}
int32 MemEntryImpl::GetDataSize(int index) const {
- if (index < 0 || index > 1)
+ if (index < 0 || index >= NUM_STREAMS)
return 0;
return data_size_[index];
@@ -83,7 +84,7 @@ int32 MemEntryImpl::GetDataSize(int index) const {
int MemEntryImpl::ReadData(int index, int offset, char* buf, int buf_len,
net::CompletionCallback* completion_callback) {
- if (index < 0 || index > 1)
+ if (index < 0 || index >= NUM_STREAMS)
return net::ERR_INVALID_ARGUMENT;
int entry_size = GetDataSize(index);
@@ -105,7 +106,7 @@ int MemEntryImpl::ReadData(int index, int offset, char* buf, int buf_len,
int MemEntryImpl::WriteData(int index, int offset, const char* buf, int buf_len,
net::CompletionCallback* completion_callback,
bool truncate) {
- if (index < 0 || index > 1)
+ if (index < 0 || index >= NUM_STREAMS)
return net::ERR_INVALID_ARGUMENT;
if (offset < 0 || buf_len < 0)
diff --git a/net/disk_cache/mem_entry_impl.h b/net/disk_cache/mem_entry_impl.h
index 98c3e4a..d6e44dd 100644
--- a/net/disk_cache/mem_entry_impl.h
+++ b/net/disk_cache/mem_entry_impl.h
@@ -57,6 +57,10 @@ class MemEntryImpl : public Entry {
bool InUse();
private:
+ enum {
+ NUM_STREAMS = 3
+ };
+
~MemEntryImpl();
// Grows and cleans up the data buffer.
@@ -66,8 +70,8 @@ class MemEntryImpl : public Entry {
void UpdateRank(bool modified);
std::string key_;
- std::vector<char> data_[2]; // User data.
- int32 data_size_[2];
+ std::vector<char> data_[NUM_STREAMS]; // User data.
+ int32 data_size_[NUM_STREAMS];
int ref_count_;
MemEntryImpl* next_; // Pointers for the LRU list.
diff --git a/net/disk_cache/rankings.cc b/net/disk_cache/rankings.cc
index 82e5c7b..b690c75 100644
--- a/net/disk_cache/rankings.cc
+++ b/net/disk_cache/rankings.cc
@@ -16,10 +16,13 @@ disk_cache::RankCrashes g_rankings_crash = disk_cache::NO_CRASH;
namespace {
-const int kHeadIndex = 0;
-const int kTailIndex = 1;
-const int kTransactionIndex = 2;
-const int kOperationIndex = 3;
+enum Lists {
+ NO_USE = 0, // List of entries that have not been reused.
+ LOW_USE, // List of entries with low reuse.
+ HIGH_USE, // List of entries with high reuse.
+ DELETED, // List of recently deleted or doomed entries.
+ LAST_ELEMENT
+};
enum Operation {
INSERT = 1,
@@ -37,25 +40,29 @@ class Transaction {
// avoid having the compiler doing optimizations on when to read or write
// from user_data because it is the basis of the crash detection. Maybe
// volatile is not enough for that, but it should be a good hint.
- Transaction(volatile int32* user_data, disk_cache::Addr addr, Operation op);
+ Transaction(volatile disk_cache::LruData* data, disk_cache::Addr addr,
+ Operation op, int list);
~Transaction();
private:
- volatile int32* user_data_;
- DISALLOW_EVIL_CONSTRUCTORS(Transaction);
+ volatile disk_cache::LruData* data_;
+ DISALLOW_COPY_AND_ASSIGN(Transaction);
};
-Transaction::Transaction(volatile int32* user_data, disk_cache::Addr addr,
- Operation op) : user_data_(user_data) {
- DCHECK(!user_data_[kTransactionIndex]);
+Transaction::Transaction(volatile disk_cache::LruData* data,
+ disk_cache::Addr addr, Operation op, int list)
+ : data_(data) {
+ DCHECK(!data_->transaction);
DCHECK(addr.is_initialized());
- user_data_[kOperationIndex] = op;
- user_data_[kTransactionIndex] = static_cast<int32>(addr.value());
+ data_->operation = op;
+ data_->operation_list = list;
+ data_->transaction = addr.value();
}
Transaction::~Transaction() {
- DCHECK(user_data_[kTransactionIndex]);
- user_data_[kTransactionIndex] = 0;
- user_data_[kOperationIndex] = 0;
+ DCHECK(data_->transaction);
+ data_->transaction = 0;
+ data_->operation = 0;
+ data_->operation_list = 0;
}
// Code locations that can generate crashes.
@@ -160,14 +167,13 @@ bool Rankings::Init(BackendImpl* backend) {
return false;
backend_ = backend;
- MappedFile* file = backend_->File(Addr(RANKINGS, 0, 0, 0));
- header_ = reinterpret_cast<BlockFileHeader*>(file->buffer());
+ control_data_ = backend_->GetLruData();
head_ = ReadHead();
tail_ = ReadTail();
- if (header_->user[kTransactionIndex])
+ if (control_data_->transaction)
CompleteTransaction();
init_ = true;
@@ -178,7 +184,7 @@ void Rankings::Reset() {
init_ = false;
head_.set_value(0);
tail_.set_value(0);
- header_ = NULL;
+ control_data_ = NULL;
}
bool Rankings::GetRanking(CacheRankingsBlock* rankings) {
@@ -220,7 +226,7 @@ bool Rankings::GetRanking(CacheRankingsBlock* rankings) {
void Rankings::Insert(CacheRankingsBlock* node, bool modified) {
Trace("Insert 0x%x", node->address().value());
DCHECK(node->HasData());
- Transaction lock(header_->user, node->address(), INSERT);
+ Transaction lock(control_data_, node->address(), INSERT, NO_USE);
CacheRankingsBlock head(backend_->File(head_), head_);
if (head_.is_initialized()) {
if (!GetRanking(&head))
@@ -307,7 +313,7 @@ void Rankings::Remove(CacheRankingsBlock* node) {
if (!CheckLinks(node, &prev, &next))
return;
- Transaction lock(header_->user, node->address(), REMOVE);
+ Transaction lock(control_data_, node->address(), REMOVE, NO_USE);
prev.Data()->next = next.address().value();
next.Data()->prev = prev.address().value();
GenerateCrash(ON_REMOVE_1);
@@ -368,7 +374,7 @@ void Rankings::UpdateRank(CacheRankingsBlock* node, bool modified) {
}
void Rankings::CompleteTransaction() {
- Addr node_addr(static_cast<CacheAddr>(header_->user[kTransactionIndex]));
+ Addr node_addr(static_cast<CacheAddr>(control_data_->transaction));
if (!node_addr.is_initialized() || node_addr.is_separate_file()) {
NOTREACHED();
LOG(ERROR) << "Invalid rankings info.";
@@ -387,10 +393,10 @@ void Rankings::CompleteTransaction() {
// We want to leave the node inside the list. The entry must me marked as
// dirty, and will be removed later. Otherwise, we'll get assertions when
// attempting to remove the dirty entry.
- if (INSERT == header_->user[kOperationIndex]) {
+ if (INSERT == control_data_->operation) {
Trace("FinishInsert h:0x%x t:0x%x", head_.value(), tail_.value());
FinishInsert(&node);
- } else if (REMOVE == header_->user[kOperationIndex]) {
+ } else if (REMOVE == control_data_->operation) {
Trace("RevertRemove h:0x%x t:0x%x", head_.value(), tail_.value());
RevertRemove(&node);
} else {
@@ -400,8 +406,8 @@ void Rankings::CompleteTransaction() {
}
void Rankings::FinishInsert(CacheRankingsBlock* node) {
- header_->user[kTransactionIndex] = 0;
- header_->user[kOperationIndex] = 0;
+ control_data_->transaction = 0;
+ control_data_->operation = 0;
if (head_.value() != node->address().value()) {
if (tail_.value() == node->address().value()) {
// This part will be skipped by the logic of Insert.
@@ -420,13 +426,13 @@ void Rankings::RevertRemove(CacheRankingsBlock* node) {
Addr prev_addr(node->Data()->prev);
if (!next_addr.is_initialized() || !prev_addr.is_initialized()) {
// The operation actually finished. Nothing to do.
- header_->user[kTransactionIndex] = 0;
+ control_data_->transaction = 0;
return;
}
if (next_addr.is_separate_file() || prev_addr.is_separate_file()) {
NOTREACHED();
LOG(WARNING) << "Invalid rankings info.";
- header_->user[kTransactionIndex] = 0;
+ control_data_->transaction = 0;
return;
}
@@ -465,8 +471,8 @@ void Rankings::RevertRemove(CacheRankingsBlock* node) {
next.Store();
prev.Store();
- header_->user[kTransactionIndex] = 0;
- header_->user[kOperationIndex] = 0;
+ control_data_->transaction = 0;
+ control_data_->operation = 0;
}
CacheRankingsBlock* Rankings::GetNext(CacheRankingsBlock* node) {
@@ -588,21 +594,19 @@ bool Rankings::SanityCheck(CacheRankingsBlock* node, bool from_list) {
}
Addr Rankings::ReadHead() {
- CacheAddr head = static_cast<CacheAddr>(header_->user[kHeadIndex]);
- return Addr(head);
+ return Addr(control_data_->heads[NO_USE]);
}
Addr Rankings::ReadTail() {
- CacheAddr tail = static_cast<CacheAddr>(header_->user[kTailIndex]);
- return Addr(tail);
+ return Addr(control_data_->tails[NO_USE]);
}
void Rankings::WriteHead() {
- header_->user[kHeadIndex] = static_cast<int32>(head_.value());
+ control_data_->heads[NO_USE] = head_.value();
}
void Rankings::WriteTail() {
- header_->user[kTailIndex] = static_cast<int32>(tail_.value());
+ control_data_->tails[NO_USE] = tail_.value();
}
bool Rankings::CheckEntry(CacheRankingsBlock* rankings) {
diff --git a/net/disk_cache/rankings.h b/net/disk_cache/rankings.h
index 1a2dadc..4347fa2 100644
--- a/net/disk_cache/rankings.h
+++ b/net/disk_cache/rankings.h
@@ -4,8 +4,8 @@
// See net/disk_cache/disk_cache.h for the public interface.
-#ifndef NET_DISK_CACHE_RANKINGS_H__
-#define NET_DISK_CACHE_RANKINGS_H__
+#ifndef NET_DISK_CACHE_RANKINGS_H_
+#define NET_DISK_CACHE_RANKINGS_H_
#include <list>
@@ -141,14 +141,14 @@ class Rankings {
bool init_;
Addr head_;
Addr tail_;
- BlockFileHeader* header_; // Header of the block-file used to store rankings.
BackendImpl* backend_;
+ LruData* control_data_; // Data related to the LRU lists.
IteratorList iterators_;
- DISALLOW_EVIL_CONSTRUCTORS(Rankings);
+ DISALLOW_COPY_AND_ASSIGN(Rankings);
};
} // namespace disk_cache
-#endif // NET_DISK_CACHE_RANKINGS_H__
+#endif // NET_DISK_CACHE_RANKINGS_H_