-rw-r--r--   net/disk_cache/backend_impl.cc           65
-rw-r--r--   net/disk_cache/backend_impl.h            22
-rw-r--r--   net/disk_cache/backend_unittest.cc       90
-rw-r--r--   net/disk_cache/disk_cache_test_base.cc   29
-rw-r--r--   net/disk_cache/disk_cache_test_base.h     8
-rw-r--r--   net/disk_cache/entry_impl.cc              1
-rw-r--r--   net/disk_cache/eviction.cc               42
-rw-r--r--   net/disk_cache/eviction.h                 5
-rw-r--r--   net/tools/dump_cache/dump_files.cc       42
9 files changed, 255 insertions, 49 deletions
diff --git a/net/disk_cache/backend_impl.cc b/net/disk_cache/backend_impl.cc
index 1161c5d..14bcd4e 100644
--- a/net/disk_cache/backend_impl.cc
+++ b/net/disk_cache/backend_impl.cc
@@ -674,7 +674,8 @@ EntryImpl* BackendImpl::OpenEntryImpl(const std::string& key) {
uint32 hash = Hash(key);
Trace("Open hash 0x%x", hash);
- EntryImpl* cache_entry = MatchEntry(key, hash, false);
+ bool error;
+ EntryImpl* cache_entry = MatchEntry(key, hash, false, Addr(), &error);
if (!cache_entry) {
stats_.OnEvent(Stats::OPEN_MISS);
return NULL;
@@ -708,11 +709,13 @@ EntryImpl* BackendImpl::CreateEntryImpl(const std::string& key) {
if (entry_address.is_initialized()) {
// We have an entry already. It could be the one we are looking for, or just
// a hash conflict.
- EntryImpl* old_entry = MatchEntry(key, hash, false);
+ bool error;
+ EntryImpl* old_entry = MatchEntry(key, hash, false, Addr(), &error);
if (old_entry)
return ResurrectEntry(old_entry);
- EntryImpl* parent_entry = MatchEntry(key, hash, true);
+ EntryImpl* parent_entry = MatchEntry(key, hash, true, Addr(), &error);
+ DCHECK(!error);
if (parent_entry) {
parent.swap(&parent_entry);
} else if (data_->table[hash & mask_]) {
@@ -897,7 +900,9 @@ void BackendImpl::RecoveredEntry(CacheRankingsBlock* rankings) {
void BackendImpl::InternalDoomEntry(EntryImpl* entry) {
uint32 hash = entry->GetHash();
std::string key = entry->GetKey();
- EntryImpl* parent_entry = MatchEntry(key, hash, true);
+ Addr entry_addr = entry->entry()->address();
+ bool error;
+ EntryImpl* parent_entry = MatchEntry(key, hash, true, entry_addr, &error);
CacheAddr child(entry->GetNextAddress());
Trace("Doom entry 0x%p", entry);
@@ -908,7 +913,7 @@ void BackendImpl::InternalDoomEntry(EntryImpl* entry) {
if (parent_entry) {
parent_entry->SetNextAddress(Addr(child));
parent_entry->Release();
- } else {
+ } else if (!error) {
data_->table[hash & mask_] = child;
}
@@ -1229,6 +1234,16 @@ void BackendImpl::ThrottleRequestsForTest(bool throttle) {
background_queue_.StopQueingOperations();
}
+void BackendImpl::TrimForTest(bool empty) {
+ eviction_.SetTestMode();
+ eviction_.TrimCache(empty);
+}
+
+void BackendImpl::TrimDeletedListForTest(bool empty) {
+ eviction_.SetTestMode();
+ eviction_.TrimDeletedList(empty);
+}
+
int BackendImpl::SelfCheck() {
if (!init_) {
LOG(ERROR) << "Init failed";
@@ -1545,16 +1560,28 @@ int BackendImpl::NewEntry(Addr address, EntryImpl** entry, bool* dirty) {
}
EntryImpl* BackendImpl::MatchEntry(const std::string& key, uint32 hash,
- bool find_parent) {
+ bool find_parent, Addr entry_addr,
+ bool* match_error) {
Addr address(data_->table[hash & mask_]);
scoped_refptr<EntryImpl> cache_entry, parent_entry;
EntryImpl* tmp = NULL;
bool found = false;
+ std::set<CacheAddr> visited;
+ *match_error = false;
for (;;) {
if (disabled_)
break;
+ if (visited.find(address.value()) != visited.end()) {
+ // It's possible for a buggy version of the code to write a loop. Just
+ // break it.
+ Trace("Hash collision loop 0x%x", address.value());
+ address.set_value(0);
+ parent_entry->SetNextAddress(address);
+ }
+ visited.insert(address.value());
+
if (!address.is_initialized()) {
if (find_parent)
found = true;
@@ -1590,6 +1617,7 @@ EntryImpl* BackendImpl::MatchEntry(const std::string& key, uint32 hash,
// Restart the search.
address.set_value(data_->table[hash & mask_]);
+ visited.clear();
continue;
}
@@ -1598,6 +1626,11 @@ EntryImpl* BackendImpl::MatchEntry(const std::string& key, uint32 hash,
if (!cache_entry->Update())
cache_entry = NULL;
found = true;
+ if (find_parent && entry_addr.value() != address.value()) {
+ Trace("Entry not on the index 0x%x", address.value());
+ *match_error = true;
+ parent_entry = NULL;
+ }
break;
}
if (!cache_entry->Update())
@@ -1655,7 +1688,7 @@ EntryImpl* BackendImpl::OpenFollowingEntry(bool forward, void** iter) {
OpenFollowingEntryFromList(forward, iterator->list,
&iterator->nodes[i], &temp);
} else {
- temp = GetEnumeratedEntry(iterator->nodes[i], false);
+ temp = GetEnumeratedEntry(iterator->nodes[i]);
}
entries[i].swap(&temp); // The entry was already addref'd.
@@ -1712,7 +1745,7 @@ bool BackendImpl::OpenFollowingEntryFromList(bool forward, Rankings::List list,
Rankings::ScopedRankingsBlock next(&rankings_, next_block);
*from_entry = NULL;
- *next_entry = GetEnumeratedEntry(next.get(), false);
+ *next_entry = GetEnumeratedEntry(next.get());
if (!*next_entry)
return false;
@@ -1720,8 +1753,7 @@ bool BackendImpl::OpenFollowingEntryFromList(bool forward, Rankings::List list,
return true;
}
-EntryImpl* BackendImpl::GetEnumeratedEntry(CacheRankingsBlock* next,
- bool to_evict) {
+EntryImpl* BackendImpl::GetEnumeratedEntry(CacheRankingsBlock* next) {
if (!next || disabled_)
return NULL;
@@ -1736,12 +1768,19 @@ EntryImpl* BackendImpl::GetEnumeratedEntry(CacheRankingsBlock* next,
return NULL;
}
- // There is no need to store the entry to disk if we want to delete it.
- if (!to_evict && !entry->Update()) {
+ if (!entry->Update()) {
entry->Release();
return NULL;
}
+ // Note that it is unfortunate (but possible) for this entry to be clean, but
+ // not actually the real entry. In other words, we could have lost this entry
+ // from the index, and it could have been replaced with a newer one. It's not
+ // worth checking that this entry is "the real one", so we just return it and
+ // let the enumeration continue; this entry will be evicted at some point, and
+ // the regular path will work with the real entry. With time, this problem
+ // will disappear because this scenario is just a bug.
+
// Make sure that we save the key for later.
entry->GetKey();
@@ -1793,7 +1832,7 @@ void BackendImpl::DestroyInvalidEntry(EntryImpl* entry) {
void BackendImpl::DestroyInvalidEntryFromEnumeration(EntryImpl* entry) {
std::string key = entry->GetKey();
entry->SetPointerForInvalidEntry(GetCurrentEntryId());
- CacheAddr next_entry = entry->entry()->Data()->next;
+ CacheAddr next_entry = entry->GetNextAddress();
if (!next_entry) {
DestroyInvalidEntry(entry);
entry->Release();
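
Aside (not part of the patch): the loop-breaking rule added to MatchEntry is easier to see in isolation. Below is a minimal, self-contained sketch of the same idea — walk the collision chain, remember every address visited, and clear the offending next pointer when an address repeats. Node and WalkAndBreakLoops are toy stand-ins for this illustration, not disk_cache types.

    #include <cstdint>
    #include <map>
    #include <set>

    // Sketch only: toy collision chain with the loop-breaking rule that
    // MatchEntry now applies.
    struct Node {
      uint32_t next = 0;  // 0 means end of chain, like an uninitialized Addr.
    };

    void WalkAndBreakLoops(std::map<uint32_t, Node>& table, uint32_t head) {
      std::set<uint32_t> visited;
      uint32_t prev = 0;
      for (uint32_t addr = head; addr != 0;) {
        if (!visited.insert(addr).second) {
          // An address repeated: the chain loops. Truncate it at the node
          // that pointed back into the chain (the SetNextAddress(0) step).
          table[prev].next = 0;
          break;
        }
        prev = addr;
        addr = table[addr].next;
      }
    }
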
diff --git a/net/disk_cache/backend_impl.h b/net/disk_cache/backend_impl.h
index 74a1eafd..be491e6 100644
--- a/net/disk_cache/backend_impl.h
+++ b/net/disk_cache/backend_impl.h
@@ -240,6 +240,14 @@ class BackendImpl : public Backend {
// Starts or stops throttling requests.
void ThrottleRequestsForTest(bool throttle);
+ // Trims an entry (all if |empty| is true) from the cache. This method
+ // should be called directly on the cache thread.
+ void TrimForTest(bool empty);
+
+ // Trims an entry (all if |empty| is true) from the list of deleted
+ // entries. This method should be called directly on the cache thread.
+ void TrimDeletedListForTest(bool empty);
+
// Performs a simple self-check, and returns the number of dirty items
// or an error code (negative value).
int SelfCheck();
@@ -280,8 +288,13 @@ class BackendImpl : public Backend {
// Returns a given entry from the cache. The entry to match is determined by
// key and hash, and the returned entry may be the matched one or its parent
- // on the list of entries with the same hash (or bucket).
- EntryImpl* MatchEntry(const std::string& key, uint32 hash, bool find_parent);
+ // on the list of entries with the same hash (or bucket). To look for a parent
+ // of a given entry, |entry_addr| should be grabbed from that entry, so that
+ // if it doesn't match the entry on the index, we know that it was replaced
+ // with a new entry; in this case |*match_error| will be set to true and the
+ // return value will be NULL.
+ EntryImpl* MatchEntry(const std::string& key, uint32 hash, bool find_parent,
+ Addr entry_addr, bool* match_error);
// Opens the next or previous entry on a cache iteration.
EntryImpl* OpenFollowingEntry(bool forward, void** iter);
@@ -293,9 +306,8 @@ class BackendImpl : public Backend {
CacheRankingsBlock** from_entry,
EntryImpl** next_entry);
- // Returns the entry that is pointed by |next|. If we are trimming the cache,
- // |to_evict| should be true so that we don't perform extra disk writes.
- EntryImpl* GetEnumeratedEntry(CacheRankingsBlock* next, bool to_evict);
+ // Returns the entry that is pointed by |next|.
+ EntryImpl* GetEnumeratedEntry(CacheRankingsBlock* next);
// Re-opens an entry that was previously deleted.
EntryImpl* ResurrectEntry(EntryImpl* deleted_entry);
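
To make the new MatchEntry contract concrete (this snippet is illustrative and not part of the patch): when looking for a parent, the caller passes the child's own address, and only rewrites the index slot when |match_error| stays false. Here the index is modeled as two plain maps; Unlink and MatchResult are assumed names.

    #include <cstdint>
    #include <map>

    // Sketch only: the unlink decision InternalDoomEntry now makes, on a toy
    // index. |table| maps bucket -> head address; |next| maps address -> next.
    struct MatchResult {
      uint32_t parent = 0;  // Address of the entry pointing at the child, or 0.
      bool error = false;   // True if the index points at a newer entry now.
    };

    void Unlink(std::map<uint32_t, uint32_t>& table,
                std::map<uint32_t, uint32_t>& next,
                uint32_t bucket, uint32_t child_addr, const MatchResult& match) {
      uint32_t after_child = next[child_addr];
      if (match.parent) {
        next[match.parent] = after_child;   // Splice the child out of the chain.
      } else if (!match.error) {
        table[bucket] = after_child;        // Child really was the bucket head.
      }
      // If match.error is set, the slot already points at a newer entry with
      // the same key, so it must be left alone.
    }
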
diff --git a/net/disk_cache/backend_unittest.cc b/net/disk_cache/backend_unittest.cc
index 05f17f6..1760837 100644
--- a/net/disk_cache/backend_unittest.cc
+++ b/net/disk_cache/backend_unittest.cc
@@ -49,6 +49,7 @@ class DiskCacheBackendTest : public DiskCacheTestWithCache {
void BackendRecoverInsert();
void BackendRecoverRemove();
void BackendInvalidEntry2();
+ void BackendInvalidEntry3();
void BackendNotMarkedButDirty(const std::string& name);
void BackendDoomAll();
void BackendDoomAll2();
@@ -1318,7 +1319,96 @@ TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry2) {
BackendInvalidEntry2();
}
+// Tests that we don't crash or hang when enumerating this cache.
+void DiskCacheBackendTest::BackendInvalidEntry3() {
+ SetMask(0x1); // 2-entry table.
+ SetMaxSize(0x3000); // 12 kB.
+ DisableFirstCleanup();
+ InitCache();
+
+ disk_cache::Entry* entry;
+ void* iter = NULL;
+ while (OpenNextEntry(&iter, &entry) == net::OK) {
+ entry->Close();
+ }
+}
+
+TEST_F(DiskCacheBackendTest, InvalidEntry3) {
+ ASSERT_TRUE(CopyTestCache("dirty_entry3"));
+ BackendInvalidEntry3();
+}
+
TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry3) {
+ ASSERT_TRUE(CopyTestCache("dirty_entry4"));
+ SetNewEviction();
+ BackendInvalidEntry3();
+ DisableIntegrityCheck();
+}
+
+// Test that we handle a dirty entry on the LRU list, already replaced with
+// the same key, and with hash collisions.
+TEST_F(DiskCacheBackendTest, InvalidEntry4) {
+ ASSERT_TRUE(CopyTestCache("dirty_entry3"));
+ SetMask(0x1); // 2-entry table.
+ SetMaxSize(0x3000); // 12 kB.
+ DisableFirstCleanup();
+ InitCache();
+
+ TrimForTest(false);
+}
+
+// Test that we handle a dirty entry on the deleted list, already replaced with
+// the same key, and with hash collisions.
+TEST_F(DiskCacheBackendTest, InvalidEntry5) {
+ ASSERT_TRUE(CopyTestCache("dirty_entry4"));
+ SetNewEviction();
+ SetMask(0x1); // 2-entry table.
+ SetMaxSize(0x3000); // 12 kB.
+ DisableFirstCleanup();
+ InitCache();
+
+ TrimDeletedListForTest(false);
+}
+
+// Tests that we don't hang when there is a loop on the hash collision list.
+// The test cache could be a result of bug 69135.
+TEST_F(DiskCacheBackendTest, BadNextEntry1) {
+ ASSERT_TRUE(CopyTestCache("list_loop2"));
+ SetMask(0x1); // 2-entry table.
+ SetMaxSize(0x3000); // 12 kB.
+ DisableFirstCleanup();
+ InitCache();
+
+ // The second entry points at itself, and the first entry is not accessible
+ // through the index, but it is at the head of the LRU.
+
+ disk_cache::Entry* entry;
+ ASSERT_EQ(net::OK, CreateEntry("The first key", &entry));
+ entry->Close();
+
+ TrimForTest(false);
+ TrimForTest(false);
+ ASSERT_EQ(net::OK, OpenEntry("The first key", &entry));
+ entry->Close();
+ EXPECT_EQ(1, cache_->GetEntryCount());
+}
+
+// Tests that we don't hang when there is a loop on the hash collision list.
+// The test cache could be a result of bug 69135.
+TEST_F(DiskCacheBackendTest, BadNextEntry2) {
+ ASSERT_TRUE(CopyTestCache("list_loop3"));
+ SetMask(0x1); // 2-entry table.
+ SetMaxSize(0x3000); // 12 kB.
+ DisableFirstCleanup();
+ InitCache();
+
+ // There is a wide loop of 5 entries.
+
+ disk_cache::Entry* entry;
+ ASSERT_NE(net::OK, OpenEntry("Not present key", &entry));
+}
+
+TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry6) {
ASSERT_TRUE(CopyTestCache("bad_rankings3"));
DisableFirstCleanup();
SetNewEviction();
diff --git a/net/disk_cache/disk_cache_test_base.cc b/net/disk_cache/disk_cache_test_base.cc
index 3860667..41d1caf 100644
--- a/net/disk_cache/disk_cache_test_base.cc
+++ b/net/disk_cache/disk_cache_test_base.cc
@@ -176,6 +176,35 @@ int DiskCacheTestWithCache::WriteSparseData(disk_cache::Entry* entry,
return cb.GetResult(rv);
}
+// Simple task to run part of a test from the cache thread.
+class TrimTask : public Task {
+ public:
+ TrimTask(disk_cache::BackendImpl* backend, bool deleted, bool empty)
+ : backend_(backend),
+ deleted_(deleted),
+ empty_(empty) {}
+
+ virtual void Run() {
+ if (deleted_)
+ backend_->TrimDeletedListForTest(empty_);
+ else
+ backend_->TrimForTest(empty_);
+ }
+
+ protected:
+ disk_cache::BackendImpl* backend_;
+ bool deleted_;
+ bool empty_;
+};
+
+void DiskCacheTestWithCache::TrimForTest(bool empty) {
+ RunTaskForTest(new TrimTask(cache_impl_, false, empty));
+}
+
+void DiskCacheTestWithCache::TrimDeletedListForTest(bool empty) {
+ RunTaskForTest(new TrimTask(cache_impl_, true, empty));
+}
+
void DiskCacheTestWithCache::TearDown() {
MessageLoop::current()->RunAllPending();
delete cache_;
diff --git a/net/disk_cache/disk_cache_test_base.h b/net/disk_cache/disk_cache_test_base.h
index 0fd98b8..f535896 100644
--- a/net/disk_cache/disk_cache_test_base.h
+++ b/net/disk_cache/disk_cache_test_base.h
@@ -107,6 +107,14 @@ class DiskCacheTestWithCache : public DiskCacheTest {
int WriteSparseData(disk_cache::Entry* entry, int64 offset,
net::IOBuffer* buf, int len);
+ // Asks the cache to trim an entry. If |empty| is true, the whole cache is
+ // deleted.
+ void TrimForTest(bool empty);
+
+ // Asks the cache to trim an entry from the deleted list. If |empty| is
+ // true, the whole list is deleted.
+ void TrimDeletedListForTest(bool empty);
+
// DiskCacheTest:
virtual void TearDown();
diff --git a/net/disk_cache/entry_impl.cc b/net/disk_cache/entry_impl.cc
index ff05e11..57d4d83 100644
--- a/net/disk_cache/entry_impl.cc
+++ b/net/disk_cache/entry_impl.cc
@@ -589,6 +589,7 @@ CacheAddr EntryImpl::GetNextAddress() {
}
void EntryImpl::SetNextAddress(Addr address) {
+ DCHECK_NE(address.value(), entry_.address().value());
entry_.Data()->next = address.value();
bool success = entry_.Store();
DCHECK(success);
diff --git a/net/disk_cache/eviction.cc b/net/disk_cache/eviction.cc
index 0173c16..6ba053b 100644
--- a/net/disk_cache/eviction.cc
+++ b/net/disk_cache/eviction.cc
@@ -82,6 +82,7 @@ void Eviction::Init(BackendImpl* backend) {
delay_trim_ = false;
trim_delays_ = 0;
init_ = true;
+ test_mode_ = false;
in_experiment_ = (header_->experiment == EXPERIMENT_DELETED_LIST_IN);
}
@@ -125,11 +126,13 @@ void Eviction::TrimCache(bool empty) {
// This entry is not being used by anybody.
// Do NOT use node as an iterator after this point.
rankings_->TrackRankingsBlock(node.get(), false);
- if (!EvictEntry(node.get(), empty))
+ if (!EvictEntry(node.get(), empty) && !test_mode_)
continue;
if (!empty) {
backend_->OnEvent(Stats::TRIM_ENTRY);
+ if (test_mode_)
+ break;
if ((TimeTicks::Now() - start).InMilliseconds() > 20) {
MessageLoop::current()->PostTask(FROM_HERE,
@@ -182,6 +185,15 @@ void Eviction::OnDestroyEntry(EntryImpl* entry) {
return OnDestroyEntryV2(entry);
}
+void Eviction::SetTestMode() {
+ test_mode_ = true;
+}
+
+void Eviction::TrimDeletedList(bool empty) {
+ DCHECK(test_mode_ && new_eviction_);
+ TrimDeleted(empty);
+}
+
void Eviction::PostDelayedTrim() {
// Prevent posting multiple tasks.
if (delay_trim_)
@@ -242,7 +254,7 @@ Rankings::List Eviction::GetListForEntry(EntryImpl* entry) {
}
bool Eviction::EvictEntry(CacheRankingsBlock* node, bool empty) {
- EntryImpl* entry = backend_->GetEnumeratedEntry(node, true);
+ EntryImpl* entry = backend_->GetEnumeratedEntry(node);
if (!entry) {
Trace("NewEntry failed on Trim 0x%x", node->address().value());
return false;
@@ -320,9 +332,12 @@ void Eviction::TrimCacheV2(bool empty) {
// This entry is not being used by anybody.
// Do NOT use node as an iterator after this point.
rankings_->TrackRankingsBlock(node.get(), false);
- if (!EvictEntry(node.get(), empty))
+ if (!EvictEntry(node.get(), empty) && !test_mode_)
continue;
+ if (!empty && test_mode_)
+ break;
+
if (!empty && (TimeTicks::Now() - start).InMilliseconds() > 20) {
MessageLoop::current()->PostTask(FROM_HERE,
factory_.NewRunnableMethod(&Eviction::TrimCache, false));
@@ -336,7 +351,8 @@ void Eviction::TrimCacheV2(bool empty) {
if (empty) {
TrimDeleted(true);
- } else if (header_->lru.sizes[Rankings::DELETED] > header_->num_entries / 4) {
+ } else if (header_->lru.sizes[Rankings::DELETED] > header_->num_entries / 4 &&
+ !test_mode_) {
MessageLoop::current()->PostTask(FROM_HERE,
factory_.NewRunnableMethod(&Eviction::TrimDeleted, empty));
}
@@ -451,6 +467,8 @@ void Eviction::TrimDeleted(bool empty) {
node.reset(next.release());
next.reset(rankings_->GetPrev(node.get(), Rankings::DELETED));
deleted |= RemoveDeletedNode(node.get());
+ if (test_mode_)
+ break;
}
// Normally we use 25% for each list. The experiment doubles the number of
@@ -459,10 +477,11 @@ void Eviction::TrimDeleted(bool empty) {
// lists intact.
int max_length = in_experiment_ ? header_->num_entries * 2 / 5 :
header_->num_entries / 4;
- if (deleted && !empty &&
- header_->lru.sizes[Rankings::DELETED] > max_length)
+ if (deleted && !empty && !test_mode_ &&
+ header_->lru.sizes[Rankings::DELETED] > max_length) {
MessageLoop::current()->PostTask(FROM_HERE,
factory_.NewRunnableMethod(&Eviction::TrimDeleted, false));
+ }
CACHE_UMA(AGE_MS, "TotalTrimDeletedTime", 0, start);
Trace("*** Trim Deleted end ***");
@@ -470,19 +489,12 @@ void Eviction::TrimDeleted(bool empty) {
}
bool Eviction::RemoveDeletedNode(CacheRankingsBlock* node) {
- EntryImpl* entry;
- bool dirty;
- if (backend_->NewEntry(Addr(node->Data()->contents), &entry, &dirty)) {
+ EntryImpl* entry = backend_->GetEnumeratedEntry(node);
+ if (!entry) {
Trace("NewEntry failed on Trim 0x%x", node->address().value());
return false;
}
- // TODO(rvargas): figure out how to deal with corruption at this point (dirty
- // entries that live in this list).
- if (node->Data()->dirty) {
- // We ignore the failure; we're removing the entry anyway.
- entry->Update();
- }
bool doomed = (entry->entry()->Data()->state == ENTRY_DOOMED);
entry->entry()->Data()->state = ENTRY_DOOMED;
entry->DoomImpl();
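
For context (not in the patch): test_mode_ turns the normally time-budgeted, self-reposting trim loop into a deterministic single step. A framework-free sketch of that control flow, using a plain deque of ids in place of the real LRU list and message loop:

    #include <chrono>
    #include <deque>
    #include <iostream>

    // Sketch only. Returning true stands in for posting a continuation task.
    bool TrimSketch(std::deque<int>& lru, size_t target_size, bool test_mode) {
      const auto start = std::chrono::steady_clock::now();
      while (lru.size() > target_size) {
        lru.pop_back();                      // "Evict" the oldest entry.
        if (test_mode)
          return false;                      // Exactly one entry per call in tests.
        if (std::chrono::steady_clock::now() - start >
            std::chrono::milliseconds(20)) {
          return true;                       // Real code posts TrimCache(false).
        }
      }
      return false;
    }

    int main() {
      std::deque<int> lru = {1, 2, 3, 4, 5};
      TrimSketch(lru, 2, /*test_mode=*/true);
      std::cout << "entries left: " << lru.size() << "\n";  // Prints 4.
    }
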
diff --git a/net/disk_cache/eviction.h b/net/disk_cache/eviction.h
index 0f5eeb7..26f88fe 100644
--- a/net/disk_cache/eviction.h
+++ b/net/disk_cache/eviction.h
@@ -40,6 +40,10 @@ class Eviction {
void OnDoomEntry(EntryImpl* entry);
void OnDestroyEntry(EntryImpl* entry);
+ // Testing interface.
+ void SetTestMode();
+ void TrimDeletedList(bool empty);
+
private:
void PostDelayedTrim();
void DelayedTrim();
@@ -75,6 +79,7 @@ class Eviction {
bool trimming_;
bool delay_trim_;
bool init_;
+ bool test_mode_;
bool in_experiment_;
ScopedRunnableMethodFactory<Eviction> factory_;
diff --git a/net/tools/dump_cache/dump_files.cc b/net/tools/dump_cache/dump_files.cc
index 2f37c5d..bffc6b7 100644
--- a/net/tools/dump_cache/dump_files.cc
+++ b/net/tools/dump_cache/dump_files.cc
@@ -6,6 +6,7 @@
// to the actual files (they still may change if an error is detected on the
// files).
+#include <set>
#include <stdio.h>
#include <string>
@@ -67,6 +68,7 @@ void DumpIndexHeader(const std::wstring& name) {
for (int i = 0; i < 5; i++) {
printf("head %d: 0x%x\n", i, header.lru.heads[i]);
printf("tail %d: 0x%x\n", i, header.lru.tails[i]);
+ printf("size %d: 0x%x\n", i, header.lru.sizes[i]);
}
printf("transaction: 0x%x\n", header.lru.transaction);
printf("operation: %d\n", header.lru.operation);
@@ -131,6 +133,7 @@ class CacheDumper {
disk_cache::Index* index_;
int current_hash_;
disk_cache::CacheAddr next_addr_;
+ std::set<disk_cache::CacheAddr> dumped_entries_;
DISALLOW_COPY_AND_ASSIGN(CacheDumper);
};
@@ -154,17 +157,19 @@ bool CacheDumper::Init() {
}
bool CacheDumper::GetEntry(disk_cache::EntryStore* entry) {
+ if (dumped_entries_.find(next_addr_) != dumped_entries_.end()) {
+ printf("Loop detected\n");
+ next_addr_ = 0;
+ current_hash_++;
+ }
+
if (next_addr_) {
- if (LoadEntry(next_addr_, entry)) {
- next_addr_ = entry->next;
- if (!next_addr_)
- current_hash_++;
+ if (LoadEntry(next_addr_, entry))
return true;
- } else {
- printf("Unable to load entry at address 0x%x\n", next_addr_);
- next_addr_ = 0;
- current_hash_++;
- }
+
+ printf("Unable to load entry at address 0x%x\n", next_addr_);
+ next_addr_ = 0;
+ current_hash_++;
}
for (int i = current_hash_; i < index_->header.table_len; i++) {
@@ -172,14 +177,10 @@ bool CacheDumper::GetEntry(disk_cache::EntryStore* entry) {
// dumping every entry that we can find.
if (index_->table[i]) {
current_hash_ = i;
- if (LoadEntry(index_->table[i], entry)) {
- next_addr_ = entry->next;
- if (!next_addr_)
- current_hash_++;
+ if (LoadEntry(index_->table[i], entry))
return true;
- } else {
- printf("Unable to load entry at address 0x%x\n", index_->table[i]);
- }
+
+ printf("Unable to load entry at address 0x%x\n", index_->table[i]);
}
}
return false;
@@ -198,6 +199,15 @@ bool CacheDumper::LoadEntry(disk_cache::CacheAddr addr,
memcpy(entry, entry_block.Data(), sizeof(*entry));
printf("Entry at 0x%x\n", addr);
+
+ // Prepare for the next entry to load.
+ next_addr_ = entry->next;
+ if (next_addr_) {
+ dumped_entries_.insert(addr);
+ } else {
+ current_hash_++;
+ dumped_entries_.clear();
+ }
return true;
}
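
Finally, the dumper's guard is the read-only counterpart of the MatchEntry fix: it only reports the loop and skips to the next bucket instead of repairing the file. A self-contained sketch of that walk (not part of the patch), with a plain map standing in for the on-disk entry blocks; EntryStoreLite and DumpAll are illustrative names.

    #include <cstdint>
    #include <cstdio>
    #include <map>
    #include <set>
    #include <vector>

    // Sketch only: read-only chain walk with loop detection, in the spirit of
    // CacheDumper::GetEntry.
    struct EntryStoreLite {
      uint32_t next = 0;  // Address of the next entry in the collision chain.
    };

    void DumpAll(const std::vector<uint32_t>& table,
                 const std::map<uint32_t, EntryStoreLite>& entries) {
      for (uint32_t head : table) {
        std::set<uint32_t> dumped;  // Mirrors dumped_entries_, reset per bucket.
        for (uint32_t addr = head; addr != 0;) {
          if (!dumped.insert(addr).second) {
            std::printf("Loop detected\n");  // Report it; the file is not touched.
            break;                           // Move on to the next hash bucket.
          }
          auto it = entries.find(addr);
          if (it == entries.end()) {
            std::printf("Unable to load entry at address 0x%x\n", addr);
            break;
          }
          std::printf("Entry at 0x%x\n", addr);
          addr = it->second.next;
        }
      }
    }
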