diff options
author | gavinp <gavinp@chromium.org> | 2014-09-20 06:20:43 -0700 |
---|---|---|
committer | Commit bot <commit-bot@chromium.org> | 2014-09-20 13:21:01 +0000 |
commit | edbfba945a76d97383122a01529abfe3ce63d34d (patch) | |
tree | 42ed81a84d4c746fa29dc24db7f44113489bfbb1 /net/disk_cache | |
parent | 8ba4546fd0922508e212348f19dd2b624290c683 (diff) | |
download | chromium_src-edbfba945a76d97383122a01529abfe3ce63d34d.zip chromium_src-edbfba945a76d97383122a01529abfe3ce63d34d.tar.gz chromium_src-edbfba945a76d97383122a01529abfe3ce63d34d.tar.bz2 |
Reland of "Remove void** from disk_cache interface."
Enumeration and iteration were passing around void**. With this CL, we
instead use an Iterator object.
TBR=clamy@chromium.org,jkarlin@chromium.org,jsbell@chromium.org
BUG=413644
Review URL: https://codereview.chromium.org/583283002
Cr-Commit-Position: refs/heads/master@{#295870}
Diffstat (limited to 'net/disk_cache')
-rw-r--r-- | net/disk_cache/backend_unittest.cc | 154 | ||||
-rw-r--r-- | net/disk_cache/blockfile/backend_impl.cc | 34 | ||||
-rw-r--r-- | net/disk_cache/blockfile/backend_impl.h | 13 | ||||
-rw-r--r-- | net/disk_cache/blockfile/backend_impl_v3.cc | 46 | ||||
-rw-r--r-- | net/disk_cache/blockfile/backend_impl_v3.h | 6 | ||||
-rw-r--r-- | net/disk_cache/disk_cache.h | 49 | ||||
-rw-r--r-- | net/disk_cache/disk_cache_test_base.cc | 22 | ||||
-rw-r--r-- | net/disk_cache/disk_cache_test_base.h | 14 | ||||
-rw-r--r-- | net/disk_cache/entry_unittest.cc | 8 | ||||
-rw-r--r-- | net/disk_cache/memory/mem_backend_impl.cc | 60 | ||||
-rw-r--r-- | net/disk_cache/memory/mem_backend_impl.h | 11 | ||||
-rw-r--r-- | net/disk_cache/simple/simple_backend_impl.cc | 158 | ||||
-rw-r--r-- | net/disk_cache/simple/simple_backend_impl.h | 45 |
13 files changed, 322 insertions(+), 298 deletions(-)
diff --git a/net/disk_cache/backend_unittest.cc b/net/disk_cache/backend_unittest.cc index c3f2cfa..572691c 100644 --- a/net/disk_cache/backend_unittest.cc +++ b/net/disk_cache/backend_unittest.cc @@ -81,7 +81,7 @@ class DiskCacheBackendTest : public DiskCacheTestWithCache { bool CreateSetOfRandomEntries(std::set<std::string>* key_pool); bool EnumerateAndMatchKeys(int max_to_open, - void** iter, + TestIterator* iter, std::set<std::string>* keys_to_match, size_t* count); @@ -256,12 +256,14 @@ bool DiskCacheBackendTest::CreateSetOfRandomEntries( // OpenNextEntry stops returning net::OK. bool DiskCacheBackendTest::EnumerateAndMatchKeys( int max_to_open, - void** iter, + TestIterator* iter, std::set<std::string>* keys_to_match, size_t* count) { disk_cache::Entry* entry; - while (OpenNextEntry(iter, &entry) == net::OK) { + if (!iter) + return false; + while (iter->OpenNextEntry(&entry) == net::OK) { if (!entry) return false; EXPECT_EQ(1U, keys_to_match->erase(entry->GetKey())); @@ -1237,11 +1239,11 @@ void DiskCacheBackendTest::BackendEnumerations() { Time final = Time::Now(); disk_cache::Entry* entry; - void* iter = NULL; + scoped_ptr<TestIterator> iter = CreateIterator(); int count = 0; Time last_modified[kNumEntries]; Time last_used[kNumEntries]; - while (OpenNextEntry(&iter, &entry) == net::OK) { + while (iter->OpenNextEntry(&entry) == net::OK) { ASSERT_TRUE(NULL != entry); if (count < kNumEntries) { last_modified[count] = entry->GetLastModified(); @@ -1255,10 +1257,10 @@ void DiskCacheBackendTest::BackendEnumerations() { }; EXPECT_EQ(kNumEntries, count); - iter = NULL; + iter = CreateIterator(); count = 0; // The previous enumeration should not have changed the timestamps. 
- while (OpenNextEntry(&iter, &entry) == net::OK) { + while (iter->OpenNextEntry(&entry) == net::OK) { ASSERT_TRUE(NULL != entry); if (count < kNumEntries) { EXPECT_TRUE(last_modified[count] == entry->GetLastModified()); @@ -1309,8 +1311,8 @@ void DiskCacheBackendTest::BackendEnumerations2() { // Make sure that the timestamp is not the same. AddDelay(); ASSERT_EQ(net::OK, OpenEntry(second, &entry1)); - void* iter = NULL; - ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry2)); + scoped_ptr<TestIterator> iter = CreateIterator(); + ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry2)); EXPECT_EQ(entry2->GetKey(), second); // Two entries and the iterator pointing at "first". @@ -1318,15 +1320,15 @@ void DiskCacheBackendTest::BackendEnumerations2() { entry2->Close(); // The iterator should still be valid, so we should not crash. - ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry2)); + ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry2)); EXPECT_EQ(entry2->GetKey(), first); entry2->Close(); - cache_->EndEnumeration(&iter); + iter = CreateIterator(); // Modify the oldest entry and get the newest element. ASSERT_EQ(net::OK, OpenEntry(first, &entry1)); EXPECT_EQ(0, WriteData(entry1, 0, 200, NULL, 0, false)); - ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry2)); + ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry2)); if (type_ == net::APP_CACHE) { // The list is not updated. 
EXPECT_EQ(entry2->GetKey(), second); @@ -1336,7 +1338,6 @@ void DiskCacheBackendTest::BackendEnumerations2() { entry1->Close(); entry2->Close(); - cache_->EndEnumeration(&iter); } TEST_F(DiskCacheBackendTest, Enumerations2) { @@ -1391,11 +1392,10 @@ TEST_F(DiskCacheBackendTest, ShaderCacheEnumerationReadData) { EXPECT_EQ(kSize, ReadData(entry1, 0, 0, buffer1.get(), kSize)); entry1->Close(); - void* iter = NULL; - ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry2)); + scoped_ptr<TestIterator> iter = CreateIterator(); + ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry2)); EXPECT_EQ(entry2->GetKey(), second); entry2->Close(); - cache_->EndEnumeration(&iter); } #if !defined(LEAK_SANITIZER) @@ -1424,9 +1424,9 @@ void DiskCacheBackendTest::BackendInvalidEntryEnumeration() { SimulateCrash(); - void* iter = NULL; + scoped_ptr<TestIterator> iter = CreateIterator(); int count = 0; - while (OpenNextEntry(&iter, &entry) == net::OK) { + while (iter->OpenNextEntry(&entry) == net::OK) { ASSERT_TRUE(NULL != entry); EXPECT_EQ(key2, entry->GetKey()); entry->Close(); @@ -1466,9 +1466,8 @@ void DiskCacheBackendTest::BackendFixEnumerators() { EXPECT_EQ(kNumEntries, cache_->GetEntryCount()); disk_cache::Entry *entry1, *entry2; - void* iter1 = NULL; - void* iter2 = NULL; - ASSERT_EQ(net::OK, OpenNextEntry(&iter1, &entry1)); + scoped_ptr<TestIterator> iter1 = CreateIterator(), iter2 = CreateIterator(); + ASSERT_EQ(net::OK, iter1->OpenNextEntry(&entry1)); ASSERT_TRUE(NULL != entry1); entry1->Close(); entry1 = NULL; @@ -1477,17 +1476,17 @@ void DiskCacheBackendTest::BackendFixEnumerators() { for (int i = 0; i < kNumEntries / 2; i++) { if (entry1) entry1->Close(); - ASSERT_EQ(net::OK, OpenNextEntry(&iter1, &entry1)); + ASSERT_EQ(net::OK, iter1->OpenNextEntry(&entry1)); ASSERT_TRUE(NULL != entry1); - ASSERT_EQ(net::OK, OpenNextEntry(&iter2, &entry2)); + ASSERT_EQ(net::OK, iter2->OpenNextEntry(&entry2)); ASSERT_TRUE(NULL != entry2); entry2->Close(); } // Messing up with entry1 will modify 
entry2->next. entry1->Doom(); - ASSERT_EQ(net::OK, OpenNextEntry(&iter2, &entry2)); + ASSERT_EQ(net::OK, iter2->OpenNextEntry(&entry2)); ASSERT_TRUE(NULL != entry2); // The link entry2->entry1 should be broken. @@ -1496,12 +1495,9 @@ void DiskCacheBackendTest::BackendFixEnumerators() { entry2->Close(); // And the second iterator should keep working. - ASSERT_EQ(net::OK, OpenNextEntry(&iter2, &entry2)); + ASSERT_EQ(net::OK, iter2->OpenNextEntry(&entry2)); ASSERT_TRUE(NULL != entry2); entry2->Close(); - - cache_->EndEnumeration(&iter1); - cache_->EndEnumeration(&iter2); } TEST_F(DiskCacheBackendTest, FixEnumerators) { @@ -2027,8 +2023,8 @@ void DiskCacheBackendTest::BackendInvalidEntry3() { InitCache(); disk_cache::Entry* entry; - void* iter = NULL; - while (OpenNextEntry(&iter, &entry) == net::OK) { + scoped_ptr<TestIterator> iter = CreateIterator(); + while (iter->OpenNextEntry(&entry) == net::OK) { entry->Close(); } } @@ -2172,8 +2168,8 @@ void DiskCacheBackendTest::BackendInvalidEntry7() { EXPECT_EQ(1, cache_->GetEntryCount()); // We should delete the cache. The list still has a corrupt node. - void* iter = NULL; - EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry)); + scoped_ptr<TestIterator> iter = CreateIterator(); + EXPECT_NE(net::OK, iter->OpenNextEntry(&entry)); FlushQueueForTest(); EXPECT_EQ(0, cache_->GetEntryCount()); } @@ -2216,10 +2212,10 @@ void DiskCacheBackendTest::BackendInvalidEntry8() { EXPECT_EQ(1, cache_->GetEntryCount()); // We should not delete the cache. 
- void* iter = NULL; - ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry)); + scoped_ptr<TestIterator> iter = CreateIterator(); + ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry)); entry->Close(); - EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry)); + EXPECT_NE(net::OK, iter->OpenNextEntry(&entry)); EXPECT_EQ(1, cache_->GetEntryCount()); } @@ -2266,13 +2262,13 @@ void DiskCacheBackendTest::BackendInvalidEntry9(bool eviction) { } else { // We should detect the problem through the list, but we should not delete // the entry, just fail the iteration. - void* iter = NULL; - EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry)); + scoped_ptr<TestIterator> iter = CreateIterator(); + EXPECT_NE(net::OK, iter->OpenNextEntry(&entry)); // Now a full iteration will work, and return one entry. - ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry)); + ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry)); entry->Close(); - EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry)); + EXPECT_NE(net::OK, iter->OpenNextEntry(&entry)); // This should detect what's left of the bad entry. EXPECT_NE(net::OK, OpenEntry(second, &entry)); @@ -2343,13 +2339,13 @@ void DiskCacheBackendTest::BackendInvalidEntry10(bool eviction) { // Detection order: third -> second -> first. // We should detect the problem through the list, but we should not delete // the entry. - void* iter = NULL; - ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry)); + scoped_ptr<TestIterator> iter = CreateIterator(); + ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry)); entry->Close(); - ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry)); + ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry)); EXPECT_EQ(first, entry->GetKey()); entry->Close(); - EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry)); + EXPECT_NE(net::OK, iter->OpenNextEntry(&entry)); } DisableIntegrityCheck(); } @@ -2410,17 +2406,17 @@ void DiskCacheBackendTest::BackendInvalidEntry11(bool eviction) { // Detection order: third -> second. 
// We should detect the problem through the list, but we should not delete // the entry, just fail the iteration. - void* iter = NULL; - ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry)); + scoped_ptr<TestIterator> iter = CreateIterator(); + ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry)); entry->Close(); - EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry)); + EXPECT_NE(net::OK, iter->OpenNextEntry(&entry)); // Now a full iteration will work, and return two entries. - ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry)); + ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry)); entry->Close(); - ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry)); + ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry)); entry->Close(); - EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry)); + EXPECT_NE(net::OK, iter->OpenNextEntry(&entry)); } DisableIntegrityCheck(); } @@ -2498,12 +2494,12 @@ TEST_F(DiskCacheBackendTest, NewEvictionInvalidRankings2) { // If the LRU is corrupt, we delete the cache. void DiskCacheBackendTest::BackendInvalidRankings() { disk_cache::Entry* entry; - void* iter = NULL; - ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry)); + scoped_ptr<TestIterator> iter = CreateIterator(); + ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry)); entry->Close(); EXPECT_EQ(2, cache_->GetEntryCount()); - EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry)); + EXPECT_NE(net::OK, iter->OpenNextEntry(&entry)); FlushQueueForTest(); // Allow the restart to finish. EXPECT_EQ(0, cache_->GetEntryCount()); } @@ -2543,10 +2539,10 @@ TEST_F(DiskCacheBackendTest, NewEvictionInvalidRankingsFailure) { // If the LRU is corrupt and we have open entries, we disable the cache. 
void DiskCacheBackendTest::BackendDisable() { disk_cache::Entry *entry1, *entry2; - void* iter = NULL; - ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry1)); + scoped_ptr<TestIterator> iter = CreateIterator(); + ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry1)); - EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry2)); + EXPECT_NE(net::OK, iter->OpenNextEntry(&entry2)); EXPECT_EQ(0, cache_->GetEntryCount()); EXPECT_NE(net::OK, CreateEntry("Something new", &entry2)); @@ -2594,9 +2590,9 @@ void DiskCacheBackendTest::BackendDisable2() { EXPECT_EQ(8, cache_->GetEntryCount()); disk_cache::Entry* entry; - void* iter = NULL; + scoped_ptr<TestIterator> iter = CreateIterator(); int count = 0; - while (OpenNextEntry(&iter, &entry) == net::OK) { + while (iter->OpenNextEntry(&entry) == net::OK) { ASSERT_TRUE(NULL != entry); entry->Close(); count++; @@ -2642,12 +2638,12 @@ TEST_F(DiskCacheBackendTest, NewEvictionDisableFailure2) { // If the index size changes when we disable the cache, we should not crash. void DiskCacheBackendTest::BackendDisable3() { disk_cache::Entry *entry1, *entry2; - void* iter = NULL; + scoped_ptr<TestIterator> iter = CreateIterator(); EXPECT_EQ(2, cache_->GetEntryCount()); - ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry1)); + ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry1)); entry1->Close(); - EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry2)); + EXPECT_NE(net::OK, iter->OpenNextEntry(&entry2)); FlushQueueForTest(); ASSERT_EQ(net::OK, CreateEntry("Something new", &entry2)); @@ -2676,8 +2672,8 @@ TEST_F(DiskCacheBackendTest, NewEvictionDisableSuccess3) { // If we disable the cache, already open entries should work as far as possible. 
void DiskCacheBackendTest::BackendDisable4() { disk_cache::Entry *entry1, *entry2, *entry3, *entry4; - void* iter = NULL; - ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry1)); + scoped_ptr<TestIterator> iter = CreateIterator(); + ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry1)); char key2[2000]; char key3[20000]; @@ -2695,7 +2691,7 @@ void DiskCacheBackendTest::BackendDisable4() { EXPECT_EQ(kBufSize, WriteData(entry3, 0, 0, buf.get(), kBufSize, false)); // This line should disable the cache but not delete it. - EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry4)); + EXPECT_NE(net::OK, iter->OpenNextEntry(&entry4)); EXPECT_EQ(0, cache_->GetEntryCount()); EXPECT_NE(net::OK, CreateEntry("cache is disabled", &entry4)); @@ -3376,29 +3372,29 @@ TEST_F(DiskCacheBackendTest, SimpleCacheEnumerationBasics) { // Check that enumeration returns all entries. std::set<std::string> keys_to_match(key_pool); - void* iter = NULL; + scoped_ptr<TestIterator> iter = CreateIterator(); size_t count = 0; - ASSERT_TRUE(EnumerateAndMatchKeys(-1, &iter, &keys_to_match, &count)); - cache_->EndEnumeration(&iter); + ASSERT_TRUE(EnumerateAndMatchKeys(-1, iter.get(), &keys_to_match, &count)); + iter.reset(); EXPECT_EQ(key_pool.size(), count); EXPECT_TRUE(keys_to_match.empty()); // Check that opening entries does not affect enumeration. 
keys_to_match = key_pool; - iter = NULL; + iter = CreateIterator(); count = 0; disk_cache::Entry* entry_opened_before; ASSERT_EQ(net::OK, OpenEntry(*(key_pool.begin()), &entry_opened_before)); ASSERT_TRUE(EnumerateAndMatchKeys(key_pool.size()/2, - &iter, + iter.get(), &keys_to_match, &count)); disk_cache::Entry* entry_opened_middle; ASSERT_EQ(net::OK, OpenEntry(*(keys_to_match.begin()), &entry_opened_middle)); - ASSERT_TRUE(EnumerateAndMatchKeys(-1, &iter, &keys_to_match, &count)); - cache_->EndEnumeration(&iter); + ASSERT_TRUE(EnumerateAndMatchKeys(-1, iter.get(), &keys_to_match, &count)); + iter.reset(); entry_opened_before->Close(); entry_opened_middle->Close(); @@ -3416,10 +3412,10 @@ TEST_F(DiskCacheBackendTest, SimpleCacheEnumerationWhileDoomed) { // Check that enumeration returns all entries but the doomed one. std::set<std::string> keys_to_match(key_pool); - void* iter = NULL; + scoped_ptr<TestIterator> iter = CreateIterator(); size_t count = 0; ASSERT_TRUE(EnumerateAndMatchKeys(key_pool.size()/2, - &iter, + iter.get(), &keys_to_match, &count)); @@ -3427,8 +3423,8 @@ TEST_F(DiskCacheBackendTest, SimpleCacheEnumerationWhileDoomed) { DoomEntry(key_to_delete); keys_to_match.erase(key_to_delete); key_pool.erase(key_to_delete); - ASSERT_TRUE(EnumerateAndMatchKeys(-1, &iter, &keys_to_match, &count)); - cache_->EndEnumeration(&iter); + ASSERT_TRUE(EnumerateAndMatchKeys(-1, iter.get(), &keys_to_match, &count)); + iter.reset(); EXPECT_EQ(key_pool.size(), count); EXPECT_TRUE(keys_to_match.empty()); @@ -3464,10 +3460,10 @@ TEST_F(DiskCacheBackendTest, SimpleCacheEnumerationCorruption) { // Check that enumeration returns all entries but the corrupt one. 
std::set<std::string> keys_to_match(key_pool); - void* iter = NULL; + scoped_ptr<TestIterator> iter = CreateIterator(); size_t count = 0; - ASSERT_TRUE(EnumerateAndMatchKeys(-1, &iter, &keys_to_match, &count)); - cache_->EndEnumeration(&iter); + ASSERT_TRUE(EnumerateAndMatchKeys(-1, iter.get(), &keys_to_match, &count)); + iter.reset(); EXPECT_EQ(key_pool.size(), count); EXPECT_TRUE(keys_to_match.empty()); @@ -3481,9 +3477,9 @@ TEST_F(DiskCacheBackendTest, SimpleCacheEnumerationDestruction) { std::set<std::string> key_pool; ASSERT_TRUE(CreateSetOfRandomEntries(&key_pool)); - void* iter = NULL; + scoped_ptr<TestIterator> iter = CreateIterator(); disk_cache::Entry* entry = NULL; - ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry)); + ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry)); EXPECT_TRUE(entry); disk_cache::ScopedEntryPtr entry_closer(entry); diff --git a/net/disk_cache/blockfile/backend_impl.cc b/net/disk_cache/blockfile/backend_impl.cc index c38b2bb..b278543 100644 --- a/net/disk_cache/blockfile/backend_impl.cc +++ b/net/disk_cache/blockfile/backend_impl.cc @@ -1250,16 +1250,32 @@ int BackendImpl::DoomEntriesSince(const base::Time initial_time, return net::ERR_IO_PENDING; } -int BackendImpl::OpenNextEntry(void** iter, Entry** next_entry, - const CompletionCallback& callback) { - DCHECK(!callback.is_null()); - background_queue_.OpenNextEntry(iter, next_entry, callback); - return net::ERR_IO_PENDING; -} +class BackendImpl::IteratorImpl : public Backend::Iterator { + public: + explicit IteratorImpl(base::WeakPtr<InFlightBackendIO> background_queue) + : background_queue_(background_queue), data_(NULL) { + } -void BackendImpl::EndEnumeration(void** iter) { - background_queue_.EndEnumeration(*iter); - *iter = NULL; + virtual ~IteratorImpl() { + if (background_queue_) + background_queue_->EndEnumeration(data_); + } + + virtual int OpenNextEntry(Entry** next_entry, + const net::CompletionCallback& callback) OVERRIDE { + if (!background_queue_) + return net::ERR_FAILED; 
+ background_queue_->OpenNextEntry(&data_, next_entry, callback); + return net::ERR_IO_PENDING; + } + + private: + const base::WeakPtr<InFlightBackendIO> background_queue_; + void* data_; +}; + +scoped_ptr<Backend::Iterator> BackendImpl::CreateIterator() { + return scoped_ptr<Backend::Iterator>(new IteratorImpl(GetBackgroundQueue())); } void BackendImpl::GetStats(StatsItems* stats) { diff --git a/net/disk_cache/blockfile/backend_impl.h b/net/disk_cache/blockfile/backend_impl.h index e2304ff..775374e 100644 --- a/net/disk_cache/blockfile/backend_impl.h +++ b/net/disk_cache/blockfile/backend_impl.h @@ -273,14 +273,21 @@ class NET_EXPORT_PRIVATE BackendImpl : public Backend { const CompletionCallback& callback) OVERRIDE; virtual int DoomEntriesSince(base::Time initial_time, const CompletionCallback& callback) OVERRIDE; - virtual int OpenNextEntry(void** iter, Entry** next_entry, - const CompletionCallback& callback) OVERRIDE; - virtual void EndEnumeration(void** iter) OVERRIDE; + // NOTE: The blockfile Backend::Iterator::OpenNextEntry method does not modify + // the last_used field of the entry, and therefore it does not impact the + // eviction ranking of the entry. However, an enumeration will go through all + // entries on the cache only if the cache is not modified while the + // enumeration is taking place. Significantly altering the entry pointed by + // the iterator (for example, deleting the entry) will invalidate the + // iterator. Performing operations on an entry that modify the entry may + // result in loops in the iteration, skipped entries or similar. + virtual scoped_ptr<Iterator> CreateIterator() OVERRIDE; virtual void GetStats(StatsItems* stats) OVERRIDE; virtual void OnExternalCacheHit(const std::string& key) OVERRIDE; private: typedef base::hash_map<CacheAddr, EntryImpl*> EntriesMap; + class IteratorImpl; // Creates a new backing file for the cache index. 
bool CreateBackingStore(disk_cache::File* file); diff --git a/net/disk_cache/blockfile/backend_impl_v3.cc b/net/disk_cache/blockfile/backend_impl_v3.cc index 9a7cff1..b44b195 100644 --- a/net/disk_cache/blockfile/backend_impl_v3.cc +++ b/net/disk_cache/blockfile/backend_impl_v3.cc @@ -660,17 +660,27 @@ int BackendImplV3::DoomEntriesSince(base::Time initial_time, } } -int BackendImplV3::OpenNextEntry(void** iter, Entry** next_entry, - const CompletionCallback& callback) { - DCHECK(!callback.is_null()); - background_queue_.OpenNextEntry(iter, next_entry, callback); - return net::ERR_IO_PENDING; -} +class BackendImplV3::IteratorImpl : public Backend::Iterator { + public: + explicit IteratorImpl(base::WeakPtr<InFlightBackendIO> background_queue) + : background_queue_(background_queue), data_(NULL) { + } + + virtual int OpenNextEntry(Entry** next_entry, + const net::CompletionCallback& callback) OVERRIDE { + if (!background_queue_) + return net::ERR_FAILED; + background_queue_->OpenNextEntry(&data_, next_entry, callback); + return net::ERR_IO_PENDING; + } -void BackendImplV3::EndEnumeration(void** iter) { - scoped_ptr<IndexIterator> iterator( - reinterpret_cast<IndexIterator*>(*iter)); - *iter = NULL; + private: + const base::WeakPtr<InFlightBackendIO> background_queue_; + void* data_; +}; + +scoped_ptr<Backend::Iterator> BackendImplV3::CreateIterator() { + return scoped_ptr<Backend::Iterator>(new IteratorImpl(GetBackgroundQueue())); } void BackendImplV3::GetStats(StatsItems* stats) { @@ -1497,13 +1507,17 @@ int BackendImplV3::DoomEntriesSince(base::Time initial_time, return net::ERR_FAILED; } -int BackendImplV3::OpenNextEntry(void** iter, Entry** next_entry, - const CompletionCallback& callback) { - return net::ERR_FAILED; -} -void BackendImplV3::EndEnumeration(void** iter) { - NOTIMPLEMENTED(); +class BackendImplV3::NotImplementedIterator : public Backend::Iterator { + public: + virtual int OpenNextEntry(disk_cache::Entry** next_entry, + const net::CompletionCallback& 
callback) OVERRIDE { + return net::ERR_NOT_IMPLEMENTED; + } +}; + +scoped_ptr<Backend::Iterator> BackendImplV3::CreateIterator() { + return scoped_ptr<Iterator>(new NotImplementedIterator()); } void BackendImplV3::GetStats(StatsItems* stats) { diff --git a/net/disk_cache/blockfile/backend_impl_v3.h b/net/disk_cache/blockfile/backend_impl_v3.h index b57bb89..ca64997 100644 --- a/net/disk_cache/blockfile/backend_impl_v3.h +++ b/net/disk_cache/blockfile/backend_impl_v3.h @@ -186,15 +186,15 @@ class NET_EXPORT_PRIVATE BackendImplV3 : public Backend { const CompletionCallback& callback) OVERRIDE; virtual int DoomEntriesSince(base::Time initial_time, const CompletionCallback& callback) OVERRIDE; - virtual int OpenNextEntry(void** iter, Entry** next_entry, - const CompletionCallback& callback) OVERRIDE; - virtual void EndEnumeration(void** iter) OVERRIDE; + virtual scoped_ptr<Iterator> CreateIterator() OVERRIDE; virtual void GetStats(StatsItems* stats) OVERRIDE; virtual void OnExternalCacheHit(const std::string& key) OVERRIDE; private: friend class EvictionV3; typedef base::hash_map<CacheAddr, EntryImplV3*> EntriesMap; + class IteratorImpl; + class NotImplementedIterator; class Worker; void AdjustMaxCacheSize(); diff --git a/net/disk_cache/disk_cache.h b/net/disk_cache/disk_cache.h index 4f64b9a..769ab36 100644 --- a/net/disk_cache/disk_cache.h +++ b/net/disk_cache/disk_cache.h @@ -13,6 +13,7 @@ #include "base/basictypes.h" #include "base/memory/ref_counted.h" +#include "base/memory/scoped_ptr.h" #include "base/time/time.h" #include "net/base/cache_type.h" #include "net/base/completion_callback.h" @@ -64,6 +65,26 @@ class NET_EXPORT Backend { public: typedef net::CompletionCallback CompletionCallback; + class Iterator { + public: + virtual ~Iterator() {} + + // OpenNextEntry returns |net::OK| and provides |next_entry| if there is an + // entry to enumerate. It returns |net::ERR_FAILED| at the end of + // enumeration. 
If the function returns |net::ERR_IO_PENDING|, then the + // final result will be passed to the provided |callback|, otherwise + // |callback| will not be called. If any entry in the cache is modified + // during iteration, the result of this function is thereafter undefined. + // + // Calling OpenNextEntry after the backend which created it is destroyed + // may fail with |net::ERR_FAILED|; however it should not crash. + // + // Some cache backends make stronger guarantees about mutation during + // iteration, see top comment in simple_backend_impl.h for details. + virtual int OpenNextEntry(Entry** next_entry, + const CompletionCallback& callback) = 0; + }; + // If the backend is destroyed when there are operations in progress (any // callback that has not been invoked yet), this method cancels said // operations so the callbacks are not invoked, possibly leaving the work @@ -123,31 +144,9 @@ class NET_EXPORT Backend { virtual int DoomEntriesSince(base::Time initial_time, const CompletionCallback& callback) = 0; - // Enumerates the cache. Initialize |iter| to NULL before calling this method - // the first time. That will cause the enumeration to start at the head of - // the cache. For subsequent calls, pass the same |iter| pointer again without - // changing its value. This method returns ERR_FAILED when there are no more - // entries to enumerate. When the entry pointer is no longer needed, its - // Close method should be called. The return value is a net error code. If - // this method returns ERR_IO_PENDING, the |callback| will be invoked when the - // |next_entry| is available. The pointer to receive the |next_entry| must - // remain valid until the operation completes. - // - // NOTE: This method does not modify the last_used field of the entry, and - // therefore it does not impact the eviction ranking of the entry. 
However, - // an enumeration will go through all entries on the cache only if the cache - // is not modified while the enumeration is taking place. Significantly - // altering the entry pointed by |iter| (for example, deleting the entry) will - // invalidate |iter|. Performing operations on an entry that modify the entry - // may result in loops in the iteration, skipped entries or similar. - virtual int OpenNextEntry(void** iter, Entry** next_entry, - const CompletionCallback& callback) = 0; - - // Releases iter without returning the next entry. Whenever OpenNextEntry() - // returns true, but the caller is not interested in continuing the - // enumeration by calling OpenNextEntry() again, the enumeration must be - // ended by calling this method with iter returned by OpenNextEntry(). - virtual void EndEnumeration(void** iter) = 0; + // Returns an iterator which will enumerate all entries of the cache in an + // undefined order. + virtual scoped_ptr<Iterator> CreateIterator() = 0; // Return a list of cache statistics. 
virtual void GetStats( diff --git a/net/disk_cache/disk_cache_test_base.cc b/net/disk_cache/disk_cache_test_base.cc index d0f9842..c3d94e6 100644 --- a/net/disk_cache/disk_cache_test_base.cc +++ b/net/disk_cache/disk_cache_test_base.cc @@ -52,6 +52,20 @@ void DiskCacheTest::TearDown() { base::RunLoop().RunUntilIdle(); } +DiskCacheTestWithCache::TestIterator::TestIterator( + scoped_ptr<disk_cache::Backend::Iterator> iterator) + : iterator_(iterator.Pass()) { +} + +DiskCacheTestWithCache::TestIterator::~TestIterator() {} + +int DiskCacheTestWithCache::TestIterator::OpenNextEntry( + disk_cache::Entry** next_entry) { + net::TestCompletionCallback cb; + int rv = iterator_->OpenNextEntry(next_entry, cb.callback()); + return cb.GetResult(rv); +} + DiskCacheTestWithCache::DiskCacheTestWithCache() : cache_impl_(NULL), simple_cache_impl_(NULL), @@ -153,11 +167,9 @@ int DiskCacheTestWithCache::DoomEntriesSince(const base::Time initial_time) { return cb.GetResult(rv); } -int DiskCacheTestWithCache::OpenNextEntry(void** iter, - disk_cache::Entry** next_entry) { - net::TestCompletionCallback cb; - int rv = cache_->OpenNextEntry(iter, next_entry, cb.callback()); - return cb.GetResult(rv); +scoped_ptr<DiskCacheTestWithCache::TestIterator> + DiskCacheTestWithCache::CreateIterator() { + return scoped_ptr<TestIterator>(new TestIterator(cache_->CreateIterator())); } void DiskCacheTestWithCache::FlushQueueForTest() { diff --git a/net/disk_cache/disk_cache_test_base.h b/net/disk_cache/disk_cache_test_base.h index b724906..225d37f 100644 --- a/net/disk_cache/disk_cache_test_base.h +++ b/net/disk_cache/disk_cache_test_base.h @@ -11,6 +11,7 @@ #include "base/memory/scoped_ptr.h" #include "base/threading/thread.h" #include "net/base/cache_type.h" +#include "net/disk_cache/disk_cache.h" #include "testing/gtest/include/gtest/gtest.h" #include "testing/platform_test.h" @@ -57,6 +58,17 @@ class DiskCacheTest : public PlatformTest { // Provides basic support for cache related tests. 
class DiskCacheTestWithCache : public DiskCacheTest { protected: + class TestIterator { + public: + explicit TestIterator(scoped_ptr<disk_cache::Backend::Iterator> iterator); + ~TestIterator(); + + int OpenNextEntry(disk_cache::Entry** next_entry); + + private: + scoped_ptr<disk_cache::Backend::Iterator> iterator_; + }; + DiskCacheTestWithCache(); virtual ~DiskCacheTestWithCache(); @@ -117,7 +129,7 @@ class DiskCacheTestWithCache : public DiskCacheTest { int DoomEntriesBetween(const base::Time initial_time, const base::Time end_time); int DoomEntriesSince(const base::Time initial_time); - int OpenNextEntry(void** iter, disk_cache::Entry** next_entry); + scoped_ptr<TestIterator> CreateIterator(); void FlushQueueForTest(); void RunTaskForTest(const base::Closure& closure); int ReadData(disk_cache::Entry* entry, int index, int offset, diff --git a/net/disk_cache/entry_unittest.cc b/net/disk_cache/entry_unittest.cc index 803f98f..afe93ac 100644 --- a/net/disk_cache/entry_unittest.cc +++ b/net/disk_cache/entry_unittest.cc @@ -1617,10 +1617,10 @@ TEST_F(DiskCacheEntryTest, MemoryOnlyEnumerationWithSparseEntries) { parent_entry->Close(); // Perform the enumerations. - void* iter = NULL; + scoped_ptr<TestIterator> iter = CreateIterator(); disk_cache::Entry* entry = NULL; int count = 0; - while (OpenNextEntry(&iter, &entry) == net::OK) { + while (iter->OpenNextEntry(&entry) == net::OK) { ASSERT_TRUE(entry != NULL); ++count; disk_cache::MemEntryImpl* mem_entry = @@ -2217,10 +2217,10 @@ TEST_F(DiskCacheEntryTest, CleanupSparseEntry) { entry->Close(); EXPECT_EQ(4, cache_->GetEntryCount()); - void* iter = NULL; + scoped_ptr<TestIterator> iter = CreateIterator(); int count = 0; std::string child_key[2]; - while (OpenNextEntry(&iter, &entry) == net::OK) { + while (iter->OpenNextEntry(&entry) == net::OK) { ASSERT_TRUE(entry != NULL); // Writing to an entry will alter the LRU list and invalidate the iterator. 
if (entry->GetKey() != key && count < 2) diff --git a/net/disk_cache/memory/mem_backend_impl.cc b/net/disk_cache/memory/mem_backend_impl.cc index e69c00e..848ef98 100644 --- a/net/disk_cache/memory/mem_backend_impl.cc +++ b/net/disk_cache/memory/mem_backend_impl.cc @@ -29,7 +29,8 @@ int LowWaterAdjust(int high_water) { namespace disk_cache { MemBackendImpl::MemBackendImpl(net::NetLog* net_log) - : max_size_(0), current_size_(0), net_log_(net_log) {} + : max_size_(0), current_size_(0), net_log_(net_log), weak_factory_(this) { +} MemBackendImpl::~MemBackendImpl() { EntryMap::iterator it = entries_.begin(); @@ -180,16 +181,40 @@ int MemBackendImpl::DoomEntriesSince(const base::Time initial_time, return net::ERR_FAILED; } -int MemBackendImpl::OpenNextEntry(void** iter, Entry** next_entry, - const CompletionCallback& callback) { - if (OpenNextEntry(iter, next_entry)) - return net::OK; +class MemBackendImpl::MemIterator : public Backend::Iterator { + public: + explicit MemIterator(base::WeakPtr<MemBackendImpl> backend) + : backend_(backend), current_(NULL) { + } - return net::ERR_FAILED; -} + virtual int OpenNextEntry(Entry** next_entry, + const CompletionCallback& callback) OVERRIDE { + if (!backend_) + return net::ERR_FAILED; + + MemEntryImpl* node = backend_->rankings_.GetNext(current_); + // We should never return a child entry so iterate until we hit a parent + // entry. 
+ while (node && node->type() != MemEntryImpl::kParentEntry) + node = backend_->rankings_.GetNext(node); + *next_entry = node; + current_ = node; + + if (node) { + node->Open(); + return net::OK; + } + return net::ERR_FAILED; + } -void MemBackendImpl::EndEnumeration(void** iter) { - *iter = NULL; + private: + base::WeakPtr<MemBackendImpl> backend_; + MemEntryImpl* current_; +}; + +scoped_ptr<Backend::Iterator> MemBackendImpl::CreateIterator() { + return scoped_ptr<Backend::Iterator>( + new MemIterator(weak_factory_.GetWeakPtr())); } void MemBackendImpl::OnExternalCacheHit(const std::string& key) { @@ -287,23 +312,6 @@ bool MemBackendImpl::DoomEntriesSince(const Time initial_time) { } } -bool MemBackendImpl::OpenNextEntry(void** iter, Entry** next_entry) { - MemEntryImpl* current = reinterpret_cast<MemEntryImpl*>(*iter); - MemEntryImpl* node = rankings_.GetNext(current); - // We should never return a child entry so iterate until we hit a parent - // entry. - while (node && node->type() != MemEntryImpl::kParentEntry) { - node = rankings_.GetNext(node); - } - *next_entry = node; - *iter = node; - - if (node) - node->Open(); - - return NULL != node; -} - void MemBackendImpl::TrimCache(bool empty) { MemEntryImpl* next = rankings_.GetPrev(NULL); if (!next) diff --git a/net/disk_cache/memory/mem_backend_impl.h b/net/disk_cache/memory/mem_backend_impl.h index 5f31be5..10946c5 100644 --- a/net/disk_cache/memory/mem_backend_impl.h +++ b/net/disk_cache/memory/mem_backend_impl.h @@ -9,6 +9,7 @@ #include "base/compiler_specific.h" #include "base/containers/hash_tables.h" +#include "base/memory/weak_ptr.h" #include "net/disk_cache/disk_cache.h" #include "net/disk_cache/memory/mem_rankings.h" @@ -76,14 +77,15 @@ class NET_EXPORT_PRIVATE MemBackendImpl : public Backend { const CompletionCallback& callback) OVERRIDE; virtual int DoomEntriesSince(base::Time initial_time, const CompletionCallback& callback) OVERRIDE; - virtual int OpenNextEntry(void** iter, Entry** next_entry, - 
const CompletionCallback& callback) OVERRIDE; - virtual void EndEnumeration(void** iter) OVERRIDE; + virtual scoped_ptr<Iterator> CreateIterator() OVERRIDE; virtual void GetStats( std::vector<std::pair<std::string, std::string> >* stats) OVERRIDE {} virtual void OnExternalCacheHit(const std::string& key) OVERRIDE; private: + class MemIterator; + friend class MemIterator; + typedef base::hash_map<std::string, MemEntryImpl*> EntryMap; // Old Backend interface. @@ -94,7 +96,6 @@ class NET_EXPORT_PRIVATE MemBackendImpl : public Backend { bool DoomEntriesBetween(const base::Time initial_time, const base::Time end_time); bool DoomEntriesSince(const base::Time initial_time); - bool OpenNextEntry(void** iter, Entry** next_entry); // Deletes entries from the cache until the current size is below the limit. // If empty is true, the whole cache will be trimmed, regardless of being in @@ -112,6 +113,8 @@ class NET_EXPORT_PRIVATE MemBackendImpl : public Backend { net::NetLog* net_log_; + base::WeakPtrFactory<MemBackendImpl> weak_factory_; + DISALLOW_COPY_AND_ASSIGN(MemBackendImpl); }; diff --git a/net/disk_cache/simple/simple_backend_impl.cc b/net/disk_cache/simple/simple_backend_impl.cc index 2e16576..e84211b 100644 --- a/net/disk_cache/simple/simple_backend_impl.cc +++ b/net/disk_cache/simple/simple_backend_impl.cc @@ -473,18 +473,78 @@ int SimpleBackendImpl::DoomEntriesSince( return DoomEntriesBetween(initial_time, Time(), callback); } -int SimpleBackendImpl::OpenNextEntry(void** iter, - Entry** next_entry, - const CompletionCallback& callback) { - CompletionCallback get_next_entry = - base::Bind(&SimpleBackendImpl::GetNextEntryInIterator, AsWeakPtr(), iter, - next_entry, callback); - return index_->ExecuteWhenReady(get_next_entry); -} +class SimpleBackendImpl::SimpleIterator FINAL : public Iterator { + public: + explicit SimpleIterator(base::WeakPtr<SimpleBackendImpl> backend) + : backend_(backend), + weak_factory_(this) { + } + + // From Backend::Iterator: + virtual int 
OpenNextEntry(Entry** next_entry, + const CompletionCallback& callback) OVERRIDE { + CompletionCallback open_next_entry_impl = + base::Bind(&SimpleIterator::OpenNextEntryImpl, + weak_factory_.GetWeakPtr(), next_entry, callback); + return backend_->index_->ExecuteWhenReady(open_next_entry_impl); + } + + void OpenNextEntryImpl(Entry** next_entry, + const CompletionCallback& callback, + int index_initialization_error_code) { + if (!backend_) { + callback.Run(net::ERR_FAILED); + return; + } + if (index_initialization_error_code != net::OK) { + callback.Run(index_initialization_error_code); + return; + } + if (!hashes_to_enumerate_) + hashes_to_enumerate_ = backend_->index()->GetAllHashes().Pass(); + + while (!hashes_to_enumerate_->empty()) { + uint64 entry_hash = hashes_to_enumerate_->back(); + hashes_to_enumerate_->pop_back(); + if (backend_->index()->Has(entry_hash)) { + *next_entry = NULL; + CompletionCallback continue_iteration = base::Bind( + &SimpleIterator::CheckIterationReturnValue, + weak_factory_.GetWeakPtr(), + next_entry, + callback); + int error_code_open = backend_->OpenEntryFromHash(entry_hash, + next_entry, + continue_iteration); + if (error_code_open == net::ERR_IO_PENDING) + return; + if (error_code_open != net::ERR_FAILED) { + callback.Run(error_code_open); + return; + } + } + } + callback.Run(net::ERR_FAILED); + } -void SimpleBackendImpl::EndEnumeration(void** iter) { - active_enumerations_.Remove(IteratorToEnumerationId(iter)); - *iter = NULL; + void CheckIterationReturnValue(Entry** entry, + const CompletionCallback& callback, + int error_code) { + if (error_code == net::ERR_FAILED) { + OpenNextEntry(entry, callback); + return; + } + callback.Run(error_code); + } + + private: + base::WeakPtr<SimpleBackendImpl> backend_; + scoped_ptr<std::vector<uint64> > hashes_to_enumerate_; + base::WeakPtrFactory<SimpleIterator> weak_factory_; +}; + +scoped_ptr<Backend::Iterator> SimpleBackendImpl::CreateIterator() { + return scoped_ptr<Iterator>(new 
SimpleIterator(AsWeakPtr())); } void SimpleBackendImpl::GetStats( @@ -499,27 +559,6 @@ void SimpleBackendImpl::OnExternalCacheHit(const std::string& key) { index_->UseIfExists(simple_util::GetEntryHashKey(key)); } -// static -SimpleBackendImpl::ActiveEnumerationMap::KeyType - SimpleBackendImpl::IteratorToEnumerationId(void** iter) { - COMPILE_ASSERT(sizeof(ptrdiff_t) >= sizeof(*iter), - integer_type_must_fit_ptr_type_for_cast_to_be_reversible); - const ptrdiff_t ptrdiff_enumeration_id = reinterpret_cast<ptrdiff_t>(*iter); - const ActiveEnumerationMap::KeyType enumeration_id = ptrdiff_enumeration_id; - DCHECK_EQ(enumeration_id, ptrdiff_enumeration_id); - return enumeration_id; -} - -// static -void* SimpleBackendImpl::EnumerationIdToIterator( - ActiveEnumerationMap::KeyType enumeration_id) { - const ptrdiff_t ptrdiff_enumeration_id = enumeration_id; - DCHECK_EQ(enumeration_id, ptrdiff_enumeration_id); - COMPILE_ASSERT(sizeof(ptrdiff_t) >= sizeof(void*), - integer_type_must_fit_ptr_type_for_cast_to_be_reversible); - return reinterpret_cast<void*>(ptrdiff_enumeration_id); -} - void SimpleBackendImpl::InitializeIndex(const CompletionCallback& callback, const DiskStatResult& result) { if (result.net_error == net::OK) { @@ -633,49 +672,6 @@ int SimpleBackendImpl::DoomEntryFromHash(uint64 entry_hash, return net::ERR_IO_PENDING; } -void SimpleBackendImpl::GetNextEntryInIterator( - void** iter, - Entry** next_entry, - const CompletionCallback& callback, - int error_code) { - if (error_code != net::OK) { - callback.Run(error_code); - return; - } - std::vector<uint64>* entry_list = NULL; - if (*iter == NULL) { - const ActiveEnumerationMap::KeyType new_enumeration_id = - active_enumerations_.Add( - entry_list = index()->GetAllHashes().release()); - *iter = EnumerationIdToIterator(new_enumeration_id); - } else { - entry_list = active_enumerations_.Lookup(IteratorToEnumerationId(iter)); - } - while (entry_list->size() > 0) { - uint64 entry_hash = entry_list->back(); - 
entry_list->pop_back(); - if (index()->Has(entry_hash)) { - *next_entry = NULL; - CompletionCallback continue_iteration = base::Bind( - &SimpleBackendImpl::CheckIterationReturnValue, - AsWeakPtr(), - iter, - next_entry, - callback); - int error_code_open = OpenEntryFromHash(entry_hash, - next_entry, - continue_iteration); - if (error_code_open == net::ERR_IO_PENDING) - return; - if (error_code_open != net::ERR_FAILED) { - callback.Run(error_code_open); - return; - } - } - } - callback.Run(net::ERR_FAILED); -} - void SimpleBackendImpl::OnEntryOpenedFromHash( uint64 hash, Entry** entry, @@ -729,18 +725,6 @@ void SimpleBackendImpl::OnEntryOpenedFromKey( callback.Run(final_code); } -void SimpleBackendImpl::CheckIterationReturnValue( - void** iter, - Entry** entry, - const CompletionCallback& callback, - int error_code) { - if (error_code == net::ERR_FAILED) { - OpenNextEntry(iter, entry, callback); - return; - } - callback.Run(error_code); -} - void SimpleBackendImpl::DoomEntriesComplete( scoped_ptr<std::vector<uint64> > entry_hashes, const net::CompletionCallback& callback, diff --git a/net/disk_cache/simple/simple_backend_impl.h b/net/disk_cache/simple/simple_backend_impl.h index 907ee5a..48c422f 100644 --- a/net/disk_cache/simple/simple_backend_impl.h +++ b/net/disk_cache/simple/simple_backend_impl.h @@ -13,7 +13,6 @@ #include "base/compiler_specific.h" #include "base/containers/hash_tables.h" #include "base/files/file_path.h" -#include "base/id_map.h" #include "base/memory/ref_counted.h" #include "base/memory/scoped_ptr.h" #include "base/memory/weak_ptr.h" @@ -35,6 +34,11 @@ namespace disk_cache { // files. // See http://www.chromium.org/developers/design-documents/network-stack/disk-cache/very-simple-backend // +// The SimpleBackendImpl provides safe iteration; mutating entries during +// iteration cannot cause a crash. It is undefined whether entries created or +// destroyed during the iteration will be included in any pre-existing +// iterations. 
+// // The non-static functions below must be called on the IO thread unless // otherwise stated. @@ -98,17 +102,16 @@ class NET_EXPORT_PRIVATE SimpleBackendImpl : public Backend, const CompletionCallback& callback) OVERRIDE; virtual int DoomEntriesSince(base::Time initial_time, const CompletionCallback& callback) OVERRIDE; - virtual int OpenNextEntry(void** iter, Entry** next_entry, - const CompletionCallback& callback) OVERRIDE; - virtual void EndEnumeration(void** iter) OVERRIDE; + virtual scoped_ptr<Iterator> CreateIterator() OVERRIDE; virtual void GetStats( std::vector<std::pair<std::string, std::string> >* stats) OVERRIDE; virtual void OnExternalCacheHit(const std::string& key) OVERRIDE; private: - typedef base::hash_map<uint64, SimpleEntryImpl*> EntryMap; + class SimpleIterator; + friend class SimpleIterator; - typedef IDMap<std::vector<uint64>, IDMapOwnPointer> ActiveEnumerationMap; + typedef base::hash_map<uint64, SimpleEntryImpl*> EntryMap; typedef base::Callback<void(base::Time mtime, uint64 max_size, int result)> InitializeIndexCallback; @@ -124,17 +127,6 @@ class NET_EXPORT_PRIVATE SimpleBackendImpl : public Backend, int net_error; }; - // Convert an iterator from OpenNextEntry() to the key type for - // ActiveEnumerationMap. Note it takes a void** argument; this is for safety; - // if it took a void*, that would be type compatible with a void** permitting - // easy calls missing the dereference. - static ActiveEnumerationMap::KeyType IteratorToEnumerationId(void** iter); - - // Convert a key from ActiveEnumerationMap back to a void*, suitable for - // storing in the iterator argument to OpenNextEntry(). - static void* EnumerationIdToIterator( - ActiveEnumerationMap::KeyType enumeration_id); - void InitializeIndex(const CompletionCallback& callback, const DiskStatResult& result); @@ -169,14 +161,6 @@ class NET_EXPORT_PRIVATE SimpleBackendImpl : public Backend, // which is very important to prevent races in DoomEntries() above. 
int DoomEntryFromHash(uint64 entry_hash, const CompletionCallback & callback); - // Called when the index is initilized to find the next entry in the iterator - // |iter|. If there are no more hashes in the iterator list, net::ERR_FAILED - // is returned. Otherwise, calls OpenEntryFromHash. - void GetNextEntryInIterator(void** iter, - Entry** next_entry, - const CompletionCallback& callback, - int error_code); - // Called when we tried to open an entry with hash alone. When a blank entry // has been created and filled in with information from the disk - based on a // hash alone - this checks that a duplicate active entry was not created @@ -195,14 +179,6 @@ class NET_EXPORT_PRIVATE SimpleBackendImpl : public Backend, const CompletionCallback& callback, int error_code); - // Called at the end of the asynchronous operation triggered by - // OpenEntryFromHash. Makes sure to continue iterating if the open entry was - // not a success. - void CheckIterationReturnValue(void** iter, - Entry** entry, - const CompletionCallback& callback, - int error_code); - // A callback thunk used by DoomEntries to clear the |entries_pending_doom_| // after a mass doom. void DoomEntriesComplete(scoped_ptr<std::vector<uint64> > entry_hashes, @@ -220,9 +196,6 @@ class NET_EXPORT_PRIVATE SimpleBackendImpl : public Backend, EntryMap active_entries_; - // One entry for every enumeration in progress. - ActiveEnumerationMap active_enumerations_; - // The set of all entries which are currently being doomed. To avoid races, // these entries cannot have Doom/Create/Open operations run until the doom // is complete. The base::Closure map target is used to store deferred |