-rw-r--r--  net/disk_cache/backend_impl.cc           |  39
-rw-r--r--  net/disk_cache/backend_impl.h            |  15
-rw-r--r--  net/disk_cache/block_files.cc            |  69
-rw-r--r--  net/disk_cache/block_files.h             |   8
-rw-r--r--  net/disk_cache/block_files_unittest.cc   |  49
-rw-r--r--  net/disk_cache/disk_cache_test_base.cc   |  12
-rw-r--r--  net/disk_cache/disk_cache_test_base.h    |   1
-rw-r--r--  net/disk_cache/entry_impl.cc             |  25
-rw-r--r--  net/disk_cache/entry_impl.h              |   3
-rw-r--r--  net/disk_cache/entry_unittest.cc         | 133
-rw-r--r--  net/disk_cache/in_flight_backend_io.cc   |  16
-rw-r--r--  net/disk_cache/in_flight_backend_io.h    |   4
-rw-r--r--  net/disk_cache/stats.cc                  |   6
-rw-r--r--  net/disk_cache/stats.h                   |   4
-rw-r--r--  net/disk_cache/stress_cache.cc           |  26
15 files changed, 356 insertions, 54 deletions
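Several pieces of this change work toward crash robustness in the block files: entry records are stored with the data address already cleared before the underlying block is released (see the reordering around EntryImpl::DeleteData below), cache trimming is deferred to OnEntryDestroyEnd() once an entry has fully released its resources, and a debug-only BlockFiles::IsValid() cross-checks an address against the file's allocation map. The following standalone sketch is illustrative only; the names and layout are assumptions for the example, not the actual disk_cache helpers. It shows the bitmap test that the new UsedMapBlock() performs: a live run of 1-4 blocks shows up as a fully set run of map bits that, per the new DCHECK in CreateMapBlock(), never straddles a 4-bit group, so the mask always fits in one byte.

// Illustrative sketch (hypothetical helper, not part of disk_cache).
#include <cassert>
#include <cstdint>

// Returns true if all |size| allocation-map bits starting at |index| are set,
// i.e. the blocks are currently in use. |size| is 1..4 and the run is assumed
// not to cross a 4-bit group, so the mask fits inside a single map byte.
bool IsRunUsed(const uint8_t* allocation_map, int index, int size) {
  assert(size >= 1 && size <= 4);
  const int byte_index = index / 8;
  const uint8_t mask =
      static_cast<uint8_t>(((1 << size) - 1) << (index % 8));
  return (allocation_map[byte_index] & mask) == mask;
}

On the ordering side, the patch consistently performs entry_.Store() (with the address field already zeroed) before calling DeleteData(), so a crash between the two steps can at worst leak a freed block rather than leave the stored entry pointing at an address that has already been handed back for reuse; the deferred TrimCache() then runs from OnEntryDestroyEnd() once the entry is really gone.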
diff --git a/net/disk_cache/backend_impl.cc b/net/disk_cache/backend_impl.cc index c52a853..80dadd7 100644 --- a/net/disk_cache/backend_impl.cc +++ b/net/disk_cache/backend_impl.cc @@ -618,6 +618,7 @@ void BackendImpl::CleanupCache() { timer_.Stop(); if (init_) { + stats_.Store(); if (data_) data_->header.crash = 0; @@ -627,6 +628,7 @@ void BackendImpl::CleanupCache() { DCHECK(!num_refs_); } } + block_files_.CloseFiles(); factory_.RevokeAll(); ptr_factory_.InvalidateWeakPtrs(); done_.Signal(); @@ -1020,11 +1022,16 @@ void BackendImpl::RemoveEntry(EntryImpl* entry) { DecreaseNumEntries(); } -void BackendImpl::CacheEntryDestroyed(Addr address) { +void BackendImpl::OnEntryDestroyBegin(Addr address) { EntriesMap::iterator it = open_entries_.find(address.value()); if (it != open_entries_.end()) open_entries_.erase(it); +} + +void BackendImpl::OnEntryDestroyEnd() { DecreaseNumRefs(); + if (data_->header.num_bytes > max_size_ && !read_only_) + eviction_.TrimCache(false); } EntryImpl* BackendImpl::GetOpenEntry(CacheRankingsBlock* rankings) const { @@ -1278,6 +1285,11 @@ int BackendImpl::FlushQueueForTest(CompletionCallback* callback) { return net::ERR_IO_PENDING; } +int BackendImpl::RunTaskForTest(Task* task, CompletionCallback* callback) { + background_queue_.RunTask(task, callback); + return net::ERR_IO_PENDING; +} + int BackendImpl::SelfCheck() { if (!init_) { LOG(ERROR) << "Init failed"; @@ -1476,9 +1488,13 @@ int BackendImpl::NewEntry(Addr address, EntryImpl** entry, bool* dirty) { if (!rankings_.SanityCheck(cache_entry->rankings(), false)) return ERR_INVALID_LINKS; - // We only add clean entries to the map. - if (!*dirty) + if (*dirty) { + Trace("Dirty entry 0x%p 0x%x", reinterpret_cast<void*>(cache_entry.get()), + address.value()); + } else { + // We only add clean entries to the map. open_entries_[address.value()] = cache_entry; + } cache_entry.swap(entry); return 0; @@ -1759,9 +1775,6 @@ void BackendImpl::DestroyInvalidEntryFromEnumeration(EntryImpl* entry) { void BackendImpl::AddStorageSize(int32 bytes) { data_->header.num_bytes += bytes; DCHECK_GE(data_->header.num_bytes, 0); - - if (data_->header.num_bytes > max_size_ && !read_only_) - eviction_.TrimCache(false); } void BackendImpl::SubstractStorageSize(int32 bytes) { @@ -1985,6 +1998,7 @@ int BackendImpl::CheckAllEntries() { } } + Trace("CheckAllEntries End"); if (num_entries + num_dirty != data_->header.num_entries) { LOG(ERROR) << "Number of entries mismatch"; return ERR_NUM_ENTRIES_MISMATCH; @@ -1994,8 +2008,19 @@ int BackendImpl::CheckAllEntries() { } bool BackendImpl::CheckEntry(EntryImpl* cache_entry) { + bool ok = block_files_.IsValid(cache_entry->entry()->address()); + ok = ok && block_files_.IsValid(cache_entry->rankings()->address()); + EntryStore* data = cache_entry->entry()->Data(); + for (size_t i = 0; i < arraysize(data->data_addr); i++) { + if (data->data_addr[i]) { + Addr address(data->data_addr[i]); + if (address.is_block_file()) + ok = ok && block_files_.IsValid(address); + } + } + RankingsNode* rankings = cache_entry->rankings()->Data(); - return !rankings->dummy; + return ok && !rankings->dummy; } int BackendImpl::MaxBuffersSize() { diff --git a/net/disk_cache/backend_impl.h b/net/disk_cache/backend_impl.h index 58321be..2c93d67 100644 --- a/net/disk_cache/backend_impl.h +++ b/net/disk_cache/backend_impl.h @@ -140,9 +140,14 @@ class BackendImpl : public Backend { // Removes all references to this entry. 
void RemoveEntry(EntryImpl* entry); - // This method must be called whenever an entry is released for the last time. - // |address| is the cache address of the entry. - void CacheEntryDestroyed(Addr address); + // This method must be called when an entry is released for the last time, so + // the entry should not be used anymore. |address| is the cache address of the + // entry. + void OnEntryDestroyBegin(Addr address); + + // This method must be called after all resources for an entry have been + // released. + void OnEntryDestroyEnd(); // If the data stored by the provided |rankings| points to an open entry, // returns a pointer to that entry, otherwise returns NULL. Note that this @@ -239,6 +244,10 @@ class BackendImpl : public Backend { // Sends a dummy operation through the operation queue, for unit tests. int FlushQueueForTest(CompletionCallback* callback); + // Runs the provided task on the cache thread. The task will be automatically + // deleted after it runs. + int RunTaskForTest(Task* task, CompletionCallback* callback); + // Peforms a simple self-check, and returns the number of dirty items // or an error code (negative value). int SelfCheck(); diff --git a/net/disk_cache/block_files.cc b/net/disk_cache/block_files.cc index 5461b7e..b81a868 100644 --- a/net/disk_cache/block_files.cc +++ b/net/disk_cache/block_files.cc @@ -7,9 +7,11 @@ #include "base/file_util.h" #include "base/histogram.h" #include "base/string_util.h" +#include "base/thread_checker.h" #include "base/time.h" #include "net/disk_cache/cache_util.h" #include "net/disk_cache/file_lock.h" +#include "net/disk_cache/trace.h" using base::TimeTicks; @@ -57,6 +59,7 @@ bool CreateMapBlock(int target, int size, disk_cache::BlockFileHeader* header, disk_cache::FileLock lock(header); int index_offset = j * 4 + 4 - target; *index = current * 32 + index_offset; + DCHECK_EQ(*index / 4, (*index + size - 1) / 4); uint32 to_add = ((1 << size) - 1) << index_offset; header->allocation_map[current] |= to_add; @@ -118,6 +121,25 @@ void DeleteMapBlock(int index, int size, disk_cache::BlockFileHeader* header) { HISTOGRAM_TIMES("DiskCache.DeleteBlock", TimeTicks::Now() - start); } +// Returns true if the specified block is used. Note that this is a simplified +// version of DeleteMapBlock(). +bool UsedMapBlock(int index, int size, disk_cache::BlockFileHeader* header) { + if (size < 0 || size > disk_cache::kMaxNumBlocks) { + NOTREACHED(); + return false; + } + int byte_index = index / 8; + uint8* byte_map = reinterpret_cast<uint8*>(header->allocation_map); + uint8 map_block = byte_map[byte_index]; + + if (index % 8 >= 4) + map_block >>= 4; + + DCHECK((((1 << size) - 1) << (index % 8)) < 0x100); + uint8 to_clear = ((1 << size) - 1) << (index % 8); + return ((byte_map[byte_index] & to_clear) == to_clear); +} + // Restores the "empty counters" and allocation hints. 
void FixAllocationCounters(disk_cache::BlockFileHeader* header) { for (int i = 0; i < disk_cache::kMaxNumBlocks; i++) { @@ -172,6 +194,8 @@ bool BlockFiles::Init(bool create_files) { if (init_) return false; + thread_checker_.reset(new ThreadChecker); + block_files_.resize(kFirstAdditionalBlockFile); for (int i = 0; i < kFirstAdditionalBlockFile; i++) { if (create_files) @@ -190,6 +214,9 @@ bool BlockFiles::Init(bool create_files) { } void BlockFiles::CloseFiles() { + if (init_) { + DCHECK(thread_checker_->CalledOnValidThread()); + } init_ = false; for (unsigned int i = 0; i < block_files_.size(); i++) { if (block_files_[i]) { @@ -201,6 +228,7 @@ void BlockFiles::CloseFiles() { } void BlockFiles::ReportStats() { + DCHECK(thread_checker_->CalledOnValidThread()); int used_blocks[kFirstAdditionalBlockFile]; int load[kFirstAdditionalBlockFile]; for (int i = 0; i < kFirstAdditionalBlockFile; i++) { @@ -217,6 +245,36 @@ void BlockFiles::ReportStats() { UMA_HISTOGRAM_ENUMERATION("DiskCache.BlockLoad_3", load[3], 101); } +bool BlockFiles::IsValid(Addr address) { +#ifdef NDEBUG + return true; +#else + if (!address.is_initialized() || address.is_separate_file()) + return false; + + MappedFile* file = GetFile(address); + if (!file) + return false; + + BlockFileHeader* header = reinterpret_cast<BlockFileHeader*>(file->buffer()); + bool rv = UsedMapBlock(address.start_block(), address.num_blocks(), header); + DCHECK(rv); + + static bool read_contents = false; + if (read_contents) { + scoped_array<char> buffer; + buffer.reset(new char[Addr::BlockSizeForFileType(BLOCK_4K) * 4]); + size_t size = address.BlockSize() * address.num_blocks(); + size_t offset = address.start_block() * address.BlockSize() + + kBlockHeaderSize; + bool ok = file->Read(buffer.get(), size, offset); + DCHECK(ok); + } + + return rv; +#endif +} + bool BlockFiles::CreateBlockFile(int index, FileType file_type, bool force) { FilePath name = Name(index); int flags = @@ -282,6 +340,7 @@ bool BlockFiles::OpenBlockFile(int index) { } MappedFile* BlockFiles::GetFile(Addr address) { + DCHECK(thread_checker_->CalledOnValidThread()); DCHECK(block_files_.size() >= 4); DCHECK(address.is_block_file() || !address.is_initialized()); if (!address.is_initialized()) @@ -420,6 +479,7 @@ void BlockFiles::RemoveEmptyFile(FileType block_type) { bool BlockFiles::CreateBlock(FileType block_type, int block_count, Addr* block_address) { + DCHECK(thread_checker_->CalledOnValidThread()); if (block_type < RANKINGS || block_type > BLOCK_4K || block_count < 1 || block_count > 4) return false; @@ -447,10 +507,12 @@ bool BlockFiles::CreateBlock(FileType block_type, int block_count, Addr address(block_type, block_count, header->this_file, index); block_address->set_value(address.value()); + Trace("CreateBlock 0x%x", address.value()); return true; } void BlockFiles::DeleteBlock(Addr address, bool deep) { + DCHECK(thread_checker_->CalledOnValidThread()); if (!address.is_initialized() || address.is_separate_file()) return; @@ -462,14 +524,17 @@ void BlockFiles::DeleteBlock(Addr address, bool deep) { if (!file) return; + Trace("DeleteBlock 0x%x", address.value()); + + BlockFileHeader* header = reinterpret_cast<BlockFileHeader*>(file->buffer()); + DeleteMapBlock(address.start_block(), address.num_blocks(), header); + size_t size = address.BlockSize() * address.num_blocks(); size_t offset = address.start_block() * address.BlockSize() + kBlockHeaderSize; if (deep) file->Write(zero_buffer_, size, offset); - BlockFileHeader* header = 
reinterpret_cast<BlockFileHeader*>(file->buffer()); - DeleteMapBlock(address.start_block(), address.num_blocks(), header); if (!header->num_entries) { // This file is now empty. Let's try to delete it. FileType type = Addr::RequiredFileType(header->entry_size); diff --git a/net/disk_cache/block_files.h b/net/disk_cache/block_files.h index 2c01906..2e41750 100644 --- a/net/disk_cache/block_files.h +++ b/net/disk_cache/block_files.h @@ -12,9 +12,12 @@ #include "base/file_path.h" #include "base/gtest_prod_util.h" +#include "base/scoped_ptr.h" #include "net/disk_cache/addr.h" #include "net/disk_cache/mapped_file.h" +class ThreadChecker; + namespace disk_cache { // This class handles the set of block-files open by the disk cache. @@ -48,6 +51,10 @@ class BlockFiles { // Sends UMA stats. void ReportStats(); + // Returns true if the blocks pointed by a given address are currently used. + // This method is only intended for debugging. + bool IsValid(Addr address); + private: // Set force to true to overwrite the file if it exists. bool CreateBlockFile(int index, FileType file_type, bool force); @@ -81,6 +88,7 @@ class BlockFiles { char* zero_buffer_; // Buffer to speed-up cleaning deleted entries. FilePath path_; // Path to the backing folder. std::vector<MappedFile*> block_files_; // The actual files. + scoped_ptr<ThreadChecker> thread_checker_; FRIEND_TEST_ALL_PREFIXES(DiskCacheTest, BlockFiles_ZeroSizeFile); FRIEND_TEST_ALL_PREFIXES(DiskCacheTest, BlockFiles_InvalidFile); diff --git a/net/disk_cache/block_files_unittest.cc b/net/disk_cache/block_files_unittest.cc index 2c87179..90bf048 100644 --- a/net/disk_cache/block_files_unittest.cc +++ b/net/disk_cache/block_files_unittest.cc @@ -225,4 +225,53 @@ TEST_F(DiskCacheTest, BlockFiles_Stats) { EXPECT_EQ(0, load); } +// Tests that we add and remove blocks correctly. +TEST_F(DiskCacheTest, AllocationMap) { + FilePath path = GetCacheFilePath(); + ASSERT_TRUE(DeleteCache(path)); + ASSERT_TRUE(file_util::CreateDirectory(path)); + + BlockFiles files(path); + ASSERT_TRUE(files.Init(true)); + + // Create a bunch of entries. + const int kSize = 100; + Addr address[kSize]; + for (int i = 0; i < kSize; i++) { + SCOPED_TRACE(i); + int block_size = i % 4 + 1; + EXPECT_TRUE(files.CreateBlock(BLOCK_1K, block_size, &address[i])); + EXPECT_EQ(BLOCK_1K, address[i].file_type()); + EXPECT_EQ(block_size, address[i].num_blocks()); + int start = address[i].start_block(); + EXPECT_EQ(start / 4, (start + block_size - 1) / 4); + } + + for (int i = 0; i < kSize; i++) { + SCOPED_TRACE(i); + EXPECT_TRUE(files.IsValid(address[i])); + } + + // The first part of the allocation map should be completely filled. We used + // 10 bits per each four entries, so 250 bits total. + BlockFileHeader* header = + reinterpret_cast<BlockFileHeader*>(files.GetFile(address[0])->buffer()); + uint8* buffer = reinterpret_cast<uint8*>(&header->allocation_map); + for (int i =0; i < 29; i++) { + SCOPED_TRACE(i); + EXPECT_EQ(0xff, buffer[i]); + } + + for (int i = 0; i < kSize; i++) { + SCOPED_TRACE(i); + files.DeleteBlock(address[i], false); + } + + // The allocation map should be empty. 
+ for (int i =0; i < 50; i++) { + SCOPED_TRACE(i); + EXPECT_EQ(0, buffer[i]); + } +} + } // namespace disk_cache diff --git a/net/disk_cache/disk_cache_test_base.cc b/net/disk_cache/disk_cache_test_base.cc index 3edfffd..cffe3fa 100644 --- a/net/disk_cache/disk_cache_test_base.cc +++ b/net/disk_cache/disk_cache_test_base.cc @@ -193,6 +193,18 @@ void DiskCacheTestWithCache::FlushQueueForTest() { EXPECT_EQ(net::OK, cb.GetResult(rv)); } +void DiskCacheTestWithCache::RunTaskForTest(Task* task) { + if (memory_only_ || !cache_impl_) { + task->Run(); + delete task; + return; + } + + TestCompletionCallback cb; + int rv = cache_impl_->RunTaskForTest(task, &cb); + EXPECT_EQ(net::OK, cb.GetResult(rv)); +} + int DiskCacheTestWithCache::ReadData(disk_cache::Entry* entry, int index, int offset, net::IOBuffer* buf, int len) { TestCompletionCallback cb; diff --git a/net/disk_cache/disk_cache_test_base.h b/net/disk_cache/disk_cache_test_base.h index 80c50a1..faecc17 100644 --- a/net/disk_cache/disk_cache_test_base.h +++ b/net/disk_cache/disk_cache_test_base.h @@ -102,6 +102,7 @@ class DiskCacheTestWithCache : public DiskCacheTest { int DoomEntriesSince(const base::Time initial_time); int OpenNextEntry(void** iter, disk_cache::Entry** next_entry); void FlushQueueForTest(); + void RunTaskForTest(Task* task); int ReadData(disk_cache::Entry* entry, int index, int offset, net::IOBuffer* buf, int len); int WriteData(disk_cache::Entry* entry, int index, int offset, diff --git a/net/disk_cache/entry_impl.cc b/net/disk_cache/entry_impl.cc index f152f10..5b65344 100644 --- a/net/disk_cache/entry_impl.cc +++ b/net/disk_cache/entry_impl.cc @@ -303,6 +303,9 @@ EntryImpl::EntryImpl(BackendImpl* backend, Addr address, bool read_only) // data related to a previous cache entry because the range was not fully // written before). EntryImpl::~EntryImpl() { + Log("~EntryImpl in"); + backend_->OnEntryDestroyBegin(entry_.address()); + // Save the sparse info to disk before deleting this entry. sparse_.reset(); @@ -333,7 +336,8 @@ EntryImpl::~EntryImpl() { } } - backend_->CacheEntryDestroyed(entry_.address()); + Trace("~EntryImpl out 0x%p", reinterpret_cast<void*>(this)); + backend_->OnEntryDestroyEnd(); } void EntryImpl::Doom() { @@ -578,9 +582,11 @@ int EntryImpl::WriteDataImpl(int index, int offset, net::IOBuffer* buf, int entry_size = entry_.Data()->data_size[index]; bool extending = entry_size < offset + buf_len; truncate = truncate && entry_size > offset + buf_len; + Trace("To PrepareTarget 0x%x", entry_.address().value()); if (!PrepareTarget(index, offset, buf_len, truncate)) return net::ERR_FAILED; + Trace("From PrepareTarget 0x%x", entry_.address().value()); if (extending || truncate) UpdateSize(index, entry_size, offset + buf_len); @@ -771,18 +777,17 @@ void EntryImpl::DeleteEntryData(bool everything) { for (int index = 0; index < kNumStreams; index++) { Addr address(entry_.Data()->data_addr[index]); if (address.is_initialized()) { - DeleteData(address, index); backend_->ModifyStorageSize(entry_.Data()->data_size[index] - unreported_size_[index], 0); entry_.Data()->data_addr[index] = 0; entry_.Data()->data_size[index] = 0; + entry_.Store(); + DeleteData(address, index); } } - if (!everything) { - entry_.Store(); + if (!everything) return; - } // Remove all traces of this entry. 
backend_->RemoveEntry(this); @@ -939,6 +944,12 @@ bool EntryImpl::CreateBlock(int size, Addr* address) { return true; } +// Note that this method may end up modifying a block file so upon return the +// involved block will be free, and could be reused for something else. If there +// is a crash after that point (and maybe before returning to the caller), the +// entry will be left dirty... and at some point it will be discarded; it is +// important that the entry doesn't keep a reference to this address, or we'll +// end up deleting the contents of |address| once again. void EntryImpl::DeleteData(Addr address, int index) { if (!address.is_initialized()) return; @@ -1038,12 +1049,12 @@ bool EntryImpl::HandleTruncation(int index, int offset, int buf_len) { if (!new_size) { // This is by far the most common scenario. - DeleteData(address, index); backend_->ModifyStorageSize(current_size - unreported_size_[index], 0); entry_.Data()->data_addr[index] = 0; entry_.Data()->data_size[index] = 0; unreported_size_[index] = 0; entry_.Store(); + DeleteData(address, index); user_buffers_[index].reset(); return true; @@ -1115,9 +1126,9 @@ bool EntryImpl::MoveToLocalBuffer(int index) { return false; Addr address(entry_.Data()->data_addr[index]); - DeleteData(address, index); entry_.Data()->data_addr[index] = 0; entry_.Store(); + DeleteData(address, index); // If we lose this entry we'll see it as zero sized. int len = entry_.Data()->data_size[index]; diff --git a/net/disk_cache/entry_impl.h b/net/disk_cache/entry_impl.h index b3481f0..f1b9251 100644 --- a/net/disk_cache/entry_impl.h +++ b/net/disk_cache/entry_impl.h @@ -144,6 +144,9 @@ class EntryImpl : public Entry, public base::RefCounted<EntryImpl> { bool CreateBlock(int size, Addr* address); // Deletes the data pointed by address, maybe backed by files_[index]. + // Note that most likely the caller should delete (and store) the reference to + // |address| *before* calling this method because we don't want to have an + // entry using an address that is already free. void DeleteData(Addr address, int index); // Updates ranking information. diff --git a/net/disk_cache/entry_unittest.cc b/net/disk_cache/entry_unittest.cc index 8799ed8..ce0c11f 100644 --- a/net/disk_cache/entry_unittest.cc +++ b/net/disk_cache/entry_unittest.cc @@ -22,6 +22,10 @@ extern volatile bool g_cache_tests_error; // Tests that can run with different types of caches. class DiskCacheEntryTest : public DiskCacheTestWithCache { + public: + void InternalSyncIOBackground(disk_cache::Entry* entry); + void ExternalSyncIOBackground(disk_cache::Entry* entry); + protected: void InternalSyncIO(); void InternalAsyncIO(); @@ -45,15 +49,29 @@ class DiskCacheEntryTest : public DiskCacheTestWithCache { void PartialSparseEntry(); }; -// We need to support synchronous IO even though it is not a supported operation -// from the point of view of the disk cache's public interface, because we use -// it internally, not just by a few tests, but as part of the implementation -// (see sparse_control.cc, for example). -void DiskCacheEntryTest::InternalSyncIO() { - disk_cache::Entry* entry = NULL; - ASSERT_EQ(net::OK, CreateEntry("the first key", &entry)); - ASSERT_TRUE(NULL != entry); +// Simple task to run part of a test from the cache thread. 
+class SyncIOTask : public Task { + public: + SyncIOTask(DiskCacheEntryTest* test, disk_cache::Entry* entry) + : test_(test), entry_(entry) {} + + protected: + DiskCacheEntryTest* test_; + disk_cache::Entry* entry_; +}; + +class InternalSyncIOTask : public SyncIOTask { + public: + InternalSyncIOTask(DiskCacheEntryTest* test, disk_cache::Entry* entry) + : SyncIOTask(test, entry) {} + virtual void Run() { + test_->InternalSyncIOBackground(entry_); + } +}; + +// This part of the test runs on the background thread. +void DiskCacheEntryTest::InternalSyncIOBackground(disk_cache::Entry* entry) { const int kSize1 = 10; scoped_refptr<net::IOBuffer> buffer1 = new net::IOBuffer(kSize1); CacheTestFillBuffer(buffer1->data(), kSize1, false); @@ -88,6 +106,19 @@ void DiskCacheEntryTest::InternalSyncIO() { // We need to delete the memory buffer on this thread. EXPECT_EQ(0, entry->WriteData(0, 0, NULL, 0, NULL, true)); EXPECT_EQ(0, entry->WriteData(1, 0, NULL, 0, NULL, true)); +} + +// We need to support synchronous IO even though it is not a supported operation +// from the point of view of the disk cache's public interface, because we use +// it internally, not just by a few tests, but as part of the implementation +// (see sparse_control.cc, for example). +void DiskCacheEntryTest::InternalSyncIO() { + disk_cache::Entry* entry = NULL; + ASSERT_EQ(net::OK, CreateEntry("the first key", &entry)); + ASSERT_TRUE(NULL != entry); + + // The bulk of the test runs from within the task, on the cache thread. + RunTaskForTest(new InternalSyncIOTask(this, entry)); entry->Doom(); entry->Close(); @@ -250,10 +281,18 @@ TEST_F(DiskCacheEntryTest, MemoryOnlyInternalAsyncIO) { InternalAsyncIO(); } -void DiskCacheEntryTest::ExternalSyncIO() { - disk_cache::Entry* entry; - ASSERT_EQ(net::OK, CreateEntry("the first key", &entry)); +class ExternalSyncIOTask : public SyncIOTask { + public: + ExternalSyncIOTask(DiskCacheEntryTest* test, disk_cache::Entry* entry) + : SyncIOTask(test, entry) {} + + virtual void Run() { + test_->ExternalSyncIOBackground(entry_); + } +}; +// This part of the test runs on the background thread. +void DiskCacheEntryTest::ExternalSyncIOBackground(disk_cache::Entry* entry) { const int kSize1 = 17000; const int kSize2 = 25000; scoped_refptr<net::IOBuffer> buffer1 = new net::IOBuffer(kSize1); @@ -283,6 +322,14 @@ void DiskCacheEntryTest::ExternalSyncIO() { // We need to delete the memory buffer on this thread. EXPECT_EQ(0, entry->WriteData(0, 0, NULL, 0, NULL, true)); EXPECT_EQ(0, entry->WriteData(1, 0, NULL, 0, NULL, true)); +} + +void DiskCacheEntryTest::ExternalSyncIO() { + disk_cache::Entry* entry; + ASSERT_EQ(net::OK, CreateEntry("the first key", &entry)); + + // The bulk of the test runs from within the task, on the cache thread. 
+ RunTaskForTest(new ExternalSyncIOTask(this, entry)); entry->Doom(); entry->Close(); @@ -635,47 +682,47 @@ void DiskCacheEntryTest::TruncateData() { memset(buffer2->data(), 0, kSize2); // Simple truncation: - EXPECT_EQ(200, entry->WriteData(0, 0, buffer1, 200, NULL, false)); + EXPECT_EQ(200, WriteData(entry, 0, 0, buffer1, 200, false)); EXPECT_EQ(200, entry->GetDataSize(0)); - EXPECT_EQ(100, entry->WriteData(0, 0, buffer1, 100, NULL, false)); + EXPECT_EQ(100, WriteData(entry, 0, 0, buffer1, 100, false)); EXPECT_EQ(200, entry->GetDataSize(0)); - EXPECT_EQ(100, entry->WriteData(0, 0, buffer1, 100, NULL, true)); + EXPECT_EQ(100, WriteData(entry, 0, 0, buffer1, 100, true)); EXPECT_EQ(100, entry->GetDataSize(0)); - EXPECT_EQ(0, entry->WriteData(0, 50, buffer1, 0, NULL, true)); + EXPECT_EQ(0, WriteData(entry, 0, 50, buffer1, 0, true)); EXPECT_EQ(50, entry->GetDataSize(0)); - EXPECT_EQ(0, entry->WriteData(0, 0, buffer1, 0, NULL, true)); + EXPECT_EQ(0, WriteData(entry, 0, 0, buffer1, 0, true)); EXPECT_EQ(0, entry->GetDataSize(0)); entry->Close(); ASSERT_EQ(net::OK, OpenEntry(key, &entry)); // Go to an external file. - EXPECT_EQ(20000, entry->WriteData(0, 0, buffer1, 20000, NULL, true)); + EXPECT_EQ(20000, WriteData(entry, 0, 0, buffer1, 20000, true)); EXPECT_EQ(20000, entry->GetDataSize(0)); - EXPECT_EQ(20000, entry->ReadData(0, 0, buffer2, 20000, NULL)); + EXPECT_EQ(20000, ReadData(entry, 0, 0, buffer2, 20000)); EXPECT_TRUE(!memcmp(buffer1->data(), buffer2->data(), 20000)); memset(buffer2->data(), 0, kSize2); // External file truncation - EXPECT_EQ(18000, entry->WriteData(0, 0, buffer1, 18000, NULL, false)); + EXPECT_EQ(18000, WriteData(entry, 0, 0, buffer1, 18000, false)); EXPECT_EQ(20000, entry->GetDataSize(0)); - EXPECT_EQ(18000, entry->WriteData(0, 0, buffer1, 18000, NULL, true)); + EXPECT_EQ(18000, WriteData(entry, 0, 0, buffer1, 18000, true)); EXPECT_EQ(18000, entry->GetDataSize(0)); - EXPECT_EQ(0, entry->WriteData(0, 17500, buffer1, 0, NULL, true)); + EXPECT_EQ(0, WriteData(entry, 0, 17500, buffer1, 0, true)); EXPECT_EQ(17500, entry->GetDataSize(0)); // And back to an internal block. - EXPECT_EQ(600, entry->WriteData(0, 1000, buffer1, 600, NULL, true)); + EXPECT_EQ(600, WriteData(entry, 0, 1000, buffer1, 600, true)); EXPECT_EQ(1600, entry->GetDataSize(0)); - EXPECT_EQ(600, entry->ReadData(0, 1000, buffer2, 600, NULL)); + EXPECT_EQ(600, ReadData(entry, 0, 1000, buffer2, 600)); EXPECT_TRUE(!memcmp(buffer1->data(), buffer2->data(), 600)); - EXPECT_EQ(1000, entry->ReadData(0, 0, buffer2, 1000, NULL)); + EXPECT_EQ(1000, ReadData(entry, 0, 0, buffer2, 1000)); EXPECT_TRUE(!memcmp(buffer1->data(), buffer2->data(), 1000)) << "Preserves previous data"; // Go from external file to zero length. - EXPECT_EQ(20000, entry->WriteData(0, 0, buffer1, 20000, NULL, true)); + EXPECT_EQ(20000, WriteData(entry, 0, 0, buffer1, 20000, true)); EXPECT_EQ(20000, entry->GetDataSize(0)); - EXPECT_EQ(0, entry->WriteData(0, 0, buffer1, 0, NULL, true)); + EXPECT_EQ(0, WriteData(entry, 0, 0, buffer1, 0, true)); EXPECT_EQ(0, entry->GetDataSize(0)); entry->Close(); @@ -1022,6 +1069,36 @@ TEST_F(DiskCacheEntryTest, MemoryOnlyInvalidData) { InvalidData(); } +// Tests that the cache preserves the buffer of an IO operation. 
+TEST_F(DiskCacheEntryTest, ReadWriteDestroyBuffer) { + InitCache(); + std::string key("the first key"); + disk_cache::Entry* entry; + ASSERT_EQ(net::OK, CreateEntry(key, &entry)); + + const int kSize = 200; + scoped_refptr<net::IOBuffer> buffer = new net::IOBuffer(kSize); + CacheTestFillBuffer(buffer->data(), kSize, false); + + TestCompletionCallback cb; + EXPECT_EQ(net::ERR_IO_PENDING, + entry->WriteData(0, 0, buffer, kSize, &cb, false)); + + // Release our reference to the buffer. + buffer = NULL; + EXPECT_EQ(kSize, cb.WaitForResult()); + + // And now test with a Read(). + buffer = new net::IOBuffer(kSize); + CacheTestFillBuffer(buffer->data(), kSize, false); + + EXPECT_EQ(net::ERR_IO_PENDING, entry->ReadData(0, 0, buffer, kSize, &cb)); + buffer = NULL; + EXPECT_EQ(kSize, cb.WaitForResult()); + + entry->Close(); +} + void DiskCacheEntryTest::DoomNormalEntry() { std::string key("the first key"); disk_cache::Entry* entry; @@ -1279,7 +1356,7 @@ void DiskCacheEntryTest::GetAvailableRange() { rv = entry->GetAvailableRange(0x20F2000, kSize, &start, &cb); EXPECT_EQ(0x2000, cb.GetResult(rv)); EXPECT_EQ(0x20F2000, start); - EXPECT_EQ(0x2000, entry->ReadSparseData(start, buf, kSize, NULL)); + EXPECT_EQ(0x2000, ReadSparseData(entry, start, buf, kSize)); // Make sure that we respect the |len| argument. start = 0; @@ -1576,7 +1653,7 @@ void DiskCacheEntryTest::PartialSparseEntry() { scoped_refptr<net::IOBuffer> buf2 = new net::IOBuffer(kSize); memset(buf2->data(), 0, kSize); - EXPECT_EQ(0, entry->ReadSparseData(8000, buf2, kSize, NULL)); + EXPECT_EQ(0, ReadSparseData(entry, 8000, buf2, kSize)); EXPECT_EQ(500, ReadSparseData(entry, kSize, buf2, kSize)); EXPECT_EQ(0, memcmp(buf2->data(), buf1->data() + kSize - 500, 500)); diff --git a/net/disk_cache/in_flight_backend_io.cc b/net/disk_cache/in_flight_backend_io.cc index 0881ca5..e400ac3 100644 --- a/net/disk_cache/in_flight_backend_io.cc +++ b/net/disk_cache/in_flight_backend_io.cc @@ -118,6 +118,11 @@ void BackendIO::FlushQueue() { operation_ = OP_FLUSH_QUEUE; } +void BackendIO::RunTask(Task* task) { + operation_ = OP_RUN_TASK; + task_ = task; +} + void BackendIO::ReadData(EntryImpl* entry, int index, int offset, net::IOBuffer* buf, int buf_len) { operation_ = OP_READ; @@ -221,6 +226,11 @@ void BackendIO::ExecuteBackendOperation() { case OP_FLUSH_QUEUE: result_ = net::OK; break; + case OP_RUN_TASK: + task_->Run(); + delete task_; + result_ = net::OK; + break; default: NOTREACHED() << "Invalid Operation"; result_ = net::ERR_UNEXPECTED; @@ -364,6 +374,12 @@ void InFlightBackendIO::FlushQueue(net::CompletionCallback* callback) { QueueOperation(operation); } +void InFlightBackendIO::RunTask(Task* task, net::CompletionCallback* callback) { + scoped_refptr<BackendIO> operation = new BackendIO(this, backend_, callback); + operation->RunTask(task); + QueueOperation(operation); +} + void InFlightBackendIO::ReadData(EntryImpl* entry, int index, int offset, net::IOBuffer* buf, int buf_len, CompletionCallback* callback) { diff --git a/net/disk_cache/in_flight_backend_io.h b/net/disk_cache/in_flight_backend_io.h index 213c261..889fb20 100644 --- a/net/disk_cache/in_flight_backend_io.h +++ b/net/disk_cache/in_flight_backend_io.h @@ -57,6 +57,7 @@ class BackendIO : public BackgroundIO { void CloseEntryImpl(EntryImpl* entry); void DoomEntryImpl(EntryImpl* entry); void FlushQueue(); // Dummy operation. 
+ void RunTask(Task* task); void ReadData(EntryImpl* entry, int index, int offset, net::IOBuffer* buf, int buf_len); void WriteData(EntryImpl* entry, int index, int offset, net::IOBuffer* buf, @@ -92,6 +93,7 @@ class BackendIO : public BackgroundIO { OP_CLOSE_ENTRY, OP_DOOM_ENTRY, OP_FLUSH_QUEUE, + OP_RUN_TASK, OP_MAX_BACKEND, OP_READ, OP_WRITE, @@ -128,6 +130,7 @@ class BackendIO : public BackgroundIO { int64 offset64_; int64* start_; base::TimeTicks start_time_; + Task* task_; DISALLOW_COPY_AND_ASSIGN(BackendIO); }; @@ -160,6 +163,7 @@ class InFlightBackendIO : public InFlightIO { void CloseEntryImpl(EntryImpl* entry); void DoomEntryImpl(EntryImpl* entry); void FlushQueue(net::CompletionCallback* callback); + void RunTask(Task* task, net::CompletionCallback* callback); void ReadData(EntryImpl* entry, int index, int offset, net::IOBuffer* buf, int buf_len, net::CompletionCallback* callback); void WriteData(EntryImpl* entry, int index, int offset, net::IOBuffer* buf, diff --git a/net/disk_cache/stats.cc b/net/disk_cache/stats.cc index e69ea00..b69e70d 100644 --- a/net/disk_cache/stats.cc +++ b/net/disk_cache/stats.cc @@ -1,4 +1,4 @@ -// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved. +// Copyright (c) 2006-2010 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. @@ -147,8 +147,10 @@ bool Stats::Init(BackendImpl* backend, uint32* storage_addr) { return true; } +Stats::Stats() : backend_(NULL) { +} + Stats::~Stats() { - Store(); } // The array will be filled this way: diff --git a/net/disk_cache/stats.h b/net/disk_cache/stats.h index 19dac58..3042590 100644 --- a/net/disk_cache/stats.h +++ b/net/disk_cache/stats.h @@ -1,4 +1,4 @@ -// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved. +// Copyright (c) 2006-2010 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. @@ -48,7 +48,7 @@ class Stats { MAX_COUNTER }; - Stats() : backend_(NULL) {} + Stats(); ~Stats(); bool Init(BackendImpl* backend, uint32* storage_addr); diff --git a/net/disk_cache/stress_cache.cc b/net/disk_cache/stress_cache.cc index 3420aaf..98dcbe1 100644 --- a/net/disk_cache/stress_cache.cc +++ b/net/disk_cache/stress_cache.cc @@ -10,6 +10,18 @@ // The child application has two threads: one to exercise the cache in an // infinite loop, and another one to asynchronously kill the process. +// A regular build should never crash. +// To test that the disk cache doesn't generate critical errors with regular +// application level crashes, add the following code and re-compile: +// +// void BackendImpl::CriticalError(int error) { +// NOTREACHED(); +// +// void BackendImpl::ReportError(int error) { +// if (error && error != ERR_PREVIOUS_CRASH) { +// NOTREACHED(); +// } + #include <string> #include <vector> @@ -104,7 +116,11 @@ void StressTheCache(int iteration) { int seed = static_cast<int>(Time::Now().ToInternalValue()); srand(seed); +#ifdef NDEBUG const int kNumKeys = 5000; +#else + const int kNumKeys = 1700; +#endif const int kNumEntries = 30; std::string keys[kNumKeys]; disk_cache::Entry* entries[kNumEntries] = {0}; @@ -120,6 +136,8 @@ void StressTheCache(int iteration) { for (int i = 0;; i++) { int slot = rand() % kNumEntries; int key = rand() % kNumKeys; + bool truncate = rand() % 2 ? 
false : true; + int size = kSize - (rand() % 4) * kSize / 4; if (entries[slot]) entries[slot]->Close(); @@ -130,9 +148,11 @@ void StressTheCache(int iteration) { CHECK_EQ(net::OK, cb.GetResult(rv)); } - base::snprintf(buffer->data(), kSize, "%d %d", iteration, i); - rv = entries[slot]->WriteData(0, 0, buffer, kSize, &cb, false); - CHECK_EQ(kSize, cb.GetResult(rv)); + base::snprintf(buffer->data(), kSize, + "i: %d iter: %d, size: %d, truncate: %d", i, iteration, size, + truncate ? 1 : 0); + rv = entries[slot]->WriteData(0, 0, buffer, size, &cb, truncate); + CHECK_EQ(size, cb.GetResult(rv)); if (rand() % 100 > 80) { key = rand() % kNumKeys; |