Diffstat (limited to 'net/disk_cache')
-rw-r--r--  net/disk_cache/bitmap.cc                |  63
-rw-r--r--  net/disk_cache/bitmap.h                 |  26
-rw-r--r--  net/disk_cache/block_files.cc           | 170
-rw-r--r--  net/disk_cache/disk_cache_test_base.cc  | 193
-rw-r--r--  net/disk_cache/disk_cache_test_base.h   |  12
-rw-r--r--  net/disk_cache/mem_entry_impl.cc        | 160
-rw-r--r--  net/disk_cache/mem_entry_impl.h         |  44
-rw-r--r--  net/disk_cache/stats_histogram.cc       |  12
8 files changed, 354 insertions, 326 deletions
diff --git a/net/disk_cache/bitmap.cc b/net/disk_cache/bitmap.cc
index e025090..6c9aceb 100644
--- a/net/disk_cache/bitmap.cc
+++ b/net/disk_cache/bitmap.cc
@@ -4,6 +4,8 @@

 #include "net/disk_cache/bitmap.h"

+#include <algorithm>
+
 #include "base/logging.h"

 namespace {
@@ -38,6 +40,31 @@ int FindLSBNonEmpty(uint32 word, bool value) {

 namespace disk_cache {

+Bitmap::Bitmap(int num_bits, bool clear_bits)
+    : num_bits_(num_bits),
+      array_size_(RequiredArraySize(num_bits)),
+      alloc_(true) {
+  map_ = new uint32[array_size_];
+
+  // Initialize all of the bits.
+  if (clear_bits)
+    Clear();
+}
+
+Bitmap::Bitmap(uint32* map, int num_bits, int num_words)
+    : map_(map),
+      num_bits_(num_bits),
+      // If size is larger than necessary, trim because array_size_ is used
+      // as a bound by various methods.
+      array_size_(std::min(RequiredArraySize(num_bits), num_words)),
+      alloc_(false) {
+}
+
+Bitmap::~Bitmap() {
+  if (alloc_)
+    delete [] map_;
+}
+
 void Bitmap::Resize(int num_bits, bool clear_bits) {
   DCHECK(alloc_ || !map_);
   const int old_maxsize = num_bits_;
@@ -105,24 +132,6 @@ void Bitmap::SetMap(const uint32* map, int size) {
   memcpy(map_, map, std::min(size, array_size_) * sizeof(*map_));
 }

-void Bitmap::SetWordBits(int start, int len, bool value) {
-  DCHECK_LT(len, kIntBits);
-  DCHECK_GE(len, 0);
-  if (!len)
-    return;
-
-  int word = start / kIntBits;
-  int offset = start % kIntBits;
-
-  uint32 to_add = 0xffffffff << len;
-  to_add = (~to_add) << offset;
-  if (value) {
-    map_[word] |= to_add;
-  } else {
-    map_[word] &= ~to_add;
-  }
-}
-
 void Bitmap::SetRange(int begin, int end, bool value) {
   DCHECK_LE(begin, end);
   int start_offset = begin & (kIntBits - 1);
@@ -281,4 +290,22 @@ int Bitmap::FindBits(int* index, int limit, bool value) const {
   return end - *index;
 }

+void Bitmap::SetWordBits(int start, int len, bool value) {
+  DCHECK_LT(len, kIntBits);
+  DCHECK_GE(len, 0);
+  if (!len)
+    return;
+
+  int word = start / kIntBits;
+  int offset = start % kIntBits;
+
+  uint32 to_add = 0xffffffff << len;
+  to_add = (~to_add) << offset;
+  if (value) {
+    map_[word] |= to_add;
+  } else {
+    map_[word] &= ~to_add;
+  }
+}
+
 }  // namespace disk_cache
diff --git a/net/disk_cache/bitmap.h b/net/disk_cache/bitmap.h
index 8e24e15..8b3324c 100644
--- a/net/disk_cache/bitmap.h
+++ b/net/disk_cache/bitmap.h
@@ -6,8 +6,6 @@
 #define NET_DISK_CACHE_BITMAP_H_
 #pragma once

-#include <algorithm>
-
 #include "base/basictypes.h"

 namespace disk_cache {
@@ -19,30 +17,14 @@ class Bitmap {

   // This constructor will allocate on a uint32 boundary. If |clear_bits| is
   // false, the bitmap bits will not be initialized.
-  Bitmap(int num_bits, bool clear_bits)
-      : num_bits_(num_bits), array_size_(RequiredArraySize(num_bits)),
-        alloc_(true) {
-    map_ = new uint32[array_size_];
-
-    // Initialize all of the bits.
-    if (clear_bits)
-      Clear();
-  }
+  Bitmap(int num_bits, bool clear_bits);

   // Constructs a Bitmap with the actual storage provided by the caller. |map|
   // has to be valid until this object destruction. |num_bits| is the number of
   // bits in the bitmap, and |num_words| is the size of |map| in 32-bit words.
-  Bitmap(uint32* map, int num_bits, int num_words)
-      : map_(map), num_bits_(num_bits),
-        // If size is larger than necessary, trim because array_size_ is used
-        // as a bound by various methods.
-        array_size_(std::min(RequiredArraySize(num_bits), num_words)),
-        alloc_(false) {}
-
-  ~Bitmap() {
-    if (alloc_)
-      delete[] map_;
-  }
+  Bitmap(uint32* map, int num_bits, int num_words);
+
+  ~Bitmap();

   // Resizes the bitmap.
   // If |num_bits| < Size(), the extra bits will be discarded.
diff --git a/net/disk_cache/block_files.cc b/net/disk_cache/block_files.cc
index 3eb4e35..faa9706 100644
--- a/net/disk_cache/block_files.cc
+++ b/net/disk_cache/block_files.cc
@@ -220,6 +220,91 @@ bool BlockFiles::Init(bool create_files) {
   return true;
 }

+MappedFile* BlockFiles::GetFile(Addr address) {
+  DCHECK(thread_checker_->CalledOnValidThread());
+  DCHECK(block_files_.size() >= 4);
+  DCHECK(address.is_block_file() || !address.is_initialized());
+  if (!address.is_initialized())
+    return NULL;
+
+  int file_index = address.FileNumber();
+  if (static_cast<unsigned int>(file_index) >= block_files_.size() ||
+      !block_files_[file_index]) {
+    // We need to open the file
+    if (!OpenBlockFile(file_index))
+      return NULL;
+  }
+  DCHECK(block_files_.size() >= static_cast<unsigned int>(file_index));
+  return block_files_[file_index];
+}
+
+bool BlockFiles::CreateBlock(FileType block_type, int block_count,
+                             Addr* block_address) {
+  DCHECK(thread_checker_->CalledOnValidThread());
+  if (block_type < RANKINGS || block_type > BLOCK_4K ||
+      block_count < 1 || block_count > 4)
+    return false;
+  if (!init_)
+    return false;
+
+  MappedFile* file = FileForNewBlock(block_type, block_count);
+  if (!file)
+    return false;
+
+  BlockFileHeader* header = reinterpret_cast<BlockFileHeader*>(file->buffer());
+
+  int target_size = 0;
+  for (int i = block_count; i <= 4; i++) {
+    if (header->empty[i - 1]) {
+      target_size = i;
+      break;
+    }
+  }
+
+  DCHECK(target_size);
+  int index;
+  if (!CreateMapBlock(target_size, block_count, header, &index))
+    return false;
+
+  Addr address(block_type, block_count, header->this_file, index);
+  block_address->set_value(address.value());
+  Trace("CreateBlock 0x%x", address.value());
+  return true;
+}
+
+void BlockFiles::DeleteBlock(Addr address, bool deep) {
+  DCHECK(thread_checker_->CalledOnValidThread());
+  if (!address.is_initialized() || address.is_separate_file())
+    return;
+
+  if (!zero_buffer_) {
+    zero_buffer_ = new char[Addr::BlockSizeForFileType(BLOCK_4K) * 4];
+    memset(zero_buffer_, 0, Addr::BlockSizeForFileType(BLOCK_4K) * 4);
+  }
+  MappedFile* file = GetFile(address);
+  if (!file)
+    return;
+
+  Trace("DeleteBlock 0x%x", address.value());
+
+  BlockFileHeader* header = reinterpret_cast<BlockFileHeader*>(file->buffer());
+  DeleteMapBlock(address.start_block(), address.num_blocks(), header);
+
+  size_t size = address.BlockSize() * address.num_blocks();
+  size_t offset = address.start_block() * address.BlockSize() +
+                  kBlockHeaderSize;
+  if (deep)
+    file->Write(zero_buffer_, size, offset);
+
+  if (!header->num_entries) {
+    // This file is now empty. Let's try to delete it.
+    FileType type = Addr::RequiredFileType(header->entry_size);
+    if (Addr::BlockSizeForFileType(RANKINGS) == header->entry_size)
+      type = RANKINGS;
+    RemoveEmptyFile(type);
+  }
+}
+
 void BlockFiles::CloseFiles() {
   if (init_) {
     DCHECK(thread_checker_->CalledOnValidThread());
@@ -346,24 +431,6 @@ bool BlockFiles::OpenBlockFile(int index) {
   return true;
 }

-MappedFile* BlockFiles::GetFile(Addr address) {
-  DCHECK(thread_checker_->CalledOnValidThread());
-  DCHECK(block_files_.size() >= 4);
-  DCHECK(address.is_block_file() || !address.is_initialized());
-  if (!address.is_initialized())
-    return NULL;
-
-  int file_index = address.FileNumber();
-  if (static_cast<unsigned int>(file_index) >= block_files_.size() ||
-      !block_files_[file_index]) {
-    // We need to open the file
-    if (!OpenBlockFile(file_index))
-      return NULL;
-  }
-  DCHECK(block_files_.size() >= static_cast<unsigned int>(file_index));
-  return block_files_[file_index];
-}
-
 bool BlockFiles::GrowBlockFile(MappedFile* file, BlockFileHeader* header) {
   if (kMaxBlocks == header->max_entries)
     return false;
@@ -489,73 +556,6 @@ void BlockFiles::RemoveEmptyFile(FileType block_type) {
   }
 }

-bool BlockFiles::CreateBlock(FileType block_type, int block_count,
-                             Addr* block_address) {
-  DCHECK(thread_checker_->CalledOnValidThread());
-  if (block_type < RANKINGS || block_type > BLOCK_4K ||
-      block_count < 1 || block_count > 4)
-    return false;
-  if (!init_)
-    return false;
-
-  MappedFile* file = FileForNewBlock(block_type, block_count);
-  if (!file)
-    return false;
-
-  BlockFileHeader* header = reinterpret_cast<BlockFileHeader*>(file->buffer());
-
-  int target_size = 0;
-  for (int i = block_count; i <= 4; i++) {
-    if (header->empty[i - 1]) {
-      target_size = i;
-      break;
-    }
-  }
-
-  DCHECK(target_size);
-  int index;
-  if (!CreateMapBlock(target_size, block_count, header, &index))
-    return false;
-
-  Addr address(block_type, block_count, header->this_file, index);
-  block_address->set_value(address.value());
-  Trace("CreateBlock 0x%x", address.value());
-  return true;
-}
-
-void BlockFiles::DeleteBlock(Addr address, bool deep) {
-  DCHECK(thread_checker_->CalledOnValidThread());
-  if (!address.is_initialized() || address.is_separate_file())
-    return;
-
-  if (!zero_buffer_) {
-    zero_buffer_ = new char[Addr::BlockSizeForFileType(BLOCK_4K) * 4];
-    memset(zero_buffer_, 0, Addr::BlockSizeForFileType(BLOCK_4K) * 4);
-  }
-  MappedFile* file = GetFile(address);
-  if (!file)
-    return;
-
-  Trace("DeleteBlock 0x%x", address.value());
-
-  BlockFileHeader* header = reinterpret_cast<BlockFileHeader*>(file->buffer());
-  DeleteMapBlock(address.start_block(), address.num_blocks(), header);
-
-  size_t size = address.BlockSize() * address.num_blocks();
-  size_t offset = address.start_block() * address.BlockSize() +
-                  kBlockHeaderSize;
-  if (deep)
-    file->Write(zero_buffer_, size, offset);
-
-  if (!header->num_entries) {
-    // This file is now empty. Let's try to delete it.
-    FileType type = Addr::RequiredFileType(header->entry_size);
-    if (Addr::BlockSizeForFileType(RANKINGS) == header->entry_size)
-      type = RANKINGS;
-    RemoveEmptyFile(type);
-  }
-}
-
 bool BlockFiles::FixBlockFileHeader(MappedFile* file) {
   BlockFileHeader* header = reinterpret_cast<BlockFileHeader*>(file->buffer());
   int file_size = static_cast<int>(file->GetLength());
diff --git a/net/disk_cache/disk_cache_test_base.cc b/net/disk_cache/disk_cache_test_base.cc
index 6c9b91c..3860667 100644
--- a/net/disk_cache/disk_cache_test_base.cc
+++ b/net/disk_cache/disk_cache_test_base.cc
@@ -15,15 +15,25 @@ void DiskCacheTest::TearDown() {
   MessageLoop::current()->RunAllPending();
 }

-void DiskCacheTestWithCache::SetMaxSize(int size) {
-  size_ = size;
-  if (cache_impl_)
-    EXPECT_TRUE(cache_impl_->SetMaxSize(size));
-
-  if (mem_cache_)
-    EXPECT_TRUE(mem_cache_->SetMaxSize(size));
+DiskCacheTestWithCache::DiskCacheTestWithCache()
+    : cache_(NULL),
+      cache_impl_(NULL),
+      mem_cache_(NULL),
+      mask_(0),
+      size_(0),
+      type_(net::DISK_CACHE),
+      memory_only_(false),
+      implementation_(false),
+      force_creation_(false),
+      new_eviction_(false),
+      first_cleanup_(true),
+      integrity_(true),
+      use_current_thread_(false),
+      cache_thread_("CacheThread") {
 }

+DiskCacheTestWithCache::~DiskCacheTestWithCache() {}
+
 void DiskCacheTestWithCache::InitCache() {
   if (mask_ || new_eviction_)
     implementation_ = true;
@@ -38,86 +48,6 @@ void DiskCacheTestWithCache::InitCache() {
   ASSERT_EQ(0, cache_->GetEntryCount());
 }

-void DiskCacheTestWithCache::InitMemoryCache() {
-  if (!implementation_) {
-    cache_ = disk_cache::MemBackendImpl::CreateBackend(size_);
-    return;
-  }
-
-  mem_cache_ = new disk_cache::MemBackendImpl();
-  cache_ = mem_cache_;
-  ASSERT_TRUE(NULL != cache_);
-
-  if (size_)
-    EXPECT_TRUE(mem_cache_->SetMaxSize(size_));
-
-  ASSERT_TRUE(mem_cache_->Init());
-}
-
-void DiskCacheTestWithCache::InitDiskCache() {
-  FilePath path = GetCacheFilePath();
-  if (first_cleanup_)
-    ASSERT_TRUE(DeleteCache(path));
-
-  if (!cache_thread_.IsRunning()) {
-    EXPECT_TRUE(cache_thread_.StartWithOptions(
-                    base::Thread::Options(MessageLoop::TYPE_IO, 0)));
-  }
-  ASSERT_TRUE(cache_thread_.message_loop() != NULL);
-
-  if (implementation_)
-    return InitDiskCacheImpl(path);
-
-  scoped_refptr<base::MessageLoopProxy> thread =
-      use_current_thread_ ? base::MessageLoopProxy::CreateForCurrentThread() :
-                            cache_thread_.message_loop_proxy();
-
-  TestCompletionCallback cb;
-  int rv = disk_cache::BackendImpl::CreateBackend(
-               path, force_creation_, size_, type_,
-               disk_cache::kNoRandom, thread, NULL, &cache_, &cb);
-  ASSERT_EQ(net::OK, cb.GetResult(rv));
-}
-
-void DiskCacheTestWithCache::InitDiskCacheImpl(const FilePath& path) {
-  scoped_refptr<base::MessageLoopProxy> thread =
-      use_current_thread_ ? base::MessageLoopProxy::CreateForCurrentThread() :
-                            cache_thread_.message_loop_proxy();
-  if (mask_)
-    cache_impl_ = new disk_cache::BackendImpl(path, mask_, thread, NULL);
-  else
-    cache_impl_ = new disk_cache::BackendImpl(path, thread, NULL);
-
-  cache_ = cache_impl_;
-  ASSERT_TRUE(NULL != cache_);
-
-  if (size_)
-    EXPECT_TRUE(cache_impl_->SetMaxSize(size_));
-
-  if (new_eviction_)
-    cache_impl_->SetNewEviction();
-
-  cache_impl_->SetType(type_);
-  cache_impl_->SetFlags(disk_cache::kNoRandom);
-  TestCompletionCallback cb;
-  int rv = cache_impl_->Init(&cb);
-  ASSERT_EQ(net::OK, cb.GetResult(rv));
-}
-
-void DiskCacheTestWithCache::TearDown() {
-  MessageLoop::current()->RunAllPending();
-  delete cache_;
-  if (cache_thread_.IsRunning())
-    cache_thread_.Stop();
-
-  if (!memory_only_ && integrity_) {
-    FilePath path = GetCacheFilePath();
-    EXPECT_TRUE(CheckCacheIntegrity(path, new_eviction_));
-  }
-
-  PlatformTest::TearDown();
-}
-
 // We are expected to leak memory when simulating crashes.
 void DiskCacheTestWithCache::SimulateCrash() {
   ASSERT_TRUE(implementation_ && !memory_only_);
@@ -138,6 +68,15 @@ void DiskCacheTestWithCache::SetTestMode() {
   cache_impl_->SetUnitTestMode();
 }

+void DiskCacheTestWithCache::SetMaxSize(int size) {
+  size_ = size;
+  if (cache_impl_)
+    EXPECT_TRUE(cache_impl_->SetMaxSize(size));
+
+  if (mem_cache_)
+    EXPECT_TRUE(mem_cache_->SetMaxSize(size));
+}
+
 int DiskCacheTestWithCache::OpenEntry(const std::string& key,
                                       disk_cache::Entry** entry) {
   TestCompletionCallback cb;
@@ -236,3 +175,83 @@ int DiskCacheTestWithCache::WriteSparseData(disk_cache::Entry* entry,
   int rv = entry->WriteSparseData(offset, buf, len, &cb);
   return cb.GetResult(rv);
 }
+
+void DiskCacheTestWithCache::TearDown() {
+  MessageLoop::current()->RunAllPending();
+  delete cache_;
+  if (cache_thread_.IsRunning())
+    cache_thread_.Stop();
+
+  if (!memory_only_ && integrity_) {
+    FilePath path = GetCacheFilePath();
+    EXPECT_TRUE(CheckCacheIntegrity(path, new_eviction_));
+  }
+
+  PlatformTest::TearDown();
+}
+
+void DiskCacheTestWithCache::InitMemoryCache() {
+  if (!implementation_) {
+    cache_ = disk_cache::MemBackendImpl::CreateBackend(size_);
+    return;
+  }
+
+  mem_cache_ = new disk_cache::MemBackendImpl();
+  cache_ = mem_cache_;
+  ASSERT_TRUE(NULL != cache_);
+
+  if (size_)
+    EXPECT_TRUE(mem_cache_->SetMaxSize(size_));
+
+  ASSERT_TRUE(mem_cache_->Init());
+}
+
+void DiskCacheTestWithCache::InitDiskCache() {
+  FilePath path = GetCacheFilePath();
+  if (first_cleanup_)
+    ASSERT_TRUE(DeleteCache(path));
+
+  if (!cache_thread_.IsRunning()) {
+    EXPECT_TRUE(cache_thread_.StartWithOptions(
+                    base::Thread::Options(MessageLoop::TYPE_IO, 0)));
+  }
+  ASSERT_TRUE(cache_thread_.message_loop() != NULL);
+
+  if (implementation_)
+    return InitDiskCacheImpl(path);
+
+  scoped_refptr<base::MessageLoopProxy> thread =
+      use_current_thread_ ? base::MessageLoopProxy::CreateForCurrentThread() :
+                            cache_thread_.message_loop_proxy();
+
+  TestCompletionCallback cb;
+  int rv = disk_cache::BackendImpl::CreateBackend(
+               path, force_creation_, size_, type_,
+               disk_cache::kNoRandom, thread, NULL, &cache_, &cb);
+  ASSERT_EQ(net::OK, cb.GetResult(rv));
+}
+
+void DiskCacheTestWithCache::InitDiskCacheImpl(const FilePath& path) {
+  scoped_refptr<base::MessageLoopProxy> thread =
+      use_current_thread_ ? base::MessageLoopProxy::CreateForCurrentThread() :
+                            cache_thread_.message_loop_proxy();
+  if (mask_)
+    cache_impl_ = new disk_cache::BackendImpl(path, mask_, thread, NULL);
+  else
+    cache_impl_ = new disk_cache::BackendImpl(path, thread, NULL);
+
+  cache_ = cache_impl_;
+  ASSERT_TRUE(NULL != cache_);
+
+  if (size_)
+    EXPECT_TRUE(cache_impl_->SetMaxSize(size_));
+
+  if (new_eviction_)
+    cache_impl_->SetNewEviction();
+
+  cache_impl_->SetType(type_);
+  cache_impl_->SetFlags(disk_cache::kNoRandom);
+  TestCompletionCallback cb;
+  int rv = cache_impl_->Init(&cb);
+  ASSERT_EQ(net::OK, cb.GetResult(rv));
+}
diff --git a/net/disk_cache/disk_cache_test_base.h b/net/disk_cache/disk_cache_test_base.h
index 13b3879..0fd98b8 100644
--- a/net/disk_cache/disk_cache_test_base.h
+++ b/net/disk_cache/disk_cache_test_base.h
@@ -40,15 +40,10 @@ class DiskCacheTest : public PlatformTest {
 // Provides basic support for cache related tests.
 class DiskCacheTestWithCache : public DiskCacheTest {
  protected:
-  DiskCacheTestWithCache()
-      : cache_(NULL), cache_impl_(NULL), mem_cache_(NULL), mask_(0), size_(0),
-        type_(net::DISK_CACHE), memory_only_(false), implementation_(false),
-        force_creation_(false), new_eviction_(false), first_cleanup_(true),
-        integrity_(true), use_current_thread_(false),
-        cache_thread_("CacheThread") {}
+  DiskCacheTestWithCache();
+  virtual ~DiskCacheTestWithCache();

   void InitCache();
-  virtual void TearDown();

   void SimulateCrash();
   void SetTestMode();
@@ -112,6 +107,9 @@ class DiskCacheTestWithCache : public DiskCacheTest {
   int WriteSparseData(disk_cache::Entry* entry, int64 offset,
                       net::IOBuffer* buf, int len);

+  // DiskCacheTest:
+  virtual void TearDown();
+
   // cache_ will always have a valid object, regardless of how the cache was
   // initialized. The implementation pointers can be NULL.
   disk_cache::Backend* cache_;
diff --git a/net/disk_cache/mem_entry_impl.cc b/net/disk_cache/mem_entry_impl.cc
index a9e599c..30599f4 100644
--- a/net/disk_cache/mem_entry_impl.cc
+++ b/net/disk_cache/mem_entry_impl.cc
@@ -48,12 +48,66 @@ MemEntryImpl::MemEntryImpl(MemBackendImpl* backend) {
     data_size_[i] = 0;
 }

-MemEntryImpl::~MemEntryImpl() {
-  for (int i = 0; i < NUM_STREAMS; i++)
-    backend_->ModifyStorageSize(data_size_[i], 0);
-  backend_->ModifyStorageSize(static_cast<int32>(key_.size()), 0);
+// ------------------------------------------------------------------------
+
+bool MemEntryImpl::CreateEntry(const std::string& key) {
+  key_ = key;
+  Time current = Time::Now();
+  last_modified_ = current;
+  last_used_ = current;
+  Open();
+  backend_->ModifyStorageSize(0, static_cast<int32>(key.size()));
+  return true;
+}
+
+void MemEntryImpl::InternalDoom() {
+  doomed_ = true;
+  if (!ref_count_) {
+    if (type() == kParentEntry) {
+      // If this is a parent entry, we need to doom all the child entries.
+      if (children_.get()) {
+        EntryMap children;
+        children.swap(*children_);
+        for (EntryMap::iterator i = children.begin();
+             i != children.end(); ++i) {
+          // Since a pointer to this object is also saved in the map, avoid
+          // dooming it.
+          if (i->second != this)
+            i->second->Doom();
+        }
+        DCHECK(children_->size() == 0);
+      }
+    } else {
+      // If this is a child entry, detach it from the parent.
+      parent_->DetachChild(child_id_);
+    }
+    delete this;
+  }
+}
+
+void MemEntryImpl::Open() {
+  // Only a parent entry can be opened.
+  // TODO(hclam): make sure it's correct to not apply the concept of ref
+  // counting to child entry.
+  DCHECK(type() == kParentEntry);
+  ref_count_++;
+  DCHECK(ref_count_ >= 0);
+  DCHECK(!doomed_);
+}
+
+bool MemEntryImpl::InUse() {
+  if (type() == kParentEntry) {
+    return ref_count_ > 0;
+  } else {
+    // A child entry is always not in use. The consequence is that a child entry
+    // can always be evicted while the associated parent entry is currently in
+    // used (i.e. opened).
+    return false;
+  }
 }

+// ------------------------------------------------------------------------
+
 void MemEntryImpl::Doom() {
   if (doomed_)
     return;
@@ -263,6 +317,29 @@ int MemEntryImpl::WriteSparseData(int64 offset, net::IOBuffer* buf, int buf_len,
   return io_buf->BytesConsumed();
 }

+int MemEntryImpl::GetAvailableRange(int64 offset, int len, int64* start,
+                                    CompletionCallback* callback) {
+  return GetAvailableRange(offset, len, start);
+}
+
+bool MemEntryImpl::CouldBeSparse() const {
+  DCHECK_EQ(kParentEntry, type());
+  return (children_.get() != NULL);
+}
+
+int MemEntryImpl::ReadyForSparseIO(
+    net::CompletionCallback* completion_callback) {
+  return net::OK;
+}
+
+// ------------------------------------------------------------------------
+
+MemEntryImpl::~MemEntryImpl() {
+  for (int i = 0; i < NUM_STREAMS; i++)
+    backend_->ModifyStorageSize(data_size_[i], 0);
+  backend_->ModifyStorageSize(static_cast<int32>(key_.size()), 0);
+}
+
 int MemEntryImpl::GetAvailableRange(int64 offset, int len, int64* start) {
   DCHECK(type() == kParentEntry);
   DCHECK(start);
@@ -307,81 +384,6 @@ int MemEntryImpl::GetAvailableRange(int64 offset, int len, int64* start) {
   return 0;
 }

-int MemEntryImpl::GetAvailableRange(int64 offset, int len, int64* start,
-                                    CompletionCallback* callback) {
-  return GetAvailableRange(offset, len, start);
-}
-
-bool MemEntryImpl::CouldBeSparse() const {
-  DCHECK_EQ(kParentEntry, type());
-  return (children_.get() != NULL);
-}
-
-int MemEntryImpl::ReadyForSparseIO(
-    net::CompletionCallback* completion_callback) {
-  return net::OK;
-}
-
-// ------------------------------------------------------------------------
-
-bool MemEntryImpl::CreateEntry(const std::string& key) {
-  key_ = key;
-  Time current = Time::Now();
-  last_modified_ = current;
-  last_used_ = current;
-  Open();
-  backend_->ModifyStorageSize(0, static_cast<int32>(key.size()));
-  return true;
-}
-
-void MemEntryImpl::InternalDoom() {
-  doomed_ = true;
-  if (!ref_count_) {
-    if (type() == kParentEntry) {
-      // If this is a parent entry, we need to doom all the child entries.
-      if (children_.get()) {
-        EntryMap children;
-        children.swap(*children_);
-        for (EntryMap::iterator i = children.begin();
-             i != children.end(); ++i) {
-          // Since a pointer to this object is also saved in the map, avoid
-          // dooming it.
-          if (i->second != this)
-            i->second->Doom();
-        }
-        DCHECK(children_->size() == 0);
-      }
-    } else {
-      // If this is a child entry, detach it from the parent.
-      parent_->DetachChild(child_id_);
-    }
-    delete this;
-  }
-}
-
-void MemEntryImpl::Open() {
-  // Only a parent entry can be opened.
-  // TODO(hclam): make sure it's correct to not apply the concept of ref
-  // counting to child entry.
-  DCHECK(type() == kParentEntry);
-  ref_count_++;
-  DCHECK(ref_count_ >= 0);
-  DCHECK(!doomed_);
-}
-
-bool MemEntryImpl::InUse() {
-  if (type() == kParentEntry) {
-    return ref_count_ > 0;
-  } else {
-    // A child entry is always not in use. The consequence is that a child entry
-    // can always be evicted while the associated parent entry is currently in
-    // used (i.e. opened).
-    return false;
-  }
-}
-
-// ------------------------------------------------------------------------
-
 void MemEntryImpl::PrepareTarget(int index, int offset, int buf_len) {
   int entry_size = GetDataSize(index);
diff --git a/net/disk_cache/mem_entry_impl.h b/net/disk_cache/mem_entry_impl.h
index 573a306..f4ac4ef 100644
--- a/net/disk_cache/mem_entry_impl.h
+++ b/net/disk_cache/mem_entry_impl.h
@@ -52,28 +52,6 @@ class MemEntryImpl : public Entry {

   explicit MemEntryImpl(MemBackendImpl* backend);

-  // Entry interface.
-  virtual void Doom();
-  virtual void Close();
-  virtual std::string GetKey() const;
-  virtual base::Time GetLastUsed() const;
-  virtual base::Time GetLastModified() const;
-  virtual int32 GetDataSize(int index) const;
-  virtual int ReadData(int index, int offset, net::IOBuffer* buf, int buf_len,
-                       net::CompletionCallback* completion_callback);
-  virtual int WriteData(int index, int offset, net::IOBuffer* buf, int buf_len,
-                        net::CompletionCallback* completion_callback,
-                        bool truncate);
-  virtual int ReadSparseData(int64 offset, net::IOBuffer* buf, int buf_len,
-                             net::CompletionCallback* completion_callback);
-  virtual int WriteSparseData(int64 offset, net::IOBuffer* buf, int buf_len,
-                              net::CompletionCallback* completion_callback);
-  virtual int GetAvailableRange(int64 offset, int len, int64* start,
-                                CompletionCallback* callback);
-  virtual bool CouldBeSparse() const;
-  virtual void CancelSparseIO() {}
-  virtual int ReadyForSparseIO(net::CompletionCallback* completion_callback);
-
   // Performs the initialization of a EntryImpl that will be added to the
   // cache.
   bool CreateEntry(const std::string& key);
@@ -104,6 +82,28 @@ class MemEntryImpl : public Entry {
     return parent_ ? kChildEntry : kParentEntry;
   }

+  // Entry interface.
+  virtual void Doom();
+  virtual void Close();
+  virtual std::string GetKey() const;
+  virtual base::Time GetLastUsed() const;
+  virtual base::Time GetLastModified() const;
+  virtual int32 GetDataSize(int index) const;
+  virtual int ReadData(int index, int offset, net::IOBuffer* buf, int buf_len,
+                       net::CompletionCallback* completion_callback);
+  virtual int WriteData(int index, int offset, net::IOBuffer* buf, int buf_len,
+                        net::CompletionCallback* completion_callback,
+                        bool truncate);
+  virtual int ReadSparseData(int64 offset, net::IOBuffer* buf, int buf_len,
+                             net::CompletionCallback* completion_callback);
+  virtual int WriteSparseData(int64 offset, net::IOBuffer* buf, int buf_len,
+                              net::CompletionCallback* completion_callback);
+  virtual int GetAvailableRange(int64 offset, int len, int64* start,
+                                CompletionCallback* callback);
+  virtual bool CouldBeSparse() const;
+  virtual void CancelSparseIO() {}
+  virtual int ReadyForSparseIO(net::CompletionCallback* completion_callback);
+
  private:
   typedef base::hash_map<int, MemEntryImpl*> EntryMap;
diff --git a/net/disk_cache/stats_histogram.cc b/net/disk_cache/stats_histogram.cc
index 366a7e1..39c9056 100644
--- a/net/disk_cache/stats_histogram.cc
+++ b/net/disk_cache/stats_histogram.cc
@@ -15,6 +15,12 @@ using base::StatisticsRecorder;
 // Static.
 const Stats* StatsHistogram::stats_ = NULL;

+StatsHistogram::~StatsHistogram() {
+  // Only cleanup what we set.
+  if (init_)
+    stats_ = NULL;
+}
+
 scoped_refptr<StatsHistogram> StatsHistogram::StatsHistogramFactoryGet(
     const std::string& name) {
   scoped_refptr<Histogram> histogram(NULL);
@@ -59,12 +65,6 @@ bool StatsHistogram::Init(const Stats* stats) {
   return true;
 }

-StatsHistogram::~StatsHistogram() {
-  // Only cleanup what we set.
-  if (init_)
-    stats_ = NULL;
-}
-
 Histogram::Sample StatsHistogram::ranges(size_t i) const {
   DCHECK(stats_);
   return stats_->GetBucketRange(i);