author     rvargas@google.com <rvargas@google.com@0039d316-1c4b-4281-b951-d872f2087c98>   2008-07-31 21:28:48 +0000
committer  rvargas@google.com <rvargas@google.com@0039d316-1c4b-4281-b951-d872f2087c98>   2008-07-31 21:28:48 +0000
commit     bdad42a47f2dfa5783554844d54337454256a0a3
tree       915326e540a38ccc7f393029313490bb68d071d5
parent     d0a918234ecc13dbf5fa11fad7122f7e81e38840
Change the order of functions in the .cc file to follow the order of the header. No actual code change.
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@200 0039d316-1c4b-4281-b951-d872f2087c98
-rw-r--r--   net/disk_cache/backend_impl.cc   869
-rw-r--r--   net/disk_cache/backend_impl.h    107
-rw-r--r--   net/disk_cache/entry_impl.cc     485
3 files changed, 735 insertions, 726 deletions
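One of the functions repositioned in the diff below, BackendImpl::AdjustMaxCacheSize, picks the cache budget from the free disk space: 1% of what is available to the user, clamped between the default size and kint32max, and then temporarily capped at the default. As a reading aid, here is a minimal standalone sketch of that sizing policy. It is not Chromium code; the value of kDefaultCacheSize is an assumption, and PickCacheSize is a hypothetical helper standing in for the member function, using standard integer types in place of ULARGE_INTEGER and kint32max.

// Standalone sketch (not Chromium code) of the sizing policy implemented by
// BackendImpl::AdjustMaxCacheSize in the diff below. kDefaultCacheSize is an
// assumed value; PickCacheSize is a hypothetical stand-in for the member.
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <limits>

const int32_t kDefaultCacheSize = 80 * 1024 * 1024;  // Assumption for the sketch.

// Given the bytes available to this user on the cache volume, return the size
// the heuristic would pick: 1% of the free space, clamped to
// [kDefaultCacheSize, INT32_MAX], then capped at the default while larger
// caches are still being tuned (the TODO in the original function).
int32_t PickCacheSize(uint64_t available_bytes) {
  uint64_t budget = available_bytes / 100;  // Attempt to use 1% of the disk.

  int32_t max_size;
  if (budget < static_cast<uint64_t>(kDefaultCacheSize))
    max_size = kDefaultCacheSize;
  else if (budget > static_cast<uint64_t>(std::numeric_limits<int32_t>::max()))
    max_size = std::numeric_limits<int32_t>::max();
  else
    max_size = static_cast<int32_t>(budget);

  return std::min(max_size, kDefaultCacheSize);
}

int main() {
  // 10 GB free: 1% is ~100 MB, but the temporary cap keeps it at the default.
  std::cout << PickCacheSize(10ull * 1024 * 1024 * 1024) << "\n";
  // 2 GB free: 1% is ~20 MB, below the default, so the default is used.
  std::cout << PickCacheSize(2ull * 1024 * 1024 * 1024) << "\n";
  return 0;
}

Note that, as written at this revision, the temporary cap means the heuristic always lands on the default size unless a pre-existing index table forces it lower via MaxStorageSizeForTable.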
diff --git a/net/disk_cache/backend_impl.cc b/net/disk_cache/backend_impl.cc index cf8237e..930f4a3 100644 --- a/net/disk_cache/backend_impl.cc +++ b/net/disk_cache/backend_impl.cc @@ -302,72 +302,7 @@ BackendImpl::~BackendImpl() { DCHECK(!num_refs_); } -bool BackendImpl::InitBackingStore(bool* file_created) { - // This call fails if the folder exists. - file_util::CreateDirectory(path_); - - std::wstring index_name(path_); - file_util::AppendToPath(&index_name, kIndexName); - - HANDLE file = CreateFile(index_name.c_str(), GENERIC_READ | GENERIC_WRITE, - FILE_SHARE_READ, NULL, OPEN_ALWAYS, 0, NULL); - - if (INVALID_HANDLE_VALUE == file) - return false; - - bool ret = true; - if (ERROR_ALREADY_EXISTS != GetLastError()) { - *file_created = true; - ret = CreateBackingStore(file); - } else { - *file_created = false; - } - - CloseHandle(file); - if (!ret) - return false; - - index_ = new MappedFile(); - data_ = reinterpret_cast<Index*>(index_->Init(index_name, 0)); - return true; -} - -// We just created a new file so we're going to write the header and set the -// file length to include the hash table (zero filled). -bool BackendImpl::CreateBackingStore(HANDLE file) { - AdjustMaxCacheSize(0); - - IndexHeader header; - header.table_len = DesiredIndexTableLen(max_size_); - - DWORD actual; - if (!WriteFile(file, &header, sizeof(header), &actual, NULL) || - sizeof(header) != actual) - return false; - - LONG size = static_cast<LONG>(GetIndexSize(header.table_len)); - - if (INVALID_SET_FILE_POINTER == SetFilePointer(file, size, NULL, FILE_BEGIN)) - return false; - - if (!SetEndOfFile(file)) - return false; - - return true; -} - -bool BackendImpl::SetMaxSize(int max_bytes) { - COMPILE_ASSERT(sizeof(max_bytes) == sizeof(max_size_), unsupported_int_model); - if (max_bytes < 0) - return false; - - // Zero size means use the default. - if (!max_bytes) - return true; - - max_size_ = max_bytes; - return true; -} +// ------------------------------------------------------------------------ int32 BackendImpl::GetEntryCount() const { if (!index_) @@ -465,85 +400,6 @@ bool BackendImpl::CreateEntry(const std::string& key, Entry** entry) { return true; } -EntryImpl* BackendImpl::MatchEntry(const std::string& key, uint32 hash, - bool find_parent) { - Addr address(data_->table[hash & mask_]); - EntryImpl* cache_entry = NULL; - EntryImpl* parent_entry = NULL; - bool found = false; - - for (;;) { - if (disabled_) - break; - - if (!address.is_initialized()) { - if (find_parent) - found = true; - break; - } - - bool dirty; - int error = NewEntry(address, &cache_entry, &dirty); - - if (error || dirty) { - // This entry is dirty on disk (it was not properly closed): we cannot - // trust it. - Addr child(0); - if (!error) - child.set_value(cache_entry->GetNextAddress()); - - if (parent_entry) { - parent_entry->SetNextAddress(child); - parent_entry->Release(); - parent_entry = NULL; - } else { - data_->table[hash & mask_] = child.value(); - } - - if (!error) { - // It is important to call DestroyInvalidEntry after removing this - // entry from the table. - DestroyInvalidEntry(address, cache_entry); - cache_entry->Release(); - cache_entry = NULL; - } else { - Trace("NewEntry failed on MatchEntry 0x%x", address.value()); - } - - // Restart the search. 
- address.set_value(data_->table[hash & mask_]); - continue; - } - - if (cache_entry->IsSameEntry(key, hash)) { - cache_entry = EntryImpl::Update(cache_entry); - found = true; - break; - } - cache_entry = EntryImpl::Update(cache_entry); - if (parent_entry) - parent_entry->Release(); - parent_entry = cache_entry; - cache_entry = NULL; - if (!parent_entry) - break; - - address.set_value(parent_entry->GetNextAddress()); - } - - if (parent_entry && (!find_parent || !found)) { - parent_entry->Release(); - parent_entry = NULL; - } - - if (cache_entry && (find_parent || !found)) { - cache_entry->Release(); - cache_entry = NULL; - } - - return find_parent ? parent_entry : cache_entry; -} - bool BackendImpl::DoomEntry(const std::string& key) { if (disabled_) return false; @@ -557,30 +413,6 @@ bool BackendImpl::DoomEntry(const std::string& key) { return true; } -void BackendImpl::InternalDoomEntry(EntryImpl* entry) { - uint32 hash = entry->GetHash(); - std::string key = entry->GetKey(); - EntryImpl* parent_entry = MatchEntry(key, hash, true); - CacheAddr child(entry->GetNextAddress()); - - Trace("Doom entry 0x%p", entry); - - rankings_.Remove(entry->rankings()); - - entry->InternalDoom(); - - if (parent_entry) { - parent_entry->SetNextAddress(Addr(child)); - parent_entry->Release(); - } else { - data_->table[hash & mask_] = child; - } - - data_->header.num_entries--; - DCHECK(data_->header.num_entries >= 0); - stats_.OnEvent(Stats::DOOM_ENTRY); -} - bool BackendImpl::DoomAllEntries() { if (!num_refs_) { index_ = NULL; @@ -744,53 +576,324 @@ void BackendImpl::GetStats(StatsItems* stats) { stats_.GetItems(stats); } -void BackendImpl::TrimCache(bool empty) { - Trace("*** Trim Cache ***"); - if (disabled_) - return; +// ------------------------------------------------------------------------ - Rankings::ScopedRankingsBlock node(&rankings_); - Rankings::ScopedRankingsBlock next(&rankings_, rankings_.GetPrev(node.get())); - DCHECK(next.get()); - int target_size = empty ? 0 : LowWaterAdjust(max_size_); - while (data_->header.num_bytes > target_size && next.get()) { - node.reset(next.release()); - next.reset(rankings_.GetPrev(node.get())); - if (!node->Data()->pointer || empty) { - // This entry is not being used by anybody. - EntryImpl* entry; - bool dirty; - if (NewEntry(Addr(node->Data()->contents), &entry, &dirty)) { - Trace("NewEntry failed on Trim 0x%x", node->address().value()); - continue; - } +bool BackendImpl::SetMaxSize(int max_bytes) { + COMPILE_ASSERT(sizeof(max_bytes) == sizeof(max_size_), unsupported_int_model); + if (max_bytes < 0) + return false; - if (node->Data()->pointer) { - entry = EntryImpl::Update(entry); - } - entry->Doom(); - entry->Release(); - if (!empty) - stats_.OnEvent(Stats::TRIM_ENTRY); + // Zero size means use the default. 
+ if (!max_bytes) + return true; + + max_size_ = max_bytes; + return true; +} + +std::wstring BackendImpl::GetFileName(Addr address) const { + if (!address.is_separate_file() || !address.is_initialized()) { + NOTREACHED(); + return std::wstring(); + } + + std::wstring name = StringPrintf(L"%s\\f_%06x", path_.c_str(), + address.FileNumber()); + return name; +} + +MappedFile* BackendImpl::File(Addr address) { + if (disabled_) + return NULL; + return block_files_.GetFile(address); +} + +bool BackendImpl::CreateExternalFile(Addr* address) { + int file_number = data_->header.last_file + 1; + Addr file_address(0); + bool success = false; + for (int i = 0; (i < 0x0fffffff) && !success; i++) { + if (!file_address.SetFileNumber(file_number)) { + file_number = 1; + continue; } + std::wstring name = GetFileName(file_address); + ScopedHandle file(CreateFile(name.c_str(), GENERIC_WRITE | GENERIC_READ, + FILE_SHARE_READ, NULL, CREATE_ALWAYS, 0, + NULL)); + if (!file.IsValid()) + continue; + + success = true; } - Trace("*** Trim Cache end ***"); - return; + DCHECK(success); + if (!success) + return false; + + data_->header.last_file = file_number; + address->set_value(file_address.value()); + return true; } -void BackendImpl::DestroyInvalidEntry(Addr address, EntryImpl* entry) { - LOG(WARNING) << "Destroying invalid entry."; - Trace("Destroying invalid entry 0x%p", entry); +bool BackendImpl::CreateBlock(FileType block_type, int block_count, + Addr* block_address) { + return block_files_.CreateBlock(block_type, block_count, block_address); +} + +void BackendImpl::DeleteBlock(Addr block_address, bool deep) { + block_files_.DeleteBlock(block_address, deep); +} + +void BackendImpl::UpdateRank(CacheRankingsBlock* node, bool modified) { + rankings_.UpdateRank(node, modified); +} + +void BackendImpl::RecoveredEntry(CacheRankingsBlock* rankings) { + Addr address(rankings->Data()->contents); + EntryImpl* cache_entry = NULL; + bool dirty; + if (NewEntry(address, &cache_entry, &dirty)) + return; + + uint32 hash = cache_entry->GetHash(); + cache_entry->Release(); + + // Anything on the table means that this entry is there. + if (data_->table[hash & mask_]) + return; + + data_->table[hash & mask_] = address.value(); +} + +void BackendImpl::InternalDoomEntry(EntryImpl* entry) { + uint32 hash = entry->GetHash(); + std::string key = entry->GetKey(); + EntryImpl* parent_entry = MatchEntry(key, hash, true); + CacheAddr child(entry->GetNextAddress()); + + Trace("Doom entry 0x%p", entry); rankings_.Remove(entry->rankings()); - entry->SetPointerForInvalidEntry(GetCurrentEntryId()); entry->InternalDoom(); + if (parent_entry) { + parent_entry->SetNextAddress(Addr(child)); + parent_entry->Release(); + } else { + data_->table[hash & mask_] = child; + } + data_->header.num_entries--; DCHECK(data_->header.num_entries >= 0); - stats_.OnEvent(Stats::INVALID_ENTRY); + stats_.OnEvent(Stats::DOOM_ENTRY); +} + +void BackendImpl::CacheEntryDestroyed() { + DecreaseNumRefs(); +} + +int32 BackendImpl::GetCurrentEntryId() { + return data_->header.this_id; +} + +int BackendImpl::MaxFileSize() const { + return max_size_ / 8; +} + +void BackendImpl::ModifyStorageSize(int32 old_size, int32 new_size) { + if (disabled_) + return; + if (old_size > new_size) + SubstractStorageSize(old_size - new_size); + else + AddStorageSize(new_size - old_size); + + // Update the usage statistics. 
+ stats_.ModifyStorageStats(old_size, new_size); +} + +void BackendImpl::TooMuchStorageRequested(int32 size) { + stats_.ModifyStorageStats(0, size); +} + +void BackendImpl::CriticalError(int error) { + LOG(ERROR) << "Critical error found " << error; + if (disabled_) + return; + + LogStats(); + + // Setting the index table length to an invalid value will force re-creation + // of the cache files. + data_->header.table_len = 1; + disabled_ = true; + + if (!num_refs_) + RestartCache(); +} + +void BackendImpl::OnEvent(Stats::Counters an_event) { + stats_.OnEvent(an_event); +} + +void BackendImpl::OnStatsTimer() { + stats_.OnEvent(Stats::TIMER); + int64 current = stats_.GetCounter(Stats::OPEN_ENTRIES); + int64 time = stats_.GetCounter(Stats::TIMER); + + current = current * (time - 1) + num_refs_; + current /= time; + stats_.SetCounter(Stats::OPEN_ENTRIES, current); + stats_.SetCounter(Stats::MAX_ENTRIES, max_refs_); +} + +void BackendImpl::IncrementIoCount() { + num_pending_io_++; +} + +void BackendImpl::DecrementIoCount() { + num_pending_io_--; +} + +void BackendImpl::SetUnitTestMode() { + unit_test_ = true; +} + +void BackendImpl::ClearRefCountForTest() { + num_refs_ = 0; +} + +int BackendImpl::SelfCheck() { + if (!init_) { + LOG(ERROR) << "Init failed"; + return ERR_INIT_FAILED; + } + + int num_entries = rankings_.SelfCheck(); + if (num_entries < 0) { + LOG(ERROR) << "Invalid rankings list, error " << num_entries; + return num_entries; + } + + if (num_entries != data_->header.num_entries) { + LOG(ERROR) << "Number of entries mismatch"; + return ERR_NUM_ENTRIES_MISMATCH; + } + + return CheckAllEntries(); +} + + +// ------------------------------------------------------------------------ + +// We just created a new file so we're going to write the header and set the +// file length to include the hash table (zero filled). +bool BackendImpl::CreateBackingStore(HANDLE file) { + AdjustMaxCacheSize(0); + + IndexHeader header; + header.table_len = DesiredIndexTableLen(max_size_); + + DWORD actual; + if (!WriteFile(file, &header, sizeof(header), &actual, NULL) || + sizeof(header) != actual) + return false; + + LONG size = static_cast<LONG>(GetIndexSize(header.table_len)); + + if (INVALID_SET_FILE_POINTER == SetFilePointer(file, size, NULL, FILE_BEGIN)) + return false; + + if (!SetEndOfFile(file)) + return false; + + return true; +} + +bool BackendImpl::InitBackingStore(bool* file_created) { + // This call fails if the folder exists. + file_util::CreateDirectory(path_); + + std::wstring index_name(path_); + file_util::AppendToPath(&index_name, kIndexName); + + HANDLE file = CreateFile(index_name.c_str(), GENERIC_READ | GENERIC_WRITE, + FILE_SHARE_READ, NULL, OPEN_ALWAYS, 0, NULL); + + if (INVALID_HANDLE_VALUE == file) + return false; + + bool ret = true; + if (ERROR_ALREADY_EXISTS != GetLastError()) { + *file_created = true; + ret = CreateBackingStore(file); + } else { + *file_created = false; + } + + CloseHandle(file); + if (!ret) + return false; + + index_ = new MappedFile(); + data_ = reinterpret_cast<Index*>(index_->Init(index_name, 0)); + return true; +} + +void BackendImpl::AdjustMaxCacheSize(int table_len) { + if (max_size_) + return; + + // The user is not setting the size, let's figure it out. + ULARGE_INTEGER available, total, free; + if (!GetDiskFreeSpaceExW(path_.c_str(), &available, &total, &free)) { + max_size_ = kDefaultCacheSize; + return; + } + + // Attempt to use 1% of the disk available for this user. 
+ available.QuadPart /= 100; + + if (available.QuadPart < static_cast<uint32>(kDefaultCacheSize)) + max_size_ = kDefaultCacheSize; + else if (available.QuadPart > static_cast<uint32>(kint32max)) + max_size_ = kint32max; + else + max_size_ = static_cast<int32>(available.LowPart); + + // Let's not use more than the default size while we tune-up the performance + // of bigger caches. TODO(rvargas): remove this limit. + if (max_size_ > kDefaultCacheSize) + max_size_ = kDefaultCacheSize; + + if (!table_len) + return; + + // If we already have a table, adjust the size to it. + int current_max_size = MaxStorageSizeForTable(table_len); + if (max_size_ > current_max_size) + max_size_= current_max_size; +} + +void BackendImpl::RestartCache() { + index_ = NULL; + block_files_.CloseFiles(); + rankings_.Reset(); + + DelayedCacheCleanup(path_); + + init_ = false; + restarted_ = true; + int64 errors = stats_.GetCounter(Stats::FATAL_ERROR); + + // Don't call Init() if directed by the unit test: we are simulating a failure + // trying to re-enable the cache. + if (unit_test_) + init_ = true; // Let the destructor do proper cleanup. + else if (Init()) + stats_.SetCounter(Stats::FATAL_ERROR, errors + 1); } int BackendImpl::NewEntry(Addr address, EntryImpl** entry, bool* dirty) { @@ -827,107 +930,170 @@ int BackendImpl::NewEntry(Addr address, EntryImpl** entry, bool* dirty) { return 0; } -bool BackendImpl::CreateBlock(FileType block_type, int block_count, - Addr* block_address) { - return block_files_.CreateBlock(block_type, block_count, block_address); -} - -void BackendImpl::DeleteBlock(Addr block_address, bool deep) { - block_files_.DeleteBlock(block_address, deep); -} +EntryImpl* BackendImpl::MatchEntry(const std::string& key, uint32 hash, + bool find_parent) { + Addr address(data_->table[hash & mask_]); + EntryImpl* cache_entry = NULL; + EntryImpl* parent_entry = NULL; + bool found = false; -void BackendImpl::CacheEntryDestroyed() { - DecreaseNumRefs(); -} + for (;;) { + if (disabled_) + break; -void BackendImpl::AddStorageSize(int32 bytes) { - data_->header.num_bytes += bytes; - DCHECK(data_->header.num_bytes >= 0); + if (!address.is_initialized()) { + if (find_parent) + found = true; + break; + } - if (data_->header.num_bytes > max_size_) - TrimCache(false); -} + bool dirty; + int error = NewEntry(address, &cache_entry, &dirty); -void BackendImpl::SubstractStorageSize(int32 bytes) { - data_->header.num_bytes -= bytes; - DCHECK(data_->header.num_bytes >= 0); -} + if (error || dirty) { + // This entry is dirty on disk (it was not properly closed): we cannot + // trust it. + Addr child(0); + if (!error) + child.set_value(cache_entry->GetNextAddress()); -std::wstring BackendImpl::GetFileName(Addr address) const { - if (!address.is_separate_file() || !address.is_initialized()) { - NOTREACHED(); - return std::wstring(); - } + if (parent_entry) { + parent_entry->SetNextAddress(child); + parent_entry->Release(); + parent_entry = NULL; + } else { + data_->table[hash & mask_] = child.value(); + } - std::wstring name = StringPrintf(L"%s\\f_%06x", path_.c_str(), - address.FileNumber()); - return name; -} + if (!error) { + // It is important to call DestroyInvalidEntry after removing this + // entry from the table. 
+ DestroyInvalidEntry(address, cache_entry); + cache_entry->Release(); + cache_entry = NULL; + } else { + Trace("NewEntry failed on MatchEntry 0x%x", address.value()); + } -bool BackendImpl::CreateExternalFile(Addr* address) { - int file_number = data_->header.last_file + 1; - Addr file_address(0); - bool success = false; - for (int i = 0; (i < 0x0fffffff) && !success; i++) { - if (!file_address.SetFileNumber(file_number)) { - file_number = 1; + // Restart the search. + address.set_value(data_->table[hash & mask_]); continue; } - std::wstring name = GetFileName(file_address); - ScopedHandle file(CreateFile(name.c_str(), GENERIC_WRITE | GENERIC_READ, - FILE_SHARE_READ, NULL, CREATE_ALWAYS, 0, - NULL)); - if (!file.IsValid()) - continue; - success = true; + if (cache_entry->IsSameEntry(key, hash)) { + cache_entry = EntryImpl::Update(cache_entry); + found = true; + break; + } + cache_entry = EntryImpl::Update(cache_entry); + if (parent_entry) + parent_entry->Release(); + parent_entry = cache_entry; + cache_entry = NULL; + if (!parent_entry) + break; + + address.set_value(parent_entry->GetNextAddress()); } - DCHECK(success); - if (!success) - return false; + if (parent_entry && (!find_parent || !found)) { + parent_entry->Release(); + parent_entry = NULL; + } - data_->header.last_file = file_number; - address->set_value(file_address.value()); - return true; + if (cache_entry && (find_parent || !found)) { + cache_entry->Release(); + cache_entry = NULL; + } + + return find_parent ? parent_entry : cache_entry; } -int BackendImpl::SelfCheck() { - if (!init_) { - LOG(ERROR) << "Init failed"; - return ERR_INIT_FAILED; - } +void BackendImpl::DestroyInvalidEntry(Addr address, EntryImpl* entry) { + LOG(WARNING) << "Destroying invalid entry."; + Trace("Destroying invalid entry 0x%p", entry); - int num_entries = rankings_.SelfCheck(); - if (num_entries < 0) { - LOG(ERROR) << "Invalid rankings list, error " << num_entries; - return num_entries; - } + rankings_.Remove(entry->rankings()); + entry->SetPointerForInvalidEntry(GetCurrentEntryId()); - if (num_entries != data_->header.num_entries) { - LOG(ERROR) << "Number of entries mismatch"; - return ERR_NUM_ENTRIES_MISMATCH; - } + entry->InternalDoom(); - return CheckAllEntries(); + data_->header.num_entries--; + DCHECK(data_->header.num_entries >= 0); + stats_.OnEvent(Stats::INVALID_ENTRY); } -void BackendImpl::CriticalError(int error) { - LOG(ERROR) << "Critical error found " << error; +void BackendImpl::TrimCache(bool empty) { + Trace("*** Trim Cache ***"); if (disabled_) return; - LogStats(); + Rankings::ScopedRankingsBlock node(&rankings_); + Rankings::ScopedRankingsBlock next(&rankings_, rankings_.GetPrev(node.get())); + DCHECK(next.get()); + int target_size = empty ? 0 : LowWaterAdjust(max_size_); + while (data_->header.num_bytes > target_size && next.get()) { + node.reset(next.release()); + next.reset(rankings_.GetPrev(node.get())); + if (!node->Data()->pointer || empty) { + // This entry is not being used by anybody. + EntryImpl* entry; + bool dirty; + if (NewEntry(Addr(node->Data()->contents), &entry, &dirty)) { + Trace("NewEntry failed on Trim 0x%x", node->address().value()); + continue; + } - // Setting the index table length to an invalid value will force re-creation - // of the cache files. 
- data_->header.table_len = 1; - disabled_ = true; + if (node->Data()->pointer) { + entry = EntryImpl::Update(entry); + } + entry->Doom(); + entry->Release(); + if (!empty) + stats_.OnEvent(Stats::TRIM_ENTRY); + } + } - if (!num_refs_) + Trace("*** Trim Cache end ***"); + return; +} + +void BackendImpl::AddStorageSize(int32 bytes) { + data_->header.num_bytes += bytes; + DCHECK(data_->header.num_bytes >= 0); + + if (data_->header.num_bytes > max_size_) + TrimCache(false); +} + +void BackendImpl::SubstractStorageSize(int32 bytes) { + data_->header.num_bytes -= bytes; + DCHECK(data_->header.num_bytes >= 0); +} + +void BackendImpl::IncreaseNumRefs() { + num_refs_++; + if (max_refs_ < num_refs_) + max_refs_ = num_refs_; +} + +void BackendImpl::DecreaseNumRefs() { + DCHECK(num_refs_); + num_refs_--; + + if (!num_refs_ && disabled_) RestartCache(); } +void BackendImpl::LogStats() { + StatsItems stats; + GetStats(&stats); + + for (size_t index = 0; index < stats.size(); index++) { + LOG(INFO) << stats[index].first << ": " << stats[index].second; + } +} + bool BackendImpl::CheckIndex() { if (!data_) { LOG(ERROR) << "Unable to map Index file"; @@ -1018,157 +1184,4 @@ bool BackendImpl::CheckEntry(EntryImpl* cache_entry) { return !rankings->pointer; } -void BackendImpl::LogStats() { - StatsItems stats; - GetStats(&stats); - - for (size_t index = 0; index < stats.size(); index++) { - LOG(INFO) << stats[index].first << ": " << stats[index].second; - } -} - -void BackendImpl::RestartCache() { - index_ = NULL; - block_files_.CloseFiles(); - rankings_.Reset(); - - DelayedCacheCleanup(path_); - - init_ = false; - restarted_ = true; - int64 errors = stats_.GetCounter(Stats::FATAL_ERROR); - - // Don't call Init() if directed by the unit test: we are simulating a failure - // trying to re-enable the cache. - if (unit_test_) - init_ = true; // Let the destructor do proper cleanup. - else if (Init()) - stats_.SetCounter(Stats::FATAL_ERROR, errors + 1); -} - -void BackendImpl::RecoveredEntry(CacheRankingsBlock* rankings) { - Addr address(rankings->Data()->contents); - EntryImpl* cache_entry = NULL; - bool dirty; - if (NewEntry(address, &cache_entry, &dirty)) - return; - - uint32 hash = cache_entry->GetHash(); - cache_entry->Release(); - - // Anything on the table means that this entry is there. - if (data_->table[hash & mask_]) - return; - - data_->table[hash & mask_] = address.value(); -} - -void BackendImpl::UpdateRank(CacheRankingsBlock* node, bool modified) { - rankings_.UpdateRank(node, modified); -} - -void BackendImpl::IncrementIoCount() { - num_pending_io_++; -} - -void BackendImpl::DecrementIoCount() { - num_pending_io_--; -} - -int32 BackendImpl::GetCurrentEntryId() { - return data_->header.this_id; -} - -void BackendImpl::ClearRefCountForTest() { - num_refs_ = 0; -} - -void BackendImpl::ModifyStorageSize(int32 old_size, int32 new_size) { - if (disabled_) - return; - if (old_size > new_size) - SubstractStorageSize(old_size - new_size); - else - AddStorageSize(new_size - old_size); - - // Update the usage statistics. 
- stats_.ModifyStorageStats(old_size, new_size); -} - -void BackendImpl::OnEvent(Stats::Counters an_event) { - stats_.OnEvent(an_event); -} - -void BackendImpl::TooMuchStorageRequested(int32 size) { - stats_.ModifyStorageStats(0, size); -} - -int BackendImpl::MaxFileSize() const { - return max_size_ / 8; -} - -void BackendImpl::OnStatsTimer() { - stats_.OnEvent(Stats::TIMER); - int64 current = stats_.GetCounter(Stats::OPEN_ENTRIES); - int64 time = stats_.GetCounter(Stats::TIMER); - - current = current * (time - 1) + num_refs_; - current /= time; - stats_.SetCounter(Stats::OPEN_ENTRIES, current); - stats_.SetCounter(Stats::MAX_ENTRIES, max_refs_); -} - -void BackendImpl::IncreaseNumRefs() { - num_refs_++; - if (max_refs_ < num_refs_) - max_refs_ = num_refs_; -} - -void BackendImpl::DecreaseNumRefs() { - DCHECK(num_refs_); - num_refs_--; - - if (!num_refs_ && disabled_) - RestartCache(); -} - -void BackendImpl::SetUnitTestMode() { - unit_test_ = true; -} - -void BackendImpl::AdjustMaxCacheSize(int table_len) { - if (max_size_) - return; - - // The user is not setting the size, let's figure it out. - ULARGE_INTEGER available, total, free; - if (!GetDiskFreeSpaceExW(path_.c_str(), &available, &total, &free)) { - max_size_ = kDefaultCacheSize; - return; - } - - // Attempt to use 1% of the disk available for this user. - available.QuadPart /= 100; - - if (available.QuadPart < static_cast<uint32>(kDefaultCacheSize)) - max_size_ = kDefaultCacheSize; - else if (available.QuadPart > static_cast<uint32>(kint32max)) - max_size_ = kint32max; - else - max_size_ = static_cast<int32>(available.LowPart); - - // Let's not use more than the default size while we tune-up the performance - // of bigger caches. TODO(rvargas): remove this limit. - if (max_size_ > kDefaultCacheSize) - max_size_ = kDefaultCacheSize; - - if (!table_len) - return; - - // If we already have a table, adjust the size to it. - int current_max_size = MaxStorageSizeForTable(table_len); - if (max_size_ > current_max_size) - max_size_= current_max_size; -} - } // namespace disk_cache diff --git a/net/disk_cache/backend_impl.h b/net/disk_cache/backend_impl.h index b891487..602190a 100644 --- a/net/disk_cache/backend_impl.h +++ b/net/disk_cache/backend_impl.h @@ -74,12 +74,14 @@ class BackendImpl : public Backend { // Sets the maximum size for the total amount of data stored by this instance. bool SetMaxSize(int max_bytes); + // Returns the full name for an external storage file. + std::wstring GetFileName(Addr address) const; + // Returns the actual file used to store a given (non-external) address. - MappedFile* File(Addr address) { - if (disabled_) - return NULL; - return block_files_.GetFile(address); - } + MappedFile* File(Addr address); + + // Creates an external storage file. + bool CreateExternalFile(Addr* address); // Creates a new storage block of size block_count. bool CreateBlock(FileType block_type, int block_count, @@ -89,64 +91,66 @@ class BackendImpl : public Backend { // the related storage in addition of releasing the related block. void DeleteBlock(Addr block_address, bool deep); - // Permanently deletes an entry. - void InternalDoomEntry(EntryImpl* entry); - - // Returns the full name for an external storage file. - std::wstring GetFileName(Addr address) const; - - // Creates an external storage file. - bool CreateExternalFile(Addr* address); - // Updates the ranking information for an entry. 
void UpdateRank(CacheRankingsBlock* node, bool modified); - // This method must be called whenever an entry is released for the last time. - void CacheEntryDestroyed(); - - // Handles the pending asynchronous IO count. - void IncrementIoCount(); - void DecrementIoCount(); - - // Returns the id being used on this run of the cache. - int32 GetCurrentEntryId(); - // A node was recovered from a crash, it may not be on the index, so this // method checks it and takes the appropriate action. void RecoveredEntry(CacheRankingsBlock* rankings); - // Clears the counter of references to test handling of corruptions. - void ClearRefCountForTest(); + // Permanently deletes an entry. + void InternalDoomEntry(EntryImpl* entry); - // Sets internal parameters to enable unit testing mode. - void SetUnitTestMode(); + // This method must be called whenever an entry is released for the last time. + void CacheEntryDestroyed(); - // A user data block is being created, extended or truncated. - void ModifyStorageSize(int32 old_size, int32 new_size); + // Returns the id being used on this run of the cache. + int32 GetCurrentEntryId(); // Returns the maximum size for a file to reside on the cache. int MaxFileSize() const; + // A user data block is being created, extended or truncated. + void ModifyStorageSize(int32 old_size, int32 new_size); + // Logs requests that are denied due to being too big. void TooMuchStorageRequested(int32 size); + // Reports a critical error (and disables the cache). + void CriticalError(int error); + // Called when an interesting event should be logged (counted). void OnEvent(Stats::Counters an_event); // Timer callback to calculate usage statistics. void OnStatsTimer(); + // Handles the pending asynchronous IO count. + void IncrementIoCount(); + void DecrementIoCount(); + + // Sets internal parameters to enable unit testing mode. + void SetUnitTestMode(); + + // Clears the counter of references to test handling of corruptions. + void ClearRefCountForTest(); + // Peforms a simple self-check, and returns the number of dirty items // or an error code (negative value). int SelfCheck(); - // Reports a critical error (and disables the cache). - void CriticalError(int error); - private: // Creates a new backing file for the cache index. bool CreateBackingStore(HANDLE file); bool InitBackingStore(bool* file_created); + void AdjustMaxCacheSize(int table_len); + + // Deletes the cache and starts again. + void RestartCache(); + + // Creates a new entry object and checks to see if it is dirty. Returns zero + // on success, or a disk_cache error on failure. + int NewEntry(Addr address, EntryImpl** entry, bool* dirty); // Returns a given entry from the cache. The entry to match is determined by // key and hash, and the returned entry may be the matched one or it's parent @@ -154,32 +158,13 @@ class BackendImpl : public Backend { EntryImpl* MatchEntry(const std::string& key, uint32 hash, bool find_parent); + void DestroyInvalidEntry(Addr address, EntryImpl* entry); + // Deletes entries from the cache until the current size is below the limit. // If empty is true, the whole cache will be trimmed, regardless of being in // use. void TrimCache(bool empty); - - void DestroyInvalidEntry(Addr address, EntryImpl* entry); - - // Creates a new entry object and checks to see if it is dirty. Returns zero - // on success, or a disk_cache error on failure. - int NewEntry(Addr address, EntryImpl** entry, bool* dirty); - - // Part of the selt test. Returns the number or dirty entries, or an error. 
- int CheckAllEntries(); - - // Part of the self test. Returns false if the entry is corrupt. - bool CheckEntry(EntryImpl* cache_entry); - - // Performs basic checks on the index file. Returns false on failure. - bool CheckIndex(); - - // Dumps current cache statistics to the log. - void LogStats(); - - // Deletes the cache and starts again. - void RestartCache(); - + // Handles the used storage count. void AddStorageSize(int32 bytes); void SubstractStorageSize(int32 bytes); @@ -188,7 +173,17 @@ class BackendImpl : public Backend { void IncreaseNumRefs(); void DecreaseNumRefs(); - void AdjustMaxCacheSize(int table_len); + // Dumps current cache statistics to the log. + void LogStats(); + + // Performs basic checks on the index file. Returns false on failure. + bool CheckIndex(); + + // Part of the selt test. Returns the number or dirty entries, or an error. + int CheckAllEntries(); + + // Part of the self test. Returns false if the entry is corrupt. + bool CheckEntry(EntryImpl* cache_entry); scoped_refptr<MappedFile> index_; // The main cache index. std::wstring path_; // Path to the folder used as backing storage. diff --git a/net/disk_cache/entry_impl.cc b/net/disk_cache/entry_impl.cc index b25aad3..75ab5ab 100644 --- a/net/disk_cache/entry_impl.cc +++ b/net/disk_cache/entry_impl.cc @@ -174,70 +174,6 @@ EntryImpl::~EntryImpl() { backend_->CacheEntryDestroyed(); } -void EntryImpl::DeleteData(Addr address, int index) { - if (!address.is_initialized()) - return; - if (address.is_separate_file()) { - if (files_[index]) - files_[index] = NULL; // Releases the object. - - if (!DeleteFile(backend_->GetFileName(address).c_str())) - LOG(ERROR) << "Failed to delete " << backend_->GetFileName(address) << - " from the cache."; - } else { - backend_->DeleteBlock(address, true); - } -} - -bool EntryImpl::CreateEntry(Addr node_address, const std::string& key, - uint32 hash) { - Trace("Create entry In"); - EntryStore* entry_store = entry_.Data(); - RankingsNode* node = node_.Data(); - memset(entry_store, 0, sizeof(EntryStore) * entry_.address().num_blocks()); - memset(node, 0, sizeof(RankingsNode)); - if (!node_.LazyInit(backend_->File(node_address), node_address)) - return false; - - entry_store->rankings_node = node_address.value(); - node->contents = entry_.address().value(); - node->pointer = this; - - entry_store->hash = hash; - entry_store->key_len = static_cast<int32>(key.size()); - if (entry_store->key_len > kMaxInternalKeyLength) { - Addr address(0); - if (!CreateBlock(entry_store->key_len + 1, &address)) - return false; - - entry_store->long_key = address.value(); - File* file = GetBackingFile(address, kKeyFileIndex); - - size_t offset = 0; - if (address.is_block_file()) - offset = address.start_block() * address.BlockSize() + kBlockHeaderSize; - - if (!file || !file->Write(key.data(), key.size(), offset)) { - DeleteData(address, kKeyFileIndex); - return false; - } - - if (address.is_separate_file()) - file->SetLength(key.size() + 1); - } else { - memcpy(entry_store->key, key.data(), key.size()); - entry_store->key[key.size()] = '\0'; - } - backend_->ModifyStorageSize(0, static_cast<int32>(key.size())); - node->dirty = backend_->GetCurrentEntryId(); - Log("Create Entry "); - return true; -} - -void EntryImpl::Close() { - Release(); -} - void EntryImpl::Doom() { if (doomed_) return; @@ -246,13 +182,8 @@ void EntryImpl::Doom() { backend_->InternalDoomEntry(this); } -void EntryImpl::InternalDoom() { - DCHECK(node_.HasData()); - if (!node_.Data()->dirty) { - node_.Data()->dirty = 
backend_->GetCurrentEntryId(); - node_.Store(); - } - doomed_ = true; +void EntryImpl::Close() { + Release(); } std::string EntryImpl::GetKey() const { @@ -443,6 +374,231 @@ int EntryImpl::WriteData(int index, int offset, const char* buf, int buf_len, return (completed || !completion_callback) ? buf_len : net::ERR_IO_PENDING; } +uint32 EntryImpl::GetHash() { + return entry_.Data()->hash; +} + +bool EntryImpl::CreateEntry(Addr node_address, const std::string& key, + uint32 hash) { + Trace("Create entry In"); + EntryStore* entry_store = entry_.Data(); + RankingsNode* node = node_.Data(); + memset(entry_store, 0, sizeof(EntryStore) * entry_.address().num_blocks()); + memset(node, 0, sizeof(RankingsNode)); + if (!node_.LazyInit(backend_->File(node_address), node_address)) + return false; + + entry_store->rankings_node = node_address.value(); + node->contents = entry_.address().value(); + node->pointer = this; + + entry_store->hash = hash; + entry_store->key_len = static_cast<int32>(key.size()); + if (entry_store->key_len > kMaxInternalKeyLength) { + Addr address(0); + if (!CreateBlock(entry_store->key_len + 1, &address)) + return false; + + entry_store->long_key = address.value(); + File* file = GetBackingFile(address, kKeyFileIndex); + + size_t offset = 0; + if (address.is_block_file()) + offset = address.start_block() * address.BlockSize() + kBlockHeaderSize; + + if (!file || !file->Write(key.data(), key.size(), offset)) { + DeleteData(address, kKeyFileIndex); + return false; + } + + if (address.is_separate_file()) + file->SetLength(key.size() + 1); + } else { + memcpy(entry_store->key, key.data(), key.size()); + entry_store->key[key.size()] = '\0'; + } + backend_->ModifyStorageSize(0, static_cast<int32>(key.size())); + node->dirty = backend_->GetCurrentEntryId(); + Log("Create Entry "); + return true; +} + +bool EntryImpl::IsSameEntry(const std::string& key, uint32 hash) { + if (entry_.Data()->hash != hash || entry_.Data()->key_len != key.size()) + return false; + + std::string my_key = GetKey(); + return key.compare(my_key) ? false : true; +} + +void EntryImpl::InternalDoom() { + DCHECK(node_.HasData()); + if (!node_.Data()->dirty) { + node_.Data()->dirty = backend_->GetCurrentEntryId(); + node_.Store(); + } + doomed_ = true; +} + +CacheAddr EntryImpl::GetNextAddress() { + return entry_.Data()->next; +} + +void EntryImpl::SetNextAddress(Addr address) { + entry_.Data()->next = address.value(); + bool success = entry_.Store(); + DCHECK(success); +} + +bool EntryImpl::LoadNodeAddress() { + Addr address(entry_.Data()->rankings_node); + if (!node_.LazyInit(backend_->File(address), address)) + return false; + return node_.Load(); +} + +EntryImpl* EntryImpl::Update(EntryImpl* entry) { + DCHECK(entry->rankings()->HasData()); + + RankingsNode* rankings = entry->rankings()->Data(); + if (rankings->pointer) { + // Already in memory. Prevent clearing the dirty flag on the destructor. 
+ rankings->dirty = 0; + EntryImpl* real_node = reinterpret_cast<EntryImpl*>(rankings->pointer); + real_node->AddRef(); + entry->Release(); + return real_node; + } else { + rankings->dirty = entry->backend_->GetCurrentEntryId(); + rankings->pointer = entry; + if (!entry->rankings()->Store()) { + entry->Release(); + return NULL; + } + return entry; + } +} + +bool EntryImpl::IsDirty(int32 current_id) { + DCHECK(node_.HasData()); + return node_.Data()->dirty && current_id != node_.Data()->dirty; +} + +void EntryImpl::ClearDirtyFlag() { + node_.Data()->dirty = 0; +} + +void EntryImpl::SetPointerForInvalidEntry(int32 new_id) { + node_.Data()->dirty = new_id; + node_.Data()->pointer = this; + node_.Store(); +} + +bool EntryImpl::SanityCheck() { + if (!entry_.Data()->rankings_node || !entry_.Data()->key_len) + return false; + + Addr rankings_addr(entry_.Data()->rankings_node); + if (!rankings_addr.is_initialized() || rankings_addr.is_separate_file() || + rankings_addr.file_type() != RANKINGS) + return false; + + Addr next_addr(entry_.Data()->next); + if (next_addr.is_initialized() && + (next_addr.is_separate_file() || next_addr.file_type() != BLOCK_256)) + return false; + + return true; +} + +void EntryImpl::IncrementIoCount() { + backend_->IncrementIoCount(); +} + +void EntryImpl::DecrementIoCount() { + backend_->DecrementIoCount(); +} + +bool EntryImpl::CreateDataBlock(int index, int size) { + Addr address(entry_.Data()->data_addr[index]); + DCHECK(0 == index || 1 == index); + + if (!CreateBlock(size, &address)) + return false; + + entry_.Data()->data_addr[index] = address.value(); + entry_.Store(); + return true; +} + +bool EntryImpl::CreateBlock(int size, Addr* address) { + DCHECK(!address->is_initialized()); + + FileType file_type = Addr::RequiredFileType(size); + if (EXTERNAL == file_type) { + if (size > backend_->MaxFileSize()) + return false; + if (!backend_->CreateExternalFile(address)) + return false; + } else { + int num_blocks = (size + Addr::BlockSizeForFileType(file_type) - 1) / + Addr::BlockSizeForFileType(file_type); + + if (!backend_->CreateBlock(file_type, num_blocks, address)) + return false; + } + return true; +} + +void EntryImpl::DeleteData(Addr address, int index) { + if (!address.is_initialized()) + return; + if (address.is_separate_file()) { + if (files_[index]) + files_[index] = NULL; // Releases the object. + + if (!DeleteFile(backend_->GetFileName(address).c_str())) + LOG(ERROR) << "Failed to delete " << backend_->GetFileName(address) << + " from the cache."; + } else { + backend_->DeleteBlock(address, true); + } +} + +void EntryImpl::UpdateRank(bool modified) { + if (!doomed_) { + // Everything is handled by the backend. + backend_->UpdateRank(&node_, true); + return; + } + + Time current = Time::Now(); + node_.Data()->last_used = current.ToInternalValue(); + + if (modified) + node_.Data()->last_modified = current.ToInternalValue(); +} + +File* EntryImpl::GetBackingFile(Addr address, int index) { + File* file; + if (address.is_separate_file()) + file = GetExternalFile(address, index); + else + file = backend_->File(address); + return file; +} + +File* EntryImpl::GetExternalFile(Addr address, int index) { + DCHECK(index >= 0 && index <= 2); + if (!files_[index].get()) { + // For a key file, use mixed mode IO. 
+ scoped_refptr<File> file(new File(2 == index)); + if (file->Init(backend_->GetFileName(address))) + files_[index].swap(file); + } + return files_[index].get(); +} + bool EntryImpl::PrepareTarget(int index, int offset, int buf_len, bool truncate) { Addr address(entry_.Data()->data_addr[index]); @@ -512,21 +668,6 @@ bool EntryImpl::GrowUserBuffer(int index, int offset, int buf_len, return true; } -bool EntryImpl::ImportSeparateFile(int index, int offset, int buf_len) { - if (entry_.Data()->data_size[index] > offset + buf_len) { - entry_.Data()->data_size[index] = offset + buf_len; - unreported_size_[index] += offset + buf_len - - entry_.Data()->data_size[index]; - } - - if (!MoveToLocalBuffer(index)) - return false; - - // Clear the end of the buffer. - ClearInvalidData(user_buffers_[index].get(), 0, offset + buf_len); - return true; -} - bool EntryImpl::MoveToLocalBuffer(int index) { Addr address(entry_.Data()->data_addr[index]); DCHECK(!user_buffers_[index].get()); @@ -556,6 +697,22 @@ bool EntryImpl::MoveToLocalBuffer(int index) { return true; } +bool EntryImpl::ImportSeparateFile(int index, int offset, int buf_len) { + if (entry_.Data()->data_size[index] > offset + buf_len) { + entry_.Data()->data_size[index] = offset + buf_len; + unreported_size_[index] += offset + buf_len - + entry_.Data()->data_size[index]; + } + + if (!MoveToLocalBuffer(index)) + return false; + + // Clear the end of the buffer. + ClearInvalidData(user_buffers_[index].get(), 0, offset + buf_len); + return true; +} + + // The common scenario is that this is called from the destructor of the entry, // to write to disk what we have buffered. We don't want to hold the destructor // until the actual IO finishes, so we'll send an asynchronous write that will @@ -603,162 +760,6 @@ bool EntryImpl::Flush(int index, int size, bool async) { return true; } -bool EntryImpl::LoadNodeAddress() { - Addr address(entry_.Data()->rankings_node); - if (!node_.LazyInit(backend_->File(address), address)) - return false; - return node_.Load(); -} - -EntryImpl* EntryImpl::Update(EntryImpl* entry) { - DCHECK(entry->rankings()->HasData()); - - RankingsNode* rankings = entry->rankings()->Data(); - if (rankings->pointer) { - // Already in memory. Prevent clearing the dirty flag on the destructor. 
- rankings->dirty = 0; - EntryImpl* real_node = reinterpret_cast<EntryImpl*>(rankings->pointer); - real_node->AddRef(); - entry->Release(); - return real_node; - } else { - rankings->dirty = entry->backend_->GetCurrentEntryId(); - rankings->pointer = entry; - if (!entry->rankings()->Store()) { - entry->Release(); - return NULL; - } - return entry; - } -} - -bool EntryImpl::CreateDataBlock(int index, int size) { - Addr address(entry_.Data()->data_addr[index]); - DCHECK(0 == index || 1 == index); - - if (!CreateBlock(size, &address)) - return false; - - entry_.Data()->data_addr[index] = address.value(); - entry_.Store(); - return true; -} - -bool EntryImpl::CreateBlock(int size, Addr* address) { - DCHECK(!address->is_initialized()); - - FileType file_type = Addr::RequiredFileType(size); - if (EXTERNAL == file_type) { - if (size > backend_->MaxFileSize()) - return false; - if (!backend_->CreateExternalFile(address)) - return false; - } else { - int num_blocks = (size + Addr::BlockSizeForFileType(file_type) - 1) / - Addr::BlockSizeForFileType(file_type); - - if (!backend_->CreateBlock(file_type, num_blocks, address)) - return false; - } - return true; -} - -bool EntryImpl::IsSameEntry(const std::string& key, uint32 hash) { - if (entry_.Data()->hash != hash || entry_.Data()->key_len != key.size()) - return false; - - std::string my_key = GetKey(); - return key.compare(my_key) ? false : true; -} - -CacheAddr EntryImpl::GetNextAddress() { - return entry_.Data()->next; -} - -void EntryImpl::SetNextAddress(Addr address) { - entry_.Data()->next = address.value(); - bool success = entry_.Store(); - DCHECK(success); -} - -void EntryImpl::UpdateRank(bool modified) { - if (!doomed_) { - // Everything is handled by the backend. - backend_->UpdateRank(&node_, true); - return; - } - - Time current = Time::Now(); - node_.Data()->last_used = current.ToInternalValue(); - - if (modified) - node_.Data()->last_modified = current.ToInternalValue(); -} - -File* EntryImpl::GetBackingFile(Addr address, int index) { - File* file; - if (address.is_separate_file()) - file = GetExternalFile(address, index); - else - file = backend_->File(address); - return file; -} - -File* EntryImpl::GetExternalFile(Addr address, int index) { - DCHECK(index >= 0 && index <= 2); - if (!files_[index].get()) { - // For a key file, use mixed mode IO. 
- scoped_refptr<File> file(new File(2 == index)); - if (file->Init(backend_->GetFileName(address))) - files_[index].swap(file); - } - return files_[index].get(); -} - -uint32 EntryImpl::GetHash() { - return entry_.Data()->hash; -} - -bool EntryImpl::IsDirty(int32 current_id) { - DCHECK(node_.HasData()); - return node_.Data()->dirty && current_id != node_.Data()->dirty; -} - -void EntryImpl::ClearDirtyFlag() { - node_.Data()->dirty = 0; -} - -void EntryImpl::SetPointerForInvalidEntry(int32 new_id) { - node_.Data()->dirty = new_id; - node_.Data()->pointer = this; - node_.Store(); -} - -bool EntryImpl::SanityCheck() { - if (!entry_.Data()->rankings_node || !entry_.Data()->key_len) - return false; - - Addr rankings_addr(entry_.Data()->rankings_node); - if (!rankings_addr.is_initialized() || rankings_addr.is_separate_file() || - rankings_addr.file_type() != RANKINGS) - return false; - - Addr next_addr(entry_.Data()->next); - if (next_addr.is_initialized() && - (next_addr.is_separate_file() || next_addr.file_type() != BLOCK_256)) - return false; - - return true; -} - -void EntryImpl::IncrementIoCount() { - backend_->IncrementIoCount(); -} - -void EntryImpl::DecrementIoCount() { - backend_->DecrementIoCount(); -} - void EntryImpl::Log(const char* msg) { void* pointer = NULL; int dirty = 0; |