diff options
author | rvargas@google.com <rvargas@google.com@0039d316-1c4b-4281-b951-d872f2087c98> | 2008-12-11 23:49:43 +0000 |
---|---|---|
committer | rvargas@google.com <rvargas@google.com@0039d316-1c4b-4281-b951-d872f2087c98> | 2008-12-11 23:49:43 +0000 |
commit | 763ebe7adfd2aa850f94862b49ee6d7371d0e22f (patch) | |
tree | 687a82eaec0a9896698b1b8cbcdd999964fead42 /net/disk_cache | |
parent | 579f8970ea79d64a3d76c9201f28c9b0c8ed5862 (diff) | |
download | chromium_src-763ebe7adfd2aa850f94862b49ee6d7371d0e22f.zip chromium_src-763ebe7adfd2aa850f94862b49ee6d7371d0e22f.tar.gz chromium_src-763ebe7adfd2aa850f94862b49ee6d7371d0e22f.tar.bz2 |
Disk cache: Set up an experiment to measure the effect of
increasing the maximum cache size.
There will be three groups in the experiment, each group with
10% of the users (Dev Channel). Each group will have the max
cache size increased by a factor of 2 to 4 times the current
size, and we'll measure the age of evicted entries.
I'm also adding a few reliability metrics, to detect how
often we fail to perform proper cleanup.
Review URL: http://codereview.chromium.org/14013
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@6847 0039d316-1c4b-4281-b951-d872f2087c98
Diffstat (limited to 'net/disk_cache')
-rw-r--r-- | net/disk_cache/backend_impl.cc | 107 | ||||
-rw-r--r-- | net/disk_cache/backend_impl.h | 5 | ||||
-rw-r--r-- | net/disk_cache/disk_format.h | 4 | ||||
-rw-r--r-- | net/disk_cache/errors.h | 6 |
4 files changed, 94 insertions, 28 deletions
diff --git a/net/disk_cache/backend_impl.cc b/net/disk_cache/backend_impl.cc index d9fe0cc..ed8492c 100644 --- a/net/disk_cache/backend_impl.cc +++ b/net/disk_cache/backend_impl.cc @@ -26,8 +26,8 @@ const wchar_t* kIndexName = L"index"; const int kCleanUpMargin = 1024 * 1024; const int kMaxOldFolders = 100; -// Seems like ~160 MB correspond to ~50k entries. -const int k64kEntriesStore = 160 * 1000 * 1000; +// Seems like ~240 MB correspond to less than 50k entries for 99% of the people. +const int k64kEntriesStore = 240 * 1000 * 1000; const int kBaseTableLen = 64 * 1024; const int kDefaultCacheSize = 80 * 1024 * 1024; @@ -136,6 +136,23 @@ bool DelayedCacheCleanup(const std::wstring& full_path) { return true; } +// Sets |stored_value| for the current experiment. +void InitExperiment(int* stored_value) { + if (*stored_value) + return; + + srand(static_cast<int>(Time::Now().ToInternalValue())); + int option = rand() % 10; + + // Values used by the current experiment are 1 through 4. + if (option > 2) { + // 70% will be here. + *stored_value = 1; + } else { + *stored_value = option + 2; + } +} + } // namespace // ------------------------------------------------------------------------ @@ -184,8 +201,10 @@ bool BackendImpl::Init() { return false; bool create_files = false; - if (!InitBackingStore(&create_files)) + if (!InitBackingStore(&create_files)) { + ReportError(ERR_STORAGE_ERROR); return false; + } num_refs_ = num_pending_io_ = max_refs_ = 0; @@ -197,9 +216,13 @@ bool BackendImpl::Init() { } init_ = true; + if (data_) + InitExperiment(&data_->header.experiment); - if (!CheckIndex()) + if (!CheckIndex()) { + ReportError(ERR_INIT_FAILED); return false; + } // We don't care if the value overflows. The only thing we care about is that // the id cannot be zero, because that value is used as "not dirty". 
@@ -209,6 +232,13 @@ bool BackendImpl::Init() { if (!data_->header.this_id) data_->header.this_id++; + if (data_->header.crash) { + ReportError(ERR_PREVIOUS_CRASH); + } else { + ReportError(0); + data_->header.crash = 1; + } + if (!block_files_.Init(create_files)) return false; @@ -227,6 +257,9 @@ BackendImpl::~BackendImpl() { if (!init_) return; + if (data_) + data_->header.crash = 0; + timer_.Stop(); WaitForPendingIO(&num_pending_io_); @@ -354,12 +387,8 @@ bool BackendImpl::DoomEntry(const std::string& key) { bool BackendImpl::DoomAllEntries() { if (!num_refs_) { - index_ = NULL; - block_files_.CloseFiles(); - rankings_.Reset(); + PrepareForRestart(); DeleteCache(path_.c_str(), false); - init_ = false; - restarted_ = true; return Init(); } else { if (disabled_) @@ -624,6 +653,7 @@ void BackendImpl::CriticalError(int error) { return; LogStats(); + ReportError(error); // Setting the index table length to an invalid value will force re-creation // of the cache files. @@ -634,6 +664,15 @@ void BackendImpl::CriticalError(int error) { RestartCache(); } +void BackendImpl::ReportError(int error) { + static LinearHistogram counter(L"DiskCache.Error", 0, 49, 50); + counter.SetFlags(kUmaTargetedHistogramFlag); + + // We transmit positive numbers, instead of direct error codes. + DCHECK(error <= 0); + counter.Add(error * -1); +} + void BackendImpl::OnEvent(Stats::Counters an_event) { stats_.OnEvent(an_event); } @@ -651,10 +690,13 @@ void BackendImpl::OnStatsTimer() { static bool first_time = true; if (first_time) { first_time = false; - UMA_HISTOGRAM_COUNTS(L"DiskCache.Entries", data_->header.num_entries); - UMA_HISTOGRAM_COUNTS(L"DiskCache.Size", - data_->header.num_bytes / (1024 * 1024)); - UMA_HISTOGRAM_COUNTS(L"DiskCache.MaxSize", max_size_ / (1024 * 1024)); + int experiment = data_ ? 
data_->header.experiment : 1; + std::wstring entries(StringPrintf(L"DiskCache.Entries_%d", experiment)); + std::wstring size(StringPrintf(L"DiskCache.Size_%d", experiment)); + std::wstring max_size(StringPrintf(L"DiskCache.MaxSize_%d", experiment)); + UMA_HISTOGRAM_COUNTS(entries.c_str(), data_->header.num_entries); + UMA_HISTOGRAM_COUNTS(size.c_str(), data_->header.num_bytes / (1024 * 1024)); + UMA_HISTOGRAM_COUNTS(max_size.c_str(), max_size_ / (1024 * 1024)); } } @@ -770,8 +812,10 @@ void BackendImpl::AdjustMaxCacheSize(int table_len) { // Let's not use more than the default size while we tune-up the performance // of bigger caches. TODO(rvargas): remove this limit. - if (max_size_ > kDefaultCacheSize) - max_size_ = kDefaultCacheSize; + int multiplier = table_len ? data_->header.experiment : 1; + DCHECK(multiplier > 0 && multiplier < 5); + if (max_size_ > kDefaultCacheSize * multiplier) + max_size_ = kDefaultCacheSize * multiplier; if (!table_len) return; @@ -783,14 +827,9 @@ void BackendImpl::AdjustMaxCacheSize(int table_len) { } void BackendImpl::RestartCache() { - index_ = NULL; - block_files_.CloseFiles(); - rankings_.Reset(); - + PrepareForRestart(); DelayedCacheCleanup(path_); - init_ = false; - restarted_ = true; int64 errors = stats_.GetCounter(Stats::FATAL_ERROR); // Don't call Init() if directed by the unit test: we are simulating a failure @@ -801,6 +840,16 @@ void BackendImpl::RestartCache() { stats_.SetCounter(Stats::FATAL_ERROR, errors + 1); } +void BackendImpl::PrepareForRestart() { + data_->header.crash = 0; + index_ = NULL; + data_ = NULL; + block_files_.CloseFiles(); + rankings_.Reset(); + init_ = false; + restarted_ = true; +} + int BackendImpl::NewEntry(Addr address, EntryImpl** entry, bool* dirty) { scoped_refptr<EntryImpl> cache_entry(new EntryImpl(this, address)); IncreaseNumRefs(); @@ -1005,9 +1054,7 @@ void BackendImpl::TrimCache(bool empty) { if (node->Data()->pointer) { entry = EntryImpl::Update(entry); } - static Histogram 
counter(L"DiskCache.TrimAge", 1, 10000, 50); - counter.SetFlags(kUmaTargetedHistogramFlag); - counter.Add((Time::Now() - entry->GetLastUsed()).InHours()); + ReportTrimTimes(entry); entry->Doom(); entry->Release(); if (!empty) @@ -1027,6 +1074,14 @@ void BackendImpl::TrimCache(bool empty) { return; } +void BackendImpl::ReportTrimTimes(EntryImpl* entry) { + std::wstring name(StringPrintf(L"DiskCache.TrimAge_%d", + data_->header.experiment)); + static Histogram counter(name.c_str(), 1, 10000, 50); + counter.SetFlags(kUmaTargetedHistogramFlag); + counter.Add((Time::Now() - entry->GetLastUsed()).InHours()); +} + void BackendImpl::AddStorageSize(int32 bytes) { data_->header.num_bytes += bytes; DCHECK(data_->header.num_bytes >= 0); @@ -1093,8 +1148,10 @@ bool BackendImpl::CheckIndex() { max_size_ = kDefaultCacheSize; } + // We need to avoid integer overflows. + DCHECK(max_size_ < kint32max - kint32max / 10); if (data_->header.num_bytes < 0 || - data_->header.num_bytes > max_size_ * 11 / 10) { + data_->header.num_bytes > max_size_ + max_size_ / 10) { LOG(ERROR) << "Invalid cache (current) size"; return false; } diff --git a/net/disk_cache/backend_impl.h b/net/disk_cache/backend_impl.h index 7b747de..9373e90 100644 --- a/net/disk_cache/backend_impl.h +++ b/net/disk_cache/backend_impl.h @@ -99,6 +99,9 @@ class BackendImpl : public Backend { // Reports a critical error (and disables the cache). void CriticalError(int error); + // Reports an uncommon, recoverable error. + void ReportError(int error); + // Called when an interesting event should be logged (counted). void OnEvent(Stats::Counters an_event); @@ -133,6 +136,7 @@ class BackendImpl : public Backend { // Deletes the cache and starts again. void RestartCache(); + void PrepareForRestart(); // Creates a new entry object and checks to see if it is dirty. Returns zero // on success, or a disk_cache error on failure. 
@@ -153,6 +157,7 @@ class BackendImpl : public Backend { // If empty is true, the whole cache will be trimmed, regardless of being in // use. void TrimCache(bool empty); + void ReportTrimTimes(EntryImpl* entry); // Handles the used storage count. void AddStorageSize(int32 bytes); diff --git a/net/disk_cache/disk_format.h b/net/disk_cache/disk_format.h index 2e839a1..7dc2e7c 100644 --- a/net/disk_cache/disk_format.h +++ b/net/disk_cache/disk_format.h @@ -84,7 +84,9 @@ struct IndexHeader { int32 this_id; // Id for all entries being changed (dirty flag). CacheAddr stats; // Storage for usage data. int32 table_len; // Actual size of the table (0 == kIndexTablesize). - int32 pad[64]; + int32 crash; // Signals a previous crash. + int32 experiment; // Id of an ongoing test. + int32 pad[62]; LruData lru; // Eviction control data. IndexHeader() { memset(this, 0, sizeof(*this)); diff --git a/net/disk_cache/errors.h b/net/disk_cache/errors.h index 3922abc..eea087a 100644 --- a/net/disk_cache/errors.h +++ b/net/disk_cache/errors.h @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. -// Error codes reported by self tests. +// Error codes reported by self tests or to UMA. #ifndef NET_DISK_CACHE_ERRORS_H__ #define NET_DISK_CACHE_ERRORS_H__ @@ -19,7 +19,9 @@ enum { ERR_INVALID_ADDRESS = -7, ERR_INVALID_LINKS = -8, ERR_NUM_ENTRIES_MISMATCH = -9, - ERR_READ_FAILURE = -10 + ERR_READ_FAILURE = -10, + ERR_PREVIOUS_CRASH = -11, + ERR_STORAGE_ERROR = -12 }; } // namespace disk_cache |