-rw-r--r--  net/disk_cache/backend_impl.cc | 125
-rw-r--r--  net/disk_cache/backend_impl.h  |  13
-rw-r--r--  net/disk_cache/disk_format.h   |   6
-rw-r--r--  net/disk_cache/eviction.cc     |  18
-rw-r--r--  net/disk_cache/stats.cc        |  36
-rw-r--r--  net/disk_cache/stats.h         |   9
6 files changed, 192 insertions(+), 15 deletions(-)
diff --git a/net/disk_cache/backend_impl.cc b/net/disk_cache/backend_impl.cc
index 9d9628d..9216d22 100644
--- a/net/disk_cache/backend_impl.cc
+++ b/net/disk_cache/backend_impl.cc
@@ -676,6 +676,50 @@ void BackendImpl::TooMuchStorageRequested(int32 size) {
   stats_.ModifyStorageStats(0, size);
 }
 
+// We want to remove biases from some histograms so we only send data once per
+// week.
+bool BackendImpl::ShouldReportAgain() {
+  static bool first_time = true;
+  static bool should_send = false;
+
+  if (!first_time)
+    return should_send;
+
+  first_time = false;
+  int64 last_report = stats_.GetCounter(Stats::LAST_REPORT);
+  Time last_time = Time::FromInternalValue(last_report);
+  if (!last_report || (Time::Now() - last_time).InDays() >= 7) {
+    stats_.SetCounter(Stats::LAST_REPORT, Time::Now().ToInternalValue());
+    should_send = true;
+    return true;
+  }
+  return false;
+}
+
+void BackendImpl::FirstEviction() {
+  DCHECK(data_->header.create_time);
+
+  Time create_time = Time::FromInternalValue(data_->header.create_time);
+  static Histogram counter("DiskCache.FillupAge", 1, 10000, 50);
+  counter.SetFlags(kUmaTargetedHistogramFlag);
+  counter.Add((Time::Now() - create_time).InHours());
+
+  int64 use_hours = stats_.GetCounter(Stats::TIMER) / 120;
+  static Histogram counter2("DiskCache.FillupTime", 1, 10000, 50);
+  counter2.SetFlags(kUmaTargetedHistogramFlag);
+  counter2.Add(static_cast<int>(use_hours));
+
+  UMA_HISTOGRAM_PERCENTAGE("DiskCache.FirstHitRatio", stats_.GetHitRatio());
+
+  int avg_size = data_->header.num_bytes / GetEntryCount();
+  UMA_HISTOGRAM_COUNTS("DiskCache.FirstEntrySize", avg_size);
+
+  int large_entries_bytes = stats_.GetLargeEntriesSize();
+  int large_ratio = large_entries_bytes * 100 / data_->header.num_bytes;
+  UMA_HISTOGRAM_PERCENTAGE("DiskCache.FirstLargeEntriesRatio", large_ratio);
+  stats_.ResetRatios();
+}
+
 void BackendImpl::CriticalError(int error) {
   LOG(ERROR) << "Critical error found " << error;
   if (disabled_)
@@ -721,13 +765,8 @@ void BackendImpl::OnStatsTimer() {
   static bool first_time = true;
   if (first_time) {
     first_time = false;
-    int experiment = data_->header.experiment;
-    std::string entries(StringPrintf("DiskCache.Entries_%d", experiment));
-    std::string size(StringPrintf("DiskCache.Size_%d", experiment));
-    std::string max_size(StringPrintf("DiskCache.MaxSize_%d", experiment));
-    UMA_HISTOGRAM_COUNTS(entries.c_str(), data_->header.num_entries);
-    UMA_HISTOGRAM_COUNTS(size.c_str(), data_->header.num_bytes / (1024 * 1024));
-    UMA_HISTOGRAM_COUNTS(max_size.c_str(), max_size_ / (1024 * 1024));
+    if (ShouldReportAgain())
+      ReportStats();
   }
 
   // Save stats to disk at 5 min intervals.
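The once-per-week gate above combines two mechanisms: function-local statics memoize the decision for the rest of the run, and the Stats::LAST_REPORT counter persists the timestamp across runs. Below is a minimal standalone sketch of the same pattern, using std::chrono and a file-static variable in place of base::Time and the on-disk stats block; all names here are illustrative, not Chromium API.

#include <chrono>
#include <cstdint>

// Hypothetical persistence hooks standing in for the Stats::LAST_REPORT
// counter; they load/store a microsecond timestamp between runs.
static int64_t g_last_report = 0;
int64_t load_last_report() { return g_last_report; }
void store_last_report(int64_t when) { g_last_report = when; }

// Sketch of the once-per-week gate: decide once, then memoize for the run.
bool ShouldReportAgainSketch() {
  static bool first_time = true;
  static bool should_send = false;
  if (!first_time)
    return should_send;  // every later call repeats the first decision

  first_time = false;
  using namespace std::chrono;
  const int64_t now = duration_cast<microseconds>(
      system_clock::now().time_since_epoch()).count();
  const int64_t last = load_last_report();
  const int64_t week = microseconds(hours(24 * 7)).count();
  if (!last || now - last >= week) {
    store_last_report(now);  // open a new one-week reporting window
    should_send = true;
  }
  return should_send;
}

The early return is what lets the header comment promise a single answer per run, even when several reporting sites consult the gate.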
@@ -793,6 +832,8 @@ bool BackendImpl::CreateBackingStore(disk_cache::File* file) {
   if (new_eviction_)
     header.version = 0x20001;
 
+  header.create_time = Time::Now().ToInternalValue();
+
   if (!file->Write(&header, sizeof(header), 0))
     return false;
 
@@ -1226,6 +1267,76 @@ void BackendImpl::LogStats() {
   }
 }
 
+void BackendImpl::ReportStats() {
+  int experiment = data_->header.experiment;
+  std::string entries(StringPrintf("DiskCache.Entries_%d", experiment));
+  std::string size(StringPrintf("DiskCache.Size_%d", experiment));
+  std::string max_size(StringPrintf("DiskCache.MaxSize_%d", experiment));
+  UMA_HISTOGRAM_COUNTS(entries.c_str(), data_->header.num_entries);
+  UMA_HISTOGRAM_COUNTS(size.c_str(), data_->header.num_bytes / (1024 * 1024));
+  UMA_HISTOGRAM_COUNTS(max_size.c_str(), max_size_ / (1024 * 1024));
+
+  UMA_HISTOGRAM_COUNTS("DiskCache.AverageOpenEntries",
+                       static_cast<int>(stats_.GetCounter(Stats::OPEN_ENTRIES)));
+  UMA_HISTOGRAM_COUNTS("DiskCache.MaxOpenEntries",
+                       static_cast<int>(stats_.GetCounter(Stats::MAX_ENTRIES)));
+  stats_.SetCounter(Stats::MAX_ENTRIES, 0);
+
+  if (!data_->header.create_time) {
+    // This is a client running the experiment on the dev channel.
+    std::string hit_ratio(StringPrintf("DiskCache.HitRatio_%d", experiment));
+    UMA_HISTOGRAM_PERCENTAGE(hit_ratio.c_str(), stats_.GetHitRatio());
+    stats_.ResetRatios();
+
+    if (!data_->header.num_bytes)
+      return;
+
+    int large_entries_bytes = stats_.GetLargeEntriesSize();
+    int large_ratio = large_entries_bytes * 100 / data_->header.num_bytes;
+    std::string large_ratio_name(StringPrintf("DiskCache.LargeEntriesRatio_%d",
+                                              experiment));
+    UMA_HISTOGRAM_PERCENTAGE(large_ratio_name.c_str(), large_ratio);
+    return;
+  }
+
+  if (!data_->header.lru.filled)
+    return;
+
+  // This is an up-to-date client that will report FirstEviction() data. After
+  // that event, start reporting this:
+
+  int64 total_hours = stats_.GetCounter(Stats::TIMER) / 120;
+  static Histogram counter("DiskCache.TotalTime", 1, 10000, 50);
+  counter.SetFlags(kUmaTargetedHistogramFlag);
+  counter.Add(static_cast<int>(total_hours));
+
+  int64 use_hours = stats_.GetCounter(Stats::LAST_REPORT_TIMER) / 120;
+  if (!use_hours || !GetEntryCount() || !data_->header.num_bytes)
+    return;
+
+  static Histogram counter2("DiskCache.UseTime", 1, 10000, 50);
+  counter2.SetFlags(kUmaTargetedHistogramFlag);
+  counter2.Add(static_cast<int>(use_hours));
+
+  UMA_HISTOGRAM_PERCENTAGE("DiskCache.HitRatio", stats_.GetHitRatio());
+  UMA_HISTOGRAM_PERCENTAGE("DiskCache.ResurrectRatio",
+                           stats_.GetResurrectRatio());
+
+  int64 trim_rate = stats_.GetCounter(Stats::TRIM_ENTRY) / use_hours;
+  UMA_HISTOGRAM_COUNTS("DiskCache.TrimRate", static_cast<int>(trim_rate));
+
+  int avg_size = data_->header.num_bytes / GetEntryCount();
+  UMA_HISTOGRAM_COUNTS("DiskCache.EntrySize", avg_size);
+
+  int large_entries_bytes = stats_.GetLargeEntriesSize();
+  int large_ratio = large_entries_bytes * 100 / data_->header.num_bytes;
+  UMA_HISTOGRAM_PERCENTAGE("DiskCache.LargeEntriesRatio", large_ratio);
+
+  stats_.ResetRatios();
+  stats_.SetCounter(Stats::TRIM_ENTRY, 0);
+  stats_.SetCounter(Stats::LAST_REPORT_TIMER, 0);
+}
+
 void BackendImpl::UpgradeTo2_1() {
   // 2.1 is basically the same as 2.0, except that new fields are actually
   // updated by the new eviction algorithm.
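A note on the arithmetic in ReportStats(): dividing Stats::TIMER by 120 yields hours, which fits a stats timer that ticks every 30 seconds (the "5 min intervals" save comment above then corresponds to 10 ticks). A small sketch of the rate computations under that assumption, with counters passed in as plain integers and all names invented for illustration:

#include <cstdint>
#include <cstdio>

// timer_ticks plays the role of Stats::TIMER / LAST_REPORT_TIMER data,
// at one tick per 30 seconds, so 120 ticks make an hour.
void ReportRates(int64_t timer_ticks, int64_t trimmed_entries,
                 int64_t num_bytes, int entry_count) {
  const int64_t use_hours = timer_ticks / 120;  // 120 ticks == 1 hour
  if (!use_hours || !entry_count || !num_bytes)
    return;  // avoid dividing by zero, exactly as ReportStats() bails out

  const int64_t trim_rate = trimmed_entries / use_hours;  // evictions/hour
  const int64_t avg_size = num_bytes / entry_count;       // bytes per entry
  std::printf("hours=%lld trim/h=%lld avg=%lld\n",
              (long long)use_hours, (long long)trim_rate,
              (long long)avg_size);
}

Resetting TRIM_ENTRY and LAST_REPORT_TIMER at the end of ReportStats() is what turns these into per-reporting-window rates rather than lifetime totals.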
diff --git a/net/disk_cache/backend_impl.h b/net/disk_cache/backend_impl.h
index def2119..fd1f9be 100644
--- a/net/disk_cache/backend_impl.h
+++ b/net/disk_cache/backend_impl.h
@@ -100,6 +100,14 @@ class BackendImpl : public Backend {
   // Logs requests that are denied due to being too big.
   void TooMuchStorageRequested(int32 size);
 
+  // Returns true if we should send histograms for this user again. The caller
+  // must call this function only once per run (because it always returns the
+  // same thing on a given run).
+  bool ShouldReportAgain();
+
+  // Reports some data when we filled up the cache.
+  void FirstEviction();
+
   // Reports a critical error (and disables the cache).
   void CriticalError(int error);
 
@@ -182,6 +190,9 @@ class BackendImpl : public Backend {
   // Dumps current cache statistics to the log.
   void LogStats();
 
+  // Sends UMA stats.
+  void ReportStats();
+
   // Upgrades the index file to version 2.1.
   void UpgradeTo2_1();
 
@@ -204,7 +215,7 @@ class BackendImpl : public Backend {
   Eviction eviction_;  // Handler of the eviction algorithm.
   int num_refs_;  // Number of referenced cache entries.
   int max_refs_;  // Max number of referenced cache entries.
-  int num_pending_io_;  // Number of pending IO operations;
+  int num_pending_io_;  // Number of pending IO operations.
   bool init_;  // controls the initialization of the system.
   bool restarted_;
   bool unit_test_;
diff --git a/net/disk_cache/disk_format.h b/net/disk_cache/disk_format.h
index eced8e3..1d5dcd7 100644
--- a/net/disk_cache/disk_format.h
+++ b/net/disk_cache/disk_format.h
@@ -66,7 +66,8 @@ const uint32 kIndexMagic = 0xC103CAC3;
 const uint32 kCurrentVersion = 0x20000;  // Version 2.0.
 
 struct LruData {
-  int32     pad1[3];
+  int32     pad1[2];
+  int32     filled;          // Flag to tell when we filled the cache.
   int32     sizes[5];
   CacheAddr heads[5];
   CacheAddr tails[5];
@@ -88,7 +89,8 @@ struct IndexHeader {
   int32       table_len;     // Actual size of the table (0 == kIndexTablesize).
   int32       crash;         // Signals a previous crash.
   int32       experiment;    // Id of an ongoing test.
-  int32       pad[54];
+  uint64      create_time;   // Creation time for this set of files.
+  int32       pad[52];
   LruData     lru;           // Eviction control data.
   IndexHeader() {
     memset(this, 0, sizeof(*this));
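Both format changes are size-neutral: LruData trades one of its three pad words for the filled flag, and IndexHeader carves the 8-byte create_time out of two pad words (54 down to 52). The ten 32-bit fields that precede the pad in IndexHeader (magic through experiment, 40 bytes) also leave create_time on an 8-byte boundary. A sketch of a compile-time check for this invariant, using simplified stand-in structs rather than the real disk_format.h types:

#include <cstdint>

// Stand-ins only: fixed[10] models the ten 32-bit fields (magic, version,
// num_entries, num_bytes, last_file, this_id, stats, table_len, crash,
// experiment) ahead of the pad in the real IndexHeader.
struct IndexHeaderOld {
  int32_t fixed[10];
  int32_t pad[54];
};

struct IndexHeaderNew {
  int32_t fixed[10];     // 40 bytes, so create_time is 8-byte aligned
  uint64_t create_time;  // occupies the space of two old pad words
  int32_t pad[52];
};

// The new field must come out of the pad, not grow the on-disk header.
static_assert(sizeof(IndexHeaderOld) == sizeof(IndexHeaderNew),
              "index header size changed");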
diff --git a/net/disk_cache/eviction.cc b/net/disk_cache/eviction.cc
index 6f7df82..16d0957 100644
--- a/net/disk_cache/eviction.cc
+++ b/net/disk_cache/eviction.cc
@@ -140,11 +140,19 @@ void Eviction::ReportTrimTimes(EntryImpl* entry) {
   static bool first_time = true;
   if (first_time) {
     first_time = false;
-    std::string name(StringPrintf("DiskCache.TrimAge_%d",
-                                  header_->experiment));
-    static Histogram counter(name.c_str(), 1, 10000, 50);
-    counter.SetFlags(kUmaTargetedHistogramFlag);
-    counter.Add((Time::Now() - entry->GetLastUsed()).InHours());
+    if (backend_->ShouldReportAgain()) {
+      std::string name(StringPrintf("DiskCache.TrimAge_%d",
+                                    header_->experiment));
+      static Histogram counter(name.c_str(), 1, 10000, 50);
+      counter.SetFlags(kUmaTargetedHistogramFlag);
+      counter.Add((Time::Now() - entry->GetLastUsed()).InHours());
+    }
+
+    if (header_->create_time && !header_->lru.filled) {
+      // This is the first entry that we have to evict, generate some noise.
+      header_->lru.filled = 1;
+      backend_->FirstEviction();
+    }
   }
 }
 
diff --git a/net/disk_cache/stats.cc b/net/disk_cache/stats.cc
index 46c41a0..4f9440e 100644
--- a/net/disk_cache/stats.cc
+++ b/net/disk_cache/stats.cc
@@ -54,6 +54,8 @@ static const char* kCounterNames[] = {
   "Open rankings",
   "Get rankings",
   "Fatal error",
+  "Last report",
+  "Last report timer"
 };
 COMPILE_ASSERT(arraysize(kCounterNames) == disk_cache::Stats::MAX_COUNTER,
                update_the_names);
@@ -257,6 +259,40 @@ void Stats::GetItems(StatsItems* items) {
   }
 }
 
+int Stats::GetHitRatio() const {
+  return GetRatio(OPEN_HIT, OPEN_MISS);
+}
+
+int Stats::GetResurrectRatio() const {
+  return GetRatio(RESURRECT_HIT, CREATE_HIT);
+}
+
+int Stats::GetRatio(Counters hit, Counters miss) const {
+  int64 ratio = GetCounter(hit) * 100;
+  if (!ratio)
+    return 0;
+
+  ratio /= (GetCounter(hit) + GetCounter(miss));
+  return static_cast<int>(ratio);
+}
+
+void Stats::ResetRatios() {
+  SetCounter(OPEN_HIT, 0);
+  SetCounter(OPEN_MISS, 0);
+  SetCounter(RESURRECT_HIT, 0);
+  SetCounter(CREATE_HIT, 0);
+}
+
+int Stats::GetLargeEntriesSize() {
+  int total = 0;
+  // data_sizes_[20] stores values between 512 KB and 1 MB (see comment before
+  // GetStatsBucket()).
+  for (int bucket = 20; bucket < kDataSizesLength; bucket++)
+    total += data_sizes_[bucket] * GetBucketRange(bucket);
+
+  return total;
+}
+
 void Stats::Store() {
   if (!backend_)
     return;
diff --git a/net/disk_cache/stats.h b/net/disk_cache/stats.h
index 6658df6..32585d4 100644
--- a/net/disk_cache/stats.h
+++ b/net/disk_cache/stats.h
@@ -42,6 +42,8 @@ class Stats {
     OPEN_RANKINGS,  // An entry has to be read just to modify rankings.
     GET_RANKINGS,   // We got the ranking info without reading the whole entry.
     FATAL_ERROR,
+    LAST_REPORT,    // Time of the last report we sent.
+    LAST_REPORT_TIMER,  // Timer count since the last report.
     MAX_COUNTER
   };
 
@@ -59,6 +61,12 @@ class Stats {
   int64 GetCounter(Counters counter) const;
   void GetItems(StatsItems* items);
+  int GetHitRatio() const;
+  int GetResurrectRatio() const;
+  void ResetRatios();
+
+  // Returns the lower bound of the space used by entries bigger than 512 KB.
+  int GetLargeEntriesSize();
 
   // Saves the stats to disk.
   void Store();
 
@@ -70,6 +78,7 @@ class Stats {
 
  private:
   int GetStatsBucket(int32 size);
+  int GetRatio(Counters hit, Counters miss) const;
 
   BackendImpl* backend_;
   uint32 storage_addr_;
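Two details of the new Stats helpers are easy to miss: GetRatio() works in int64 so that scaling the hit count by 100 cannot overflow, and GetLargeEntriesSize() is only a lower bound because each bucket count is multiplied by the bucket's lower limit. A self-contained sketch of both calculations; the bucket_low array is an invented stand-in for Stats::GetBucketRange():

#include <cstdint>

// Sketch of GetRatio(): widen to 64 bits before scaling, then return an
// integer percentage in [0, 100].
int GetRatioSketch(int64_t hit, int64_t miss) {
  int64_t ratio = hit * 100;
  if (!ratio)
    return 0;  // no hits; also sidesteps a 0 / 0 division
  ratio /= hit + miss;
  return static_cast<int>(ratio);
}

// Sketch of GetLargeEntriesSize(): each entry in bucket b is at least
// bucket_low[b] bytes, so summing count * lower-bound under-counts by
// design, which is why the header calls the result a lower bound.
int64_t LargeEntriesLowerBound(const int* counts, const int* bucket_low,
                               int first_large, int num_buckets) {
  int64_t total = 0;
  for (int b = first_large; b < num_buckets; ++b)
    total += static_cast<int64_t>(counts[b]) * bucket_low[b];
  return total;
}

In the real code first_large is 20, matching the comment that data_sizes_[20] is the 512 KB to 1 MB bucket.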