author     rvargas@google.com <rvargas@google.com@0039d316-1c4b-4281-b951-d872f2087c98>    2012-04-18 22:04:56 +0000
committer  rvargas@google.com <rvargas@google.com@0039d316-1c4b-4281-b951-d872f2087c98>    2012-04-18 22:04:56 +0000
commit     c907656e92631caedf646ca8ee56cc91a0debb51 (patch)
tree       e9ff89c20b3f4e4fa4f75f85797e7828e46ea857 /net/disk_cache
parent     fe5a12c1a0f2ccdbf6e14809428073c011d786ec (diff)
Disk cache: Remove per-size-group histograms and a few stale ones.
BUG=none
TEST=none

Review URL: https://chromiumcodereview.appspot.com/10116006

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@132879 0039d316-1c4b-4281-b951-d872f2087c98
Diffstat (limited to 'net/disk_cache')
-rw-r--r--    net/disk_cache/backend_impl.cc    27
-rw-r--r--    net/disk_cache/backend_impl.h      3
-rw-r--r--    net/disk_cache/entry_impl.cc      11
3 files changed, 12 insertions, 29 deletions
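
Note: the heart of this change is the deleted BackendImpl::GetSizeGroup() in the first hunk below. It bucketed the current cache size into 50 MB-wide groups, capped at group 6, and every CACHE_UMA call that previously took that group now passes a constant 0. As a standalone illustration only (the function name here is made up; the real code is the deleted method visible in the diff), the bucketing amounts to:

// Minimal sketch of the per-size-group bucketing being removed: the cache
// size in bytes maps to a 50 MB-wide group, capped at 6, and a disabled
// cache reports group 0. Hypothetical name; mirrors the deleted
// BackendImpl::GetSizeGroup() shown in the diff below.
int CacheSizeGroupSketch(int num_bytes, bool disabled) {
  if (disabled)
    return 0;
  int group = num_bytes / (50 * 1024 * 1024);  // 50 MB per group.
  if (group > 6)
    group = 6;  // Limit the number of groups, just in case.
  return group;
}
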
diff --git a/net/disk_cache/backend_impl.cc b/net/disk_cache/backend_impl.cc
index 037ffc9..cc8dce7 100644
--- a/net/disk_cache/backend_impl.cc
+++ b/net/disk_cache/backend_impl.cc
@@ -687,7 +687,7 @@ EntryImpl* BackendImpl::OpenEntryImpl(const std::string& key) {
eviction_.OnOpenEntry(cache_entry);
entry_count_++;
- CACHE_UMA(AGE_MS, "OpenTime", GetSizeGroup(), start);
+ CACHE_UMA(AGE_MS, "OpenTime", 0, start);
stats_.OnEvent(Stats::OPEN_HIT);
SIMPLE_STATS_COUNTER("disk_cache.hit");
return cache_entry;
@@ -783,7 +783,7 @@ EntryImpl* BackendImpl::CreateEntryImpl(const std::string& key) {
// Link this entry through the lists.
eviction_.OnCreateEntry(cache_entry);
- CACHE_UMA(AGE_MS, "CreateTime", GetSizeGroup(), start);
+ CACHE_UMA(AGE_MS, "CreateTime", 0, start);
stats_.OnEvent(Stats::CREATE_HIT);
SIMPLE_STATS_COUNTER("disk_cache.miss");
Trace("create entry hit ");
@@ -1059,7 +1059,7 @@ void BackendImpl::BufferDeleted(int size) {
}
bool BackendImpl::IsLoaded() const {
- CACHE_UMA(COUNTS, "PendingIO", GetSizeGroup(), num_pending_io_);
+ CACHE_UMA(COUNTS, "PendingIO", 0, num_pending_io_);
if (user_flags_ & kNoLoadProtection)
return false;
@@ -1077,17 +1077,6 @@ base::WeakPtr<BackendImpl> BackendImpl::GetWeakPtr() {
return ptr_factory_.GetWeakPtr();
}
-int BackendImpl::GetSizeGroup() const {
- if (disabled_)
- return 0;
-
- // We want to report times grouped by the current cache size (50 MB groups).
- int group = data_->header.num_bytes / (50 * 1024 * 1024);
- if (group > 6)
- group = 6; // Limit the number of groups, just in case.
- return group;
-}
-
// We want to remove biases from some histograms so we only send data once per
// week.
bool BackendImpl::ShouldReportAgain() {
@@ -1576,7 +1565,7 @@ int BackendImpl::NewEntry(Addr address, EntryImpl** entry) {
return ERR_READ_FAILURE;
if (IsLoaded()) {
- CACHE_UMA(AGE_MS, "LoadTime", GetSizeGroup(), start);
+ CACHE_UMA(AGE_MS, "LoadTime", 0, start);
}
if (!cache_entry->SanityCheck()) {
@@ -2004,8 +1993,7 @@ void BackendImpl::ReportStats() {
return;
CACHE_UMA(HOURS, "UseTime", 0, static_cast<int>(use_hours));
- CACHE_UMA(PERCENTAGE, "HitRatio", data_->header.experiment,
- stats_.GetHitRatio());
+ CACHE_UMA(PERCENTAGE, "HitRatio", 0, stats_.GetHitRatio());
int64 trim_rate = stats_.GetCounter(Stats::TRIM_ENTRY) / use_hours;
CACHE_UMA(COUNTS, "TrimRate", 0, static_cast<int>(trim_rate));
@@ -2022,15 +2010,14 @@ void BackendImpl::ReportStats() {
CACHE_UMA(PERCENTAGE, "LargeEntriesRatio", 0, large_ratio);
if (new_eviction_) {
- CACHE_UMA(PERCENTAGE, "ResurrectRatio", data_->header.experiment,
- stats_.GetResurrectRatio());
+ CACHE_UMA(PERCENTAGE, "ResurrectRatio", 0, stats_.GetResurrectRatio());
CACHE_UMA(PERCENTAGE, "NoUseRatio", 0,
data_->header.lru.sizes[0] * 100 / data_->header.num_entries);
CACHE_UMA(PERCENTAGE, "LowUseRatio", 0,
data_->header.lru.sizes[1] * 100 / data_->header.num_entries);
CACHE_UMA(PERCENTAGE, "HighUseRatio", 0,
data_->header.lru.sizes[2] * 100 / data_->header.num_entries);
- CACHE_UMA(PERCENTAGE, "DeletedRatio", data_->header.experiment,
+ CACHE_UMA(PERCENTAGE, "DeletedRatio", 0,
data_->header.lru.sizes[4] * 100 / data_->header.num_entries);
}
diff --git a/net/disk_cache/backend_impl.h b/net/disk_cache/backend_impl.h
index 663f7fc..f2e6666 100644
--- a/net/disk_cache/backend_impl.h
+++ b/net/disk_cache/backend_impl.h
@@ -190,9 +190,6 @@ class NET_EXPORT_PRIVATE BackendImpl : public Backend {
// Returns a weak pointer to this object.
base::WeakPtr<BackendImpl> GetWeakPtr();
- // Returns the group for this client, based on the current cache size.
- int GetSizeGroup() const;
-
// Returns true if we should send histograms for this user again. The caller
// must call this function only once per run (because it returns always the
// same thing on a given run).
diff --git a/net/disk_cache/entry_impl.cc b/net/disk_cache/entry_impl.cc
index b7eb18c..922a56b 100644
--- a/net/disk_cache/entry_impl.cc
+++ b/net/disk_cache/entry_impl.cc
@@ -696,13 +696,12 @@ void EntryImpl::ReportIOTime(Operation op, const base::TimeTicks& start) {
if (!backend_)
return;
- int group = backend_->GetSizeGroup();
switch (op) {
case kRead:
- CACHE_UMA(AGE_MS, "ReadTime", group, start);
+ CACHE_UMA(AGE_MS, "ReadTime", 0, start);
break;
case kWrite:
- CACHE_UMA(AGE_MS, "WriteTime", group, start);
+ CACHE_UMA(AGE_MS, "WriteTime", 0, start);
break;
case kSparseRead:
CACHE_UMA(AGE_MS, "SparseReadTime", 0, start);
@@ -711,13 +710,13 @@ void EntryImpl::ReportIOTime(Operation op, const base::TimeTicks& start) {
CACHE_UMA(AGE_MS, "SparseWriteTime", 0, start);
break;
case kAsyncIO:
- CACHE_UMA(AGE_MS, "AsyncIOTime", group, start);
+ CACHE_UMA(AGE_MS, "AsyncIOTime", 0, start);
break;
case kReadAsync1:
- CACHE_UMA(AGE_MS, "AsyncReadDispatchTime", group, start);
+ CACHE_UMA(AGE_MS, "AsyncReadDispatchTime", 0, start);
break;
case kWriteAsync1:
- CACHE_UMA(AGE_MS, "AsyncWriteDispatchTime", group, start);
+ CACHE_UMA(AGE_MS, "AsyncWriteDispatchTime", 0, start);
break;
default:
NOTREACHED();
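
Note: the third CACHE_UMA argument is the experiment/group number that feeds into the histogram name. A rough sketch of that naming, assuming a non-zero value is appended as a numeric suffix (the exact formatting in Chromium's histogram_macros.h may differ), shows why dropping the size groups collapses up to seven per-metric histograms into a single one:

#include <string>

// Hedged sketch: assume a non-zero group/experiment is appended as "_<n>",
// e.g. "DiskCache.0.OpenTime_3", versus the plain "DiskCache.0.OpenTime"
// that the constant 0 in this patch now selects. Name and formatting here
// are illustrative, not taken from the Chromium source.
std::string SketchHistogramName(int cache_type, const std::string& name,
                                int experiment) {
  std::string base = "DiskCache." + std::to_string(cache_type) + "." + name;
  return experiment ? base + "_" + std::to_string(experiment) : base;
}
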