Diffstat (limited to 'net')
-rw-r--r--  net/disk_cache/backend_impl.cc      76
-rw-r--r--  net/disk_cache/backend_impl.h        9
-rw-r--r--  net/disk_cache/entry_impl.cc        19
-rw-r--r--  net/disk_cache/eviction.cc          16
-rw-r--r--  net/disk_cache/histogram_macros.h   68
-rw-r--r--  net/disk_cache/rankings.cc           5
-rw-r--r--  net/disk_cache/stats.cc             21
-rw-r--r--  net/net.gyp                          1
8 files changed, 149 insertions, 66 deletions
diff --git a/net/disk_cache/backend_impl.cc b/net/disk_cache/backend_impl.cc
index ac68ec9..3a0a336 100644
--- a/net/disk_cache/backend_impl.cc
+++ b/net/disk_cache/backend_impl.cc
@@ -20,13 +20,13 @@
// Uncomment this to use the new eviction algorithm.
// #define USE_NEW_EVICTION
+// This has to be defined before including histogram_macros.h from this file.
+#define NET_DISK_CACHE_BACKEND_IMPL_CC_
+#include "net/disk_cache/histogram_macros.h"
+
using base::Time;
using base::TimeDelta;
-// HISTOGRAM_HOURS will collect time related data with a granularity of hours
-// and normal values of a few months.
-#define UMA_HISTOGRAM_HOURS UMA_HISTOGRAM_COUNTS_10000
-
namespace {
const wchar_t* kIndexName = L"index";
@@ -306,7 +306,7 @@ bool BackendImpl::OpenEntry(const std::string& key, Entry** entry) {
DCHECK(entry);
*entry = cache_entry;
- UMA_HISTOGRAM_TIMES("DiskCache.OpenTime", Time::Now() - start);
+ CACHE_UMA(AGE_MS, "OpenTime", 0, start);
stats_.OnEvent(Stats::OPEN_HIT);
return true;
}
@@ -384,7 +384,7 @@ bool BackendImpl::CreateEntry(const std::string& key, Entry** entry) {
cache_entry.swap(reinterpret_cast<EntryImpl**>(entry));
- UMA_HISTOGRAM_TIMES("DiskCache.CreateTime", Time::Now() - start);
+ CACHE_UMA(AGE_MS, "CreateTime", 0, start);
stats_.OnEvent(Stats::CREATE_HIT);
Trace("create entry hit ");
return true;
@@ -688,6 +688,12 @@ void BackendImpl::TooMuchStorageRequested(int32 size) {
stats_.ModifyStorageStats(0, size);
}
+std::string BackendImpl::HistogramName(const char* name, int experiment) {
+ if (!experiment)
+ return StringPrintf("DiskCache.%d.%s", cache_type_, name);
+ return StringPrintf("DiskCache.%d.%s_%d", cache_type_, name, experiment);
+}
+
// We want to remove biases from some histograms so we only send data once per
// week.
bool BackendImpl::ShouldReportAgain() {
@@ -709,19 +715,18 @@ void BackendImpl::FirstEviction() {
DCHECK(data_->header.create_time);
Time create_time = Time::FromInternalValue(data_->header.create_time);
- UMA_HISTOGRAM_HOURS("DiskCache.FillupAge",
- (Time::Now() - create_time).InHours());
+ CACHE_UMA(AGE, "FillupAge", 0, create_time);
int64 use_hours = stats_.GetCounter(Stats::TIMER) / 120;
- UMA_HISTOGRAM_HOURS("DiskCache.FillupTime", static_cast<int>(use_hours));
- UMA_HISTOGRAM_PERCENTAGE("DiskCache.FirstHitRatio", stats_.GetHitRatio());
+ CACHE_UMA(HOURS, "FillupTime", 0, static_cast<int>(use_hours));
+ CACHE_UMA(PERCENTAGE, "FirstHitRatio", 0, stats_.GetHitRatio());
int avg_size = data_->header.num_bytes / GetEntryCount();
- UMA_HISTOGRAM_COUNTS("DiskCache.FirstEntrySize", avg_size);
+ CACHE_UMA(COUNTS, "FirstEntrySize", 0, avg_size);
int large_entries_bytes = stats_.GetLargeEntriesSize();
int large_ratio = large_entries_bytes * 100 / data_->header.num_bytes;
- UMA_HISTOGRAM_PERCENTAGE("DiskCache.FirstLargeEntriesRatio", large_ratio);
+ CACHE_UMA(PERCENTAGE, "FirstLargeEntriesRatio", 0, large_ratio);
stats_.ResetRatios();
}
@@ -743,12 +748,9 @@ void BackendImpl::CriticalError(int error) {
}
void BackendImpl::ReportError(int error) {
- static LinearHistogram counter("DiskCache.Error", 0, 49, 50);
- counter.SetFlags(kUmaTargetedHistogramFlag);
-
// We transmit positive numbers, instead of direct error codes.
DCHECK(error <= 0);
- counter.Add(error * -1);
+ CACHE_UMA(CACHE_ERROR, "Error", 0, error * -1);
}
void BackendImpl::OnEvent(Stats::Counters an_event) {
@@ -1273,23 +1275,20 @@ void BackendImpl::LogStats() {
void BackendImpl::ReportStats() {
int experiment = data_->header.experiment;
- std::string entries(StringPrintf("DiskCache.Entries_%d", experiment));
- std::string size(StringPrintf("DiskCache.Size_%d", experiment));
- std::string max_size(StringPrintf("DiskCache.MaxSize_%d", experiment));
- UMA_HISTOGRAM_COUNTS(entries.c_str(), data_->header.num_entries);
- UMA_HISTOGRAM_COUNTS(size.c_str(), data_->header.num_bytes / (1024 * 1024));
- UMA_HISTOGRAM_COUNTS(max_size.c_str(), max_size_ / (1024 * 1024));
-
- UMA_HISTOGRAM_COUNTS("DiskCache.AverageOpenEntries",
- static_cast<int>(stats_.GetCounter(Stats::OPEN_ENTRIES)));
- UMA_HISTOGRAM_COUNTS("DiskCache.MaxOpenEntries",
- static_cast<int>(stats_.GetCounter(Stats::MAX_ENTRIES)));
+ CACHE_UMA(COUNTS, "Entries", experiment, data_->header.num_entries);
+ CACHE_UMA(COUNTS, "Size", experiment,
+ data_->header.num_bytes / (1024 * 1024));
+ CACHE_UMA(COUNTS, "MaxSize", experiment, max_size_ / (1024 * 1024));
+
+ CACHE_UMA(COUNTS, "AverageOpenEntries", 0,
+ static_cast<int>(stats_.GetCounter(Stats::OPEN_ENTRIES)));
+ CACHE_UMA(COUNTS, "MaxOpenEntries", 0,
+ static_cast<int>(stats_.GetCounter(Stats::MAX_ENTRIES)));
stats_.SetCounter(Stats::MAX_ENTRIES, 0);
if (!data_->header.create_time) {
// This is a client running the experiment on the dev channel.
- std::string hit_ratio(StringPrintf("DiskCache.HitRatio_%d", experiment));
- UMA_HISTOGRAM_PERCENTAGE(hit_ratio.c_str(), stats_.GetHitRatio());
+ CACHE_UMA(PERCENTAGE, "HitRatio", experiment, stats_.GetHitRatio());
stats_.ResetRatios();
if (!data_->header.num_bytes)
@@ -1297,9 +1296,7 @@ void BackendImpl::ReportStats() {
int large_entries_bytes = stats_.GetLargeEntriesSize();
int large_ratio = large_entries_bytes * 100 / data_->header.num_bytes;
- std::string large_ratio_name(StringPrintf("DiskCache.LargeEntriesRatio_%d",
- experiment));
- UMA_HISTOGRAM_PERCENTAGE(large_ratio_name.c_str(), large_ratio);
+ CACHE_UMA(PERCENTAGE, "LargeEntriesRatio", experiment, large_ratio);
return;
}
@@ -1310,26 +1307,25 @@ void BackendImpl::ReportStats() {
// that event, start reporting this:
int64 total_hours = stats_.GetCounter(Stats::TIMER) / 120;
- UMA_HISTOGRAM_HOURS("DiskCache.TotalTime", static_cast<int>(total_hours));
+ CACHE_UMA(HOURS, "TotalTime", 0, static_cast<int>(total_hours));
int64 use_hours = stats_.GetCounter(Stats::LAST_REPORT_TIMER) / 120;
if (!use_hours || !GetEntryCount() || !data_->header.num_bytes)
return;
- UMA_HISTOGRAM_HOURS("DiskCache.UseTime", static_cast<int>(use_hours));
- UMA_HISTOGRAM_PERCENTAGE("DiskCache.HitRatio", stats_.GetHitRatio());
- UMA_HISTOGRAM_PERCENTAGE("DiskCache.ResurrectRatio",
- stats_.GetResurrectRatio());
+ CACHE_UMA(HOURS, "UseTime", 0, static_cast<int>(use_hours));
+ CACHE_UMA(PERCENTAGE, "HitRatio", 0, stats_.GetHitRatio());
+ CACHE_UMA(PERCENTAGE, "ResurrectRatio", 0, stats_.GetResurrectRatio());
int64 trim_rate = stats_.GetCounter(Stats::TRIM_ENTRY) / use_hours;
- UMA_HISTOGRAM_COUNTS("DiskCache.TrimRate", static_cast<int>(trim_rate));
+ CACHE_UMA(COUNTS, "TrimRate", 0, static_cast<int>(trim_rate));
int avg_size = data_->header.num_bytes / GetEntryCount();
- UMA_HISTOGRAM_COUNTS("DiskCache.EntrySize", avg_size);
+ CACHE_UMA(COUNTS, "EntrySize", 0, avg_size);
int large_entries_bytes = stats_.GetLargeEntriesSize();
int large_ratio = large_entries_bytes * 100 / data_->header.num_bytes;
- UMA_HISTOGRAM_PERCENTAGE("DiskCache.LargeEntriesRatio", large_ratio);
+ CACHE_UMA(PERCENTAGE, "LargeEntriesRatio", 0, large_ratio);
stats_.ResetRatios();
stats_.SetCounter(Stats::TRIM_ENTRY, 0);
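The same call-site conversion repeats throughout this file. As a side-by-side sketch (illustrative only; both lines are taken from the hunks above):

    // Before: one fixed histogram name shared by every cache type, with the
    // caller computing the elapsed time itself.
    UMA_HISTOGRAM_TIMES("DiskCache.OpenTime", Time::Now() - start);

    // After: CACHE_UMA builds "DiskCache.<type>.OpenTime" from the backend,
    // and the AGE_MS flavor takes the start time and does the subtraction
    // internally (see histogram_macros.h below).
    CACHE_UMA(AGE_MS, "OpenTime", 0, start);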
diff --git a/net/disk_cache/backend_impl.h b/net/disk_cache/backend_impl.h
index b66ab2f..3fbc2a4 100644
--- a/net/disk_cache/backend_impl.h
+++ b/net/disk_cache/backend_impl.h
@@ -105,6 +105,15 @@ class BackendImpl : public Backend {
// Logs requests that are denied due to being too big.
void TooMuchStorageRequested(int32 size);
+ // Returns the full histogram name for the given base |name| and experiment,
+ // and the current cache type. The name will be "DiskCache.t.name_e" where t
+ // is the cache type and e the provided |experiment|.
+ std::string HistogramName(const char* name, int experiment);
+
+ net::CacheType cache_type() {
+ return cache_type_;
+ }
+
// Returns true if we should send histograms for this user again. The caller
// must call this function only once per run (because it always returns the
// same thing on a given run).
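For illustration (this snippet is not part of the patch), the names produced by HistogramName(), following the StringPrintf format strings added to backend_impl.cc above, assuming a regular disk cache whose net::CacheType prints as 0:

    backend->HistogramName("Entries", 0);    // -> "DiskCache.0.Entries"
    backend->HistogramName("Entries", 530);  // -> "DiskCache.0.Entries_530"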
diff --git a/net/disk_cache/entry_impl.cc b/net/disk_cache/entry_impl.cc
index d82cb48..0173548 100644
--- a/net/disk_cache/entry_impl.cc
+++ b/net/disk_cache/entry_impl.cc
@@ -11,6 +11,7 @@
#include "net/base/net_errors.h"
#include "net/disk_cache/backend_impl.h"
#include "net/disk_cache/cache_util.h"
+#include "net/disk_cache/histogram_macros.h"
using base::Time;
using base::TimeDelta;
@@ -208,7 +209,8 @@ int EntryImpl::ReadData(int index, int offset, net::IOBuffer* buf, int buf_len,
// Complete the operation locally.
DCHECK(kMaxBlockSize >= offset + buf_len);
memcpy(buf->data(), user_buffers_[index].get() + offset, buf_len);
- stats.AddTime(Time::Now() - start);
+ if (backend_->cache_type() == net::DISK_CACHE)
+ stats.AddTime(Time::Now() - start);
return buf_len;
}
@@ -240,7 +242,8 @@ int EntryImpl::ReadData(int index, int offset, net::IOBuffer* buf, int buf_len,
if (io_callback && completed)
io_callback->Discard();
- stats.AddTime(Time::Now() - start);
+ if (backend_->cache_type() == net::DISK_CACHE)
+ stats.AddTime(Time::Now() - start);
return (completed || !completion_callback) ? buf_len : net::ERR_IO_PENDING;
}
@@ -309,7 +312,8 @@ int EntryImpl::WriteData(int index, int offset, net::IOBuffer* buf, int buf_len,
DCHECK(kMaxBlockSize >= offset + buf_len);
memcpy(user_buffers_[index].get() + offset, buf->data(), buf_len);
- stats.AddTime(Time::Now() - start);
+ if (backend_->cache_type() == net::DISK_CACHE)
+ stats.AddTime(Time::Now() - start);
return buf_len;
}
@@ -345,7 +349,8 @@ int EntryImpl::WriteData(int index, int offset, net::IOBuffer* buf, int buf_len,
if (io_callback && completed)
io_callback->Discard();
- stats.AddTime(Time::Now() - start);
+ if (backend_->cache_type() == net::DISK_CACHE)
+ stats.AddTime(Time::Now() - start);
return (completed || !completion_callback) ? buf_len : net::ERR_IO_PENDING;
}
@@ -458,9 +463,9 @@ void EntryImpl::DeleteEntryData(bool everything) {
DCHECK(doomed_ || !everything);
if (GetDataSize(0))
- UMA_HISTOGRAM_COUNTS("DiskCache.DeleteHeader", GetDataSize(0));
+ CACHE_UMA(COUNTS, "DeleteHeader", 0, GetDataSize(0));
if (GetDataSize(1))
- UMA_HISTOGRAM_COUNTS("DiskCache.DeleteData", GetDataSize(1));
+ CACHE_UMA(COUNTS, "DeleteData", 0, GetDataSize(1));
for (int index = 0; index < NUM_STREAMS; index++) {
Addr address(entry_.Data()->data_addr[index]);
if (address.is_initialized()) {
@@ -617,7 +622,7 @@ void EntryImpl::DeleteData(Addr address, int index) {
files_[index] = NULL; // Releases the object.
if (!DeleteCacheFile(backend_->GetFileName(address))) {
- UMA_HISTOGRAM_COUNTS("DiskCache.DeleteFailed", 1);
+ CACHE_UMA(COUNTS, "DeleteFailed", 0, 1);
LOG(ERROR) << "Failed to delete " << backend_->GetFileName(address) <<
" from the cache.";
}
diff --git a/net/disk_cache/eviction.cc b/net/disk_cache/eviction.cc
index 76af8db..c000342 100644
--- a/net/disk_cache/eviction.cc
+++ b/net/disk_cache/eviction.cc
@@ -34,14 +34,11 @@
#include "base/time.h"
#include "net/disk_cache/backend_impl.h"
#include "net/disk_cache/entry_impl.h"
+#include "net/disk_cache/histogram_macros.h"
#include "net/disk_cache/trace.h"
using base::Time;
-// HISTOGRAM_HOURS will collect time related data with a granularity of hours
-// and normal values of a few months.
-#define UMA_HISTOGRAM_HOURS UMA_HISTOGRAM_COUNTS_10000
-
namespace {
const int kCleanUpMargin = 1024 * 1024;
@@ -105,7 +102,7 @@ void Eviction::TrimCache(bool empty) {
}
}
- UMA_HISTOGRAM_TIMES("DiskCache.TotalTrimTime", Time::Now() - start);
+ CACHE_UMA(AGE_MS, "TotalTrimTime", 0, start);
Trace("*** Trim Cache end ***");
return;
}
@@ -145,10 +142,7 @@ void Eviction::ReportTrimTimes(EntryImpl* entry) {
if (first_trim_) {
first_trim_ = false;
if (backend_->ShouldReportAgain()) {
- std::string name(StringPrintf("DiskCache.TrimAge_%d",
- header_->experiment));
- UMA_HISTOGRAM_HOURS(name.c_str(),
- (Time::Now() - entry->GetLastUsed()).InHours());
+ CACHE_UMA(AGE, "TrimAge", header_->experiment, entry->GetLastUsed());
}
if (header_->create_time && !header_->lru.filled) {
@@ -265,7 +259,7 @@ void Eviction::TrimCacheV2(bool empty) {
factory_.NewRunnableMethod(&Eviction::TrimDeleted, empty));
}
- UMA_HISTOGRAM_TIMES("DiskCache.TotalTrimTime", Time::Now() - start);
+ CACHE_UMA(AGE_MS, "TotalTrimTime", 0, start);
Trace("*** Trim Cache end ***");
return;
}
@@ -375,7 +369,7 @@ void Eviction::TrimDeleted(bool empty) {
MessageLoop::current()->PostTask(FROM_HERE,
factory_.NewRunnableMethod(&Eviction::TrimDeleted, false));
- UMA_HISTOGRAM_TIMES("DiskCache.TotalTrimDeletedTime", Time::Now() - start);
+ CACHE_UMA(AGE_MS, "TotalTrimDeletedTime", 0, start);
Trace("*** Trim Deleted end ***");
return;
}
diff --git a/net/disk_cache/histogram_macros.h b/net/disk_cache/histogram_macros.h
new file mode 100644
index 0000000..ef88ccf
--- /dev/null
+++ b/net/disk_cache/histogram_macros.h
@@ -0,0 +1,68 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains macros to simplify histogram reporting from the disk
+// cache. The main issue is that we want to have separate histograms for each
+// type of cache (regular vs. media, etc), without adding the complexity of
+// keeping track of a potentially large number of histogram objects that have to
+// survive the backend object that created them.
+
+#ifndef NET_DISK_CACHE_HISTOGRAM_MACROS_H_
+#define NET_DISK_CACHE_HISTOGRAM_MACROS_H_
+
+// HISTOGRAM_HOURS will collect time related data with a granularity of hours
+// and normal values of a few months.
+#define UMA_HISTOGRAM_HOURS UMA_HISTOGRAM_COUNTS_10000
+
+// HISTOGRAM_AGE will collect time elapsed since |initial_time|, with a
+// granularity of hours and normal values of a few months.
+#define UMA_HISTOGRAM_AGE(name, initial_time)\
+ UMA_HISTOGRAM_COUNTS_10000(name, (Time::Now() - initial_time).InHours())
+
+// HISTOGRAM_AGE_MS will collect time elapsed since |initial_time|, with the
+// normal resolution of the UMA_HISTOGRAM_TIMES.
+#define UMA_HISTOGRAM_AGE_MS(name, initial_time)\
+ UMA_HISTOGRAM_TIMES(name, Time::Now() - initial_time)
+
+#define UMA_HISTOGRAM_CACHE_ERROR(name, sample) do { \
+ static LinearHistogram counter((name), 0, 49, 50); \
+ counter.SetFlags(kUmaTargetedHistogramFlag); \
+ counter.Add(sample); \
+ } while (0)
+
+#ifdef NET_DISK_CACHE_BACKEND_IMPL_CC_
+#define BACKEND_OBJ this
+#else
+#define BACKEND_OBJ backend_
+#endif
+
+// Generates a UMA histogram of the given type, building the proper name for
+// it (by asking backend_->HistogramName) and adding the provided sample.
+// For example, to generate a regular UMA_HISTOGRAM_COUNTS, this macro would
+// be used as:
+// CACHE_UMA(COUNTS, "MyName", 0, 20);
+// CACHE_UMA(COUNTS, "MyExperiment", 530, 55);
+// which roughly translates to:
+// UMA_HISTOGRAM_COUNTS("DiskCache.2.MyName", 20); // "2" is the CacheType.
+// UMA_HISTOGRAM_COUNTS("DiskCache.2.MyExperiment_530", 55);
+//
+#define CACHE_UMA(type, name, experiment, sample) {\
+ const std::string my_name = BACKEND_OBJ->HistogramName(name, experiment);\
+ switch (BACKEND_OBJ->cache_type()) {\
+ case net::DISK_CACHE:\
+ UMA_HISTOGRAM_##type(my_name.data(), sample);\
+ break;\
+ case net::MEDIA_CACHE:\
+ UMA_HISTOGRAM_##type(my_name.data(), sample);\
+ break;\
+ case net::TEMP_MEDIA_CACHE:\
+ UMA_HISTOGRAM_##type(my_name.data(), sample);\
+ break;\
+ default:\
+ NOTREACHED();\
+ break;\
+ }\
+ }
+
+#endif // NET_DISK_CACHE_HISTOGRAM_MACROS_H_
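A note on the switch above, whose three branches look identical: each UMA_HISTOGRAM_* expansion stores its histogram in a function-local static bound to the name passed on the first execution of that line (the UMA_HISTOGRAM_CACHE_ERROR macro defined in this same file shows the pattern), so a single call site can only ever feed one histogram. Routing each cache type through its own case gives each type its own static object, and therefore its own name. A simplified model of the mechanism, not the real base/histogram.h definition:

    // Sketch: |counter| is constructed once, with whatever |name| the first
    // call passed; later executions of the same line reuse it even if the
    // name argument differs.
    #define SKETCH_HISTOGRAM_COUNTS(name, sample) do { \
        static Histogram counter((name), 1, 1000000, 50); \
        counter.Add(sample); \
      } while (0)

So CACHE_UMA(COUNTS, "MyName", 0, 20) on a media-cache backend lands in the MEDIA_CACHE case and records under that backend's own name, without disturbing the static owned by the DISK_CACHE case.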
diff --git a/net/disk_cache/rankings.cc b/net/disk_cache/rankings.cc
index 962563b..7de85ac 100644
--- a/net/disk_cache/rankings.cc
+++ b/net/disk_cache/rankings.cc
@@ -8,6 +8,7 @@
#include "net/disk_cache/backend_impl.h"
#include "net/disk_cache/entry_impl.h"
#include "net/disk_cache/errors.h"
+#include "net/disk_cache/histogram_macros.h"
using base::Time;
@@ -234,7 +235,7 @@ bool Rankings::GetRanking(CacheRankingsBlock* rankings) {
EntryImpl* cache_entry =
reinterpret_cast<EntryImpl*>(rankings->Data()->pointer);
rankings->SetData(cache_entry->rankings()->Data());
- UMA_HISTOGRAM_TIMES("DiskCache.GetRankings", Time::Now() - start);
+ CACHE_UMA(AGE_MS, "GetRankings", 0, start);
return true;
}
@@ -391,7 +392,7 @@ void Rankings::UpdateRank(CacheRankingsBlock* node, bool modified, List list) {
Time start = Time::Now();
Remove(node, list);
Insert(node, modified, list);
- UMA_HISTOGRAM_TIMES("DiskCache.UpdateRank", Time::Now() - start);
+ CACHE_UMA(AGE_MS, "UpdateRank", 0, start);
}
void Rankings::CompleteTransaction() {
diff --git a/net/disk_cache/stats.cc b/net/disk_cache/stats.cc
index 4f9440e..702b2a36 100644
--- a/net/disk_cache/stats.cc
+++ b/net/disk_cache/stats.cc
@@ -123,16 +123,25 @@ bool Stats::Init(BackendImpl* backend, uint32* storage_addr) {
storage_addr_ = address.value();
backend_ = backend;
- if (!size_histogram_.get()) {
- // Stats may be reused when the cache is re-created, but we want only one
- // histogram at any given time.
- size_histogram_.reset(new StatsHistogram("DiskCache.SizeStats"));
- size_histogram_->Init(this);
- }
memcpy(data_sizes_, stats.data_sizes, sizeof(data_sizes_));
memcpy(counters_, stats.counters, sizeof(counters_));
+ // It seems impossible to support this histogram for more than one
+ // simultaneous object with the current infrastructure.
+ static bool first_time = true;
+ if (first_time) {
+ first_time = false;
+ // ShouldReportAgain() will re-enter this object.
+ if (!size_histogram_.get() && backend->cache_type() == net::DISK_CACHE &&
+ backend->ShouldReportAgain()) {
+ // Stats may be reused when the cache is re-created, but we want only one
+ // histogram at any given time.
+ size_histogram_.reset(new StatsHistogram("DiskCache.SizeStats"));
+ size_histogram_->Init(this);
+ }
+ }
+
return true;
}
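An inference from the comments above: size_histogram_ is a per-Stats member, so the !size_histogram_.get() check only stops one instance from re-creating its own histogram; with two live caches (say, a disk cache plus a media cache), both instances would pass that check and try to register a histogram under the single global name "DiskCache.SizeStats". Hence the process-wide latch, flipped before ShouldReportAgain() runs so that the re-entry the comment warns about finds it already cleared. The shape of the guard (CreateTheSingletonHistogram is a hypothetical stand-in for the size_histogram_ setup above):

    static bool first_time = true;    // one flag for the whole process,
    if (first_time) {                 // shared by every Stats instance
      first_time = false;             // flip before any call that can re-enter
      if (backend->ShouldReportAgain())
        CreateTheSingletonHistogram();
    }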
diff --git a/net/net.gyp b/net/net.gyp
index 9bb54d0..df06d07 100644
--- a/net/net.gyp
+++ b/net/net.gyp
@@ -163,6 +163,7 @@
'disk_cache/file_win.cc',
'disk_cache/hash.cc',
'disk_cache/hash.h',
+ 'disk_cache/histogram_macros.h',
'disk_cache/mapped_file.h',
'disk_cache/mapped_file_posix.cc',
'disk_cache/mapped_file_win.cc',