diff options
author | rvargas@google.com <rvargas@google.com@0039d316-1c4b-4281-b951-d872f2087c98> | 2009-06-25 17:28:31 +0000 |
---|---|---|
committer | rvargas@google.com <rvargas@google.com@0039d316-1c4b-4281-b951-d872f2087c98> | 2009-06-25 17:28:31 +0000 |
commit | 55185493afba3b7813d44a25c614975c0bc0f32b (patch) | |
tree | b8891744621915ba5cd12d0dd8e305f3ec5e0c5f /net | |
parent | b235357d38b41a6d3dbf552fd9f4aee3261af0ba (diff) | |
download | chromium_src-55185493afba3b7813d44a25c614975c0bc0f32b.zip chromium_src-55185493afba3b7813d44a25c614975c0bc0f32b.tar.gz chromium_src-55185493afba3b7813d44a25c614975c0bc0f32b.tar.bz2 |
Disk Cache: Split some time histograms into groups based
on the cache size.
BUG=10727
TEST=none
Review URL: http://codereview.chromium.org/146129
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@19256 0039d316-1c4b-4281-b951-d872f2087c98
Diffstat (limited to 'net')
-rw-r--r-- | net/disk_cache/backend_impl.cc | 15 | ||||
-rw-r--r-- | net/disk_cache/backend_impl.h | 3 | ||||
-rw-r--r-- | net/disk_cache/entry_impl.cc | 68 | ||||
-rw-r--r-- | net/disk_cache/entry_impl.h | 20 | ||||
-rw-r--r-- | net/disk_cache/eviction.cc | 4 |
5 files changed, 75 insertions, 35 deletions
diff --git a/net/disk_cache/backend_impl.cc b/net/disk_cache/backend_impl.cc index 5f7eb93..d17b5e3 100644 --- a/net/disk_cache/backend_impl.cc +++ b/net/disk_cache/backend_impl.cc @@ -361,7 +361,7 @@ bool BackendImpl::OpenEntry(const std::string& key, Entry** entry) { DCHECK(entry); *entry = cache_entry; - CACHE_UMA(AGE_MS, "OpenTime", 0, start); + CACHE_UMA(AGE_MS, "OpenTime", GetSizeGroup(), start); stats_.OnEvent(Stats::OPEN_HIT); return true; } @@ -439,7 +439,7 @@ bool BackendImpl::CreateEntry(const std::string& key, Entry** entry) { cache_entry.swap(reinterpret_cast<EntryImpl**>(entry)); - CACHE_UMA(AGE_MS, "CreateTime", 0, start); + CACHE_UMA(AGE_MS, "CreateTime", GetSizeGroup(), start); stats_.OnEvent(Stats::CREATE_HIT); Trace("create entry hit "); return true; @@ -750,6 +750,17 @@ std::string BackendImpl::HistogramName(const char* name, int experiment) { return StringPrintf("DiskCache.%d.%s_%d", cache_type_, name, experiment); } +int BackendImpl::GetSizeGroup() { + if (disabled_) + return 0; + + // We want to report times grouped by the current cache size (50 MB groups). + int group = data_->header.num_bytes / (50 * 1024 * 1024); + if (group > 6) + group = 6; // Limit the number of groups, just in case. + return group; +} + // We want to remove biases from some histograms so we only send data once per // week. bool BackendImpl::ShouldReportAgain() { diff --git a/net/disk_cache/backend_impl.h b/net/disk_cache/backend_impl.h index a061feb..0aae9de 100644 --- a/net/disk_cache/backend_impl.h +++ b/net/disk_cache/backend_impl.h @@ -122,6 +122,9 @@ class BackendImpl : public Backend { return cache_type_; } + // Returns the group for this client, based on the current cache size. + int GetSizeGroup(); + // Returns true if we should send histograms for this user again. The caller // must call this function only once per run (because it returns always the // same thing on a given run). 
diff --git a/net/disk_cache/entry_impl.cc b/net/disk_cache/entry_impl.cc index c9bab9a..0c8ee87 100644 --- a/net/disk_cache/entry_impl.cc +++ b/net/disk_cache/entry_impl.cc @@ -81,7 +81,7 @@ EntryImpl::EntryImpl(BackendImpl* backend, Addr address) entry_.LazyInit(backend->File(address), address); doomed_ = false; backend_ = backend; - for (int i = 0; i < NUM_STREAMS; i++) { + for (int i = 0; i < kNumStreams; i++) { unreported_size_[i] = 0; } } @@ -100,7 +100,7 @@ EntryImpl::~EntryImpl() { DeleteEntryData(true); } else { bool ret = true; - for (int index = 0; index < NUM_STREAMS; index++) { + for (int index = 0; index < kNumStreams; index++) { if (user_buffers_[index].get()) { if (!(ret = Flush(index, entry_.Data()->data_size[index], false))) LOG(ERROR) << "Failed to save user data"; @@ -148,7 +148,7 @@ std::string EntryImpl::GetKey() const { if (entry->Data()->key_len > kMaxInternalKeyLength) { Addr address(entry->Data()->long_key); DCHECK(address.is_initialized()); - COMPILE_ASSERT(NUM_STREAMS == kKeyFileIndex, invalid_key_index); + COMPILE_ASSERT(kNumStreams == kKeyFileIndex, invalid_key_index); File* file = const_cast<EntryImpl*>(this)->GetBackingFile(address, kKeyFileIndex); @@ -177,7 +177,7 @@ Time EntryImpl::GetLastModified() const { } int32 EntryImpl::GetDataSize(int index) const { - if (index < 0 || index >= NUM_STREAMS) + if (index < 0 || index >= kNumStreams) return 0; CacheEntryBlock* entry = const_cast<CacheEntryBlock*>(&entry_); @@ -187,7 +187,7 @@ int32 EntryImpl::GetDataSize(int index) const { int EntryImpl::ReadData(int index, int offset, net::IOBuffer* buf, int buf_len, net::CompletionCallback* completion_callback) { DCHECK(node_.Data()->dirty); - if (index < 0 || index >= NUM_STREAMS) + if (index < 0 || index >= kNumStreams) return net::ERR_INVALID_ARGUMENT; int entry_size = entry_.Data()->data_size[index]; @@ -198,9 +198,6 @@ int EntryImpl::ReadData(int index, int offset, net::IOBuffer* buf, int buf_len, return net::ERR_INVALID_ARGUMENT; Time 
start = Time::Now(); - static Histogram stats("DiskCache.ReadTime", TimeDelta::FromMilliseconds(1), - TimeDelta::FromSeconds(10), 50); - stats.SetFlags(kUmaTargetedHistogramFlag); if (offset + buf_len > entry_size) buf_len = entry_size - offset; @@ -213,8 +210,7 @@ int EntryImpl::ReadData(int index, int offset, net::IOBuffer* buf, int buf_len, // Complete the operation locally. DCHECK(kMaxBlockSize >= offset + buf_len); memcpy(buf->data() , user_buffers_[index].get() + offset, buf_len); - if (backend_->cache_type() == net::DISK_CACHE) - stats.AddTime(Time::Now() - start); + ReportIOTime(kRead, start); return buf_len; } @@ -246,8 +242,7 @@ int EntryImpl::ReadData(int index, int offset, net::IOBuffer* buf, int buf_len, if (io_callback && completed) io_callback->Discard(); - if (backend_->cache_type() == net::DISK_CACHE) - stats.AddTime(Time::Now() - start); + ReportIOTime(kRead, start); return (completed || !completion_callback) ? buf_len : net::ERR_IO_PENDING; } @@ -255,7 +250,7 @@ int EntryImpl::WriteData(int index, int offset, net::IOBuffer* buf, int buf_len, net::CompletionCallback* completion_callback, bool truncate) { DCHECK(node_.Data()->dirty); - if (index < 0 || index >= NUM_STREAMS) + if (index < 0 || index >= kNumStreams) return net::ERR_INVALID_ARGUMENT; if (offset < 0 || buf_len < 0) @@ -274,9 +269,6 @@ int EntryImpl::WriteData(int index, int offset, net::IOBuffer* buf, int buf_len, } Time start = Time::Now(); - static Histogram stats("DiskCache.WriteTime", TimeDelta::FromMilliseconds(1), - TimeDelta::FromSeconds(10), 50); - stats.SetFlags(kUmaTargetedHistogramFlag); // Read the size at this point (it may change inside prepare). 
int entry_size = entry_.Data()->data_size[index]; @@ -314,8 +306,7 @@ int EntryImpl::WriteData(int index, int offset, net::IOBuffer* buf, int buf_len, DCHECK(kMaxBlockSize >= offset + buf_len); memcpy(user_buffers_[index].get() + offset, buf->data(), buf_len); - if (backend_->cache_type() == net::DISK_CACHE) - stats.AddTime(Time::Now() - start); + ReportIOTime(kWrite, start); return buf_len; } @@ -351,8 +342,7 @@ int EntryImpl::WriteData(int index, int offset, net::IOBuffer* buf, int buf_len, if (io_callback && completed) io_callback->Discard(); - if (backend_->cache_type() == net::DISK_CACHE) - stats.AddTime(Time::Now() - start); + ReportIOTime(kWrite, start); return (completed || !completion_callback) ? buf_len : net::ERR_IO_PENDING; } @@ -363,8 +353,11 @@ int EntryImpl::ReadSparseData(int64 offset, net::IOBuffer* buf, int buf_len, if (net::OK != result) return result; - return sparse_->StartIO(SparseControl::kReadOperation, offset, buf, buf_len, - completion_callback); + Time start = Time::Now(); + result = sparse_->StartIO(SparseControl::kReadOperation, offset, buf, buf_len, + completion_callback); + ReportIOTime(kSparseRead, start); + return result; } int EntryImpl::WriteSparseData(int64 offset, net::IOBuffer* buf, int buf_len, @@ -374,8 +367,11 @@ int EntryImpl::WriteSparseData(int64 offset, net::IOBuffer* buf, int buf_len, if (net::OK != result) return result; - return sparse_->StartIO(SparseControl::kWriteOperation, offset, buf, buf_len, - completion_callback); + Time start = Time::Now(); + result = sparse_->StartIO(SparseControl::kWriteOperation, offset, buf, + buf_len, completion_callback); + ReportIOTime(kSparseWrite, start); + return result; } int EntryImpl::GetAvailableRange(int64 offset, int len, int64* start) { @@ -461,7 +457,7 @@ void EntryImpl::DeleteEntryData(bool everything) { CACHE_UMA(COUNTS, "DeleteHeader", 0, GetDataSize(0)); if (GetDataSize(1)) CACHE_UMA(COUNTS, "DeleteData", 0, GetDataSize(1)); - for (int index = 0; index < NUM_STREAMS; 
index++) { + for (int index = 0; index < kNumStreams; index++) { Addr address(entry_.Data()->data_addr[index]); if (address.is_initialized()) { DeleteData(address, index); @@ -585,7 +581,7 @@ void EntryImpl::SetTimes(base::Time last_used, base::Time last_modified) { } bool EntryImpl::CreateDataBlock(int index, int size) { - DCHECK(index >= 0 && index < NUM_STREAMS); + DCHECK(index >= 0 && index < kNumStreams); Addr address(entry_.Data()->data_addr[index]); if (!CreateBlock(size, &address)) @@ -845,6 +841,26 @@ int EntryImpl::InitSparseData() { return result; } +void EntryImpl::ReportIOTime(Operation op, const base::Time& start) { + int group = backend_->GetSizeGroup(); + switch (op) { + case kRead: + CACHE_UMA(AGE_MS, "ReadTime", group, start); + break; + case kWrite: + CACHE_UMA(AGE_MS, "WriteTime", group, start); + break; + case kSparseRead: + CACHE_UMA(AGE_MS, "SparseReadTime", 0, start); + break; + case kSparseWrite: + CACHE_UMA(AGE_MS, "SparseWriteTime", 0, start); + break; + default: + NOTREACHED(); + } +} + void EntryImpl::Log(const char* msg) { void* pointer = NULL; int dirty = 0; diff --git a/net/disk_cache/entry_impl.h b/net/disk_cache/entry_impl.h index 00f44db..89c9573 100644 --- a/net/disk_cache/entry_impl.h +++ b/net/disk_cache/entry_impl.h @@ -101,7 +101,14 @@ class EntryImpl : public Entry, public base::RefCounted<EntryImpl> { private: enum { - NUM_STREAMS = 3 + kNumStreams = 3 + }; + + enum Operation { + kRead, + kWrite, + kSparseRead, + kSparseWrite }; ~EntryImpl(); @@ -143,16 +150,19 @@ class EntryImpl : public Entry, public base::RefCounted<EntryImpl> { // Initializes the sparse control object. Returns a net error code. int InitSparseData(); + // Generates a histogram for the time spent working on this operation. + void ReportIOTime(Operation op, const base::Time& start); + // Logs this entry to the internal trace buffer. void Log(const char* msg); CacheEntryBlock entry_; // Key related information for this entry. 
CacheRankingsBlock node_; // Rankings related information for this entry. BackendImpl* backend_; // Back pointer to the cache. - scoped_array<char> user_buffers_[NUM_STREAMS]; // Store user data. - scoped_refptr<File> files_[NUM_STREAMS + 1]; // Files to store external user - // data and key. - int unreported_size_[NUM_STREAMS]; // Bytes not reported yet to the backend. + scoped_array<char> user_buffers_[kNumStreams]; // Store user data. + scoped_refptr<File> files_[kNumStreams + 1]; // Files to store external + // user data and key. + int unreported_size_[kNumStreams]; // Bytes not reported yet to the backend. bool doomed_; // True if this entry was removed from the cache. scoped_ptr<SparseControl> sparse_; // Support for sparse entries. diff --git a/net/disk_cache/eviction.cc b/net/disk_cache/eviction.cc index f4e74b2..c29d730 100644 --- a/net/disk_cache/eviction.cc +++ b/net/disk_cache/eviction.cc @@ -104,7 +104,7 @@ void Eviction::TrimCache(bool empty) { } } - CACHE_UMA(AGE_MS, "TotalTrimTime", 0, start); + CACHE_UMA(AGE_MS, "TotalTrimTime", backend_->GetSizeGroup(), start); trimming_ = false; Trace("*** Trim Cache end ***"); return; @@ -276,7 +276,7 @@ void Eviction::TrimCacheV2(bool empty) { factory_.NewRunnableMethod(&Eviction::TrimDeleted, empty)); } - CACHE_UMA(AGE_MS, "TotalTrimTime", 0, start); + CACHE_UMA(AGE_MS, "TotalTrimTime", backend_->GetSizeGroup(), start); Trace("*** Trim Cache end ***"); trimming_ = false; return; |