author | rvargas@google.com <rvargas@google.com@0039d316-1c4b-4281-b951-d872f2087c98> | 2010-02-22 23:14:23 +0000
---|---|---
committer | rvargas@google.com <rvargas@google.com@0039d316-1c4b-4281-b951-d872f2087c98> | 2010-02-22 23:14:23 +0000
commit | b36a7bdca1c1ebc66e9b401296c90d1f69ae686c (patch) |
tree | 8af2fd3186046226bf24c262a563f9605c3553f3 /net/disk_cache |
parent | 2c90e027622a681968bba329c982333d791 (diff) |
Disk cache: Use TimeTicks instead of Time for some of the
cache histograms.
BUG=14193
TEST=none
Review URL: http://codereview.chromium.org/650068
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@39654 0039d316-1c4b-4281-b951-d872f2087c98
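
For context on the change (this sketch is not part of the CL): base::Time follows the wall clock, which can jump forwards or backwards when the system clock is adjusted, while base::TimeTicks is monotonic, so deltas taken from it are well-behaved durations — which is what these timing histograms want. A minimal illustration, assuming the 2010-era "base/time.h" header; DoCacheWork() is a made-up stand-in for whatever operation is being timed:

```cpp
#include "base/time.h"  // 2010-era path for base::Time / base::TimeTicks.

namespace {
void DoCacheWork() {}  // Hypothetical stand-in for the operation being timed.
}  // namespace

void MeasureCacheWork() {
  // Monotonic start point: unaffected by NTP updates or manual clock changes.
  base::TimeTicks start = base::TimeTicks::Now();
  DoCacheWork();
  base::TimeDelta elapsed = base::TimeTicks::Now() - start;
  // |elapsed| is a reliable duration suitable for a timing histogram. The
  // equivalent base::Time::Now() delta could come out negative or huge if the
  // wall clock moved while DoCacheWork() ran, skewing the histogram.
}
```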
Diffstat (limited to 'net/disk_cache')
-rw-r--r-- | net/disk_cache/backend_impl.cc | 7
-rw-r--r-- | net/disk_cache/block_files.cc | 13
-rw-r--r-- | net/disk_cache/entry_impl.cc | 16
-rw-r--r-- | net/disk_cache/entry_impl.h | 2
-rw-r--r-- | net/disk_cache/eviction.cc | 11
-rw-r--r-- | net/disk_cache/histogram_macros.h | 4
-rw-r--r-- | net/disk_cache/rankings.cc | 5
7 files changed, 32 insertions, 26 deletions
```diff
diff --git a/net/disk_cache/backend_impl.cc b/net/disk_cache/backend_impl.cc
index f233172..194a74e 100644
--- a/net/disk_cache/backend_impl.cc
+++ b/net/disk_cache/backend_impl.cc
@@ -27,6 +27,7 @@
 using base::Time;
 using base::TimeDelta;
+using base::TimeTicks;
 
 namespace {
 
@@ -350,7 +351,7 @@ bool BackendImpl::OpenEntry(const std::string& key, Entry** entry) {
   if (disabled_)
     return false;
 
-  Time start = Time::Now();
+  TimeTicks start = TimeTicks::Now();
   uint32 hash = Hash(key);
 
   EntryImpl* cache_entry = MatchEntry(key, hash, false);
@@ -390,7 +391,7 @@ bool BackendImpl::CreateEntry(const std::string& key, Entry** entry) {
   DCHECK(entry);
   *entry = NULL;
 
-  Time start = Time::Now();
+  TimeTicks start = TimeTicks::Now();
   uint32 hash = Hash(key);
 
   scoped_refptr<EntryImpl> parent;
@@ -1160,7 +1161,7 @@ int BackendImpl::NewEntry(Addr address, EntryImpl** entry, bool* dirty) {
     return ERR_INVALID_ADDRESS;
   }
 
-  Time start = Time::Now();
+  TimeTicks start = TimeTicks::Now();
   if (!cache_entry->entry()->Load())
     return ERR_READ_FAILURE;
 
diff --git a/net/disk_cache/block_files.cc b/net/disk_cache/block_files.cc
index fe02f67..3161317 100644
--- a/net/disk_cache/block_files.cc
+++ b/net/disk_cache/block_files.cc
@@ -12,6 +12,7 @@
 #include "net/disk_cache/file_lock.h"
 
 using base::Time;
+using base::TimeTicks;
 
 namespace {
 
@@ -41,7 +42,7 @@ bool CreateMapBlock(int target, int size, disk_cache::BlockFileHeader* header,
     return false;
   }
 
-  Time start = Time::Now();
+  TimeTicks start = TimeTicks::Now();
   // We are going to process the map on 32-block chunks (32 bits), and on every
   // chunk, iterate through the 8 nibbles where the new block can be located.
   int current = header->hints[target - 1];
@@ -67,7 +68,7 @@ bool CreateMapBlock(int target, int size, disk_cache::BlockFileHeader* header,
       if (target != size) {
         header->empty[target - size - 1]++;
       }
-      HISTOGRAM_TIMES("DiskCache.CreateBlock", Time::Now() - start);
+      HISTOGRAM_TIMES("DiskCache.CreateBlock", TimeTicks::Now() - start);
       return true;
     }
   }
@@ -86,7 +87,7 @@ void DeleteMapBlock(int index, int size, disk_cache::BlockFileHeader* header) {
     NOTREACHED();
     return;
   }
-  Time start = Time::Now();
+  TimeTicks start = TimeTicks::Now();
   int byte_index = index / 8;
   uint8* byte_map = reinterpret_cast<uint8*>(header->allocation_map);
   uint8 map_block = byte_map[byte_index];
@@ -115,7 +116,7 @@ void DeleteMapBlock(int index, int size, disk_cache::BlockFileHeader* header) {
   }
   header->num_entries--;
   DCHECK(header->num_entries >= 0);
-  HISTOGRAM_TIMES("DiskCache.DeleteBlock", Time::Now() - start);
+  HISTOGRAM_TIMES("DiskCache.DeleteBlock", TimeTicks::Now() - start);
 }
 
 // Restores the "empty counters" and allocation hints.
@@ -314,7 +315,7 @@ MappedFile* BlockFiles::FileForNewBlock(FileType block_type, int block_count) {
   MappedFile* file = block_files_[block_type - 1];
   BlockFileHeader* header = reinterpret_cast<BlockFileHeader*>(file->buffer());
 
-  Time start = Time::Now();
+  TimeTicks start = TimeTicks::Now();
   while (NeedToGrowBlockFile(header, block_count)) {
     if (kMaxBlocks == header->max_entries) {
       file = NextFile(file);
@@ -328,7 +329,7 @@ MappedFile* BlockFiles::FileForNewBlock(FileType block_type, int block_count) {
       return NULL;
     break;
   }
-  HISTOGRAM_TIMES("DiskCache.GetFileForNewBlock", Time::Now() - start);
+  HISTOGRAM_TIMES("DiskCache.GetFileForNewBlock", TimeTicks::Now() - start);
   return file;
 }
 
diff --git a/net/disk_cache/entry_impl.cc b/net/disk_cache/entry_impl.cc
index e3fda2a..67fa32b 100644
--- a/net/disk_cache/entry_impl.cc
+++ b/net/disk_cache/entry_impl.cc
@@ -17,6 +17,7 @@
 using base::Time;
 using base::TimeDelta;
+using base::TimeTicks;
 
 namespace {
 
@@ -29,7 +30,8 @@ class SyncCallback: public disk_cache::FileIOCallback {
  public:
   SyncCallback(disk_cache::EntryImpl* entry, net::IOBuffer* buffer,
                net::CompletionCallback* callback )
-      : entry_(entry), callback_(callback), buf_(buffer), start_(Time::Now()) {
+      : entry_(entry), callback_(callback), buf_(buffer),
+        start_(TimeTicks::Now()) {
     entry->AddRef();
     entry->IncrementIoCount();
   }
@@ -41,7 +43,7 @@ class SyncCallback: public disk_cache::FileIOCallback {
   disk_cache::EntryImpl* entry_;
   net::CompletionCallback* callback_;
   scoped_refptr<net::IOBuffer> buf_;
-  Time start_;
+  TimeTicks start_;
 
   DISALLOW_EVIL_CONSTRUCTORS(SyncCallback);
 };
@@ -199,7 +201,7 @@ int EntryImpl::ReadData(int index, int offset, net::IOBuffer* buf, int buf_len,
   if (buf_len < 0)
     return net::ERR_INVALID_ARGUMENT;
 
-  Time start = Time::Now();
+  TimeTicks start = TimeTicks::Now();
 
   if (offset + buf_len > entry_size)
     buf_len = entry_size - offset;
@@ -270,7 +272,7 @@ int EntryImpl::WriteData(int index, int offset, net::IOBuffer* buf, int buf_len,
     return net::ERR_FAILED;
   }
 
-  Time start = Time::Now();
+  TimeTicks start = TimeTicks::Now();
 
   // Read the size at this point (it may change inside prepare).
   int entry_size = entry_.Data()->data_size[index];
@@ -355,7 +357,7 @@ int EntryImpl::ReadSparseData(int64 offset, net::IOBuffer* buf, int buf_len,
   if (net::OK != result)
     return result;
 
-  Time start = Time::Now();
+  TimeTicks start = TimeTicks::Now();
   result = sparse_->StartIO(SparseControl::kReadOperation, offset, buf, buf_len,
                             completion_callback);
   ReportIOTime(kSparseRead, start);
@@ -369,7 +371,7 @@ int EntryImpl::WriteSparseData(int64 offset, net::IOBuffer* buf, int buf_len,
   if (net::OK != result)
     return result;
 
-  Time start = Time::Now();
+  TimeTicks start = TimeTicks::Now();
   result = sparse_->StartIO(SparseControl::kWriteOperation, offset, buf, buf_len,
                             completion_callback);
   ReportIOTime(kSparseWrite, start);
@@ -596,7 +598,7 @@ void EntryImpl::SetTimes(base::Time last_used, base::Time last_modified) {
   node_.set_modified();
 }
 
-void EntryImpl::ReportIOTime(Operation op, const base::Time& start) {
+void EntryImpl::ReportIOTime(Operation op, const base::TimeTicks& start) {
   int group = backend_->GetSizeGroup();
   switch (op) {
     case kRead:
diff --git a/net/disk_cache/entry_impl.h b/net/disk_cache/entry_impl.h
index 76e4965..1cc0b84 100644
--- a/net/disk_cache/entry_impl.h
+++ b/net/disk_cache/entry_impl.h
@@ -112,7 +112,7 @@ class EntryImpl : public Entry, public base::RefCounted<EntryImpl> {
   void SetTimes(base::Time last_used, base::Time last_modified);
 
   // Generates a histogram for the time spent working on this operation.
-  void ReportIOTime(Operation op, const base::Time& start);
+  void ReportIOTime(Operation op, const base::TimeTicks& start);
 
  private:
   enum {
diff --git a/net/disk_cache/eviction.cc b/net/disk_cache/eviction.cc
index fd5bd2e..0d92935 100644
--- a/net/disk_cache/eviction.cc
+++ b/net/disk_cache/eviction.cc
@@ -38,6 +38,7 @@
 #include "net/disk_cache/trace.h"
 
 using base::Time;
+using base::TimeTicks;
 
 namespace {
 
@@ -83,7 +84,7 @@ void Eviction::TrimCache(bool empty) {
 
   Trace("*** Trim Cache ***");
   trimming_ = true;
-  Time start = Time::Now();
+  TimeTicks start = TimeTicks::Now();
   Rankings::ScopedRankingsBlock node(rankings_);
   Rankings::ScopedRankingsBlock next(rankings_,
       rankings_->GetPrev(node.get(), Rankings::NO_USE));
@@ -104,7 +105,7 @@ void Eviction::TrimCache(bool empty) {
 
     if (!empty) {
       backend_->OnEvent(Stats::TRIM_ENTRY);
-      if ((Time::Now() - start).InMilliseconds() > 20) {
+      if ((TimeTicks::Now() - start).InMilliseconds() > 20) {
         MessageLoop::current()->PostTask(FROM_HERE,
             factory_.NewRunnableMethod(&Eviction::TrimCache, false));
         break;
@@ -245,7 +246,7 @@ bool Eviction::EvictEntry(CacheRankingsBlock* node, bool empty) {
 void Eviction::TrimCacheV2(bool empty) {
   Trace("*** Trim Cache ***");
   trimming_ = true;
-  Time start = Time::Now();
+  TimeTicks start = TimeTicks::Now();
 
   const int kListsToSearch = 3;
   Rankings::ScopedRankingsBlock next[kListsToSearch];
@@ -296,7 +297,7 @@ void Eviction::TrimCacheV2(bool empty) {
     if (!EvictEntry(node.get(), empty))
       continue;
 
-    if (!empty && (Time::Now() - start).InMilliseconds() > 20) {
+    if (!empty && (TimeTicks::Now() - start).InMilliseconds() > 20) {
       MessageLoop::current()->PostTask(FROM_HERE,
           factory_.NewRunnableMethod(&Eviction::TrimCache, false));
       break;
@@ -415,7 +416,7 @@ void Eviction::TrimDeleted(bool empty) {
   if (backend_->disabled_)
     return;
 
-  Time start = Time::Now();
+  TimeTicks start = TimeTicks::Now();
   Rankings::ScopedRankingsBlock node(rankings_);
   Rankings::ScopedRankingsBlock next(rankings_,
       rankings_->GetPrev(node.get(), Rankings::DELETED));
diff --git a/net/disk_cache/histogram_macros.h b/net/disk_cache/histogram_macros.h
index 17cd345..27610f5 100644
--- a/net/disk_cache/histogram_macros.h
+++ b/net/disk_cache/histogram_macros.h
@@ -17,13 +17,13 @@
 
 // HISTOGRAM_AGE will collect time elapsed since |initial_time|, with a
 // granularity of hours and normal values of a few months.
-#define UMA_HISTOGRAM_AGE(name, initial_time)\
+#define UMA_HISTOGRAM_AGE(name, initial_time) \
   UMA_HISTOGRAM_COUNTS_10000(name, (Time::Now() - initial_time).InHours())
 
 // HISTOGRAM_AGE_MS will collect time elapsed since |initial_time|, with the
 // normal resolution of the UMA_HISTOGRAM_TIMES.
 #define UMA_HISTOGRAM_AGE_MS(name, initial_time)\
-  UMA_HISTOGRAM_TIMES(name, Time::Now() - initial_time)
+  UMA_HISTOGRAM_TIMES(name, TimeTicks::Now() - initial_time)
 
 #define UMA_HISTOGRAM_CACHE_ERROR(name, sample) \
   UMA_HISTOGRAM_ENUMERATION(name, sample, 50)
diff --git a/net/disk_cache/rankings.cc b/net/disk_cache/rankings.cc
index d2250df..34c002b 100644
--- a/net/disk_cache/rankings.cc
+++ b/net/disk_cache/rankings.cc
@@ -11,6 +11,7 @@
 #include "net/disk_cache/histogram_macros.h"
 
 using base::Time;
+using base::TimeTicks;
 
 // This is used by crash_cache.exe to generate unit test files.
 disk_cache::RankCrashes g_rankings_crash = disk_cache::NO_CRASH;
 
@@ -207,7 +208,7 @@ bool Rankings::GetRanking(CacheRankingsBlock* rankings) {
   if (!rankings->address().is_initialized())
     return false;
 
-  Time start = Time::Now();
+  TimeTicks start = TimeTicks::Now();
   if (!rankings->Load())
     return false;
 
@@ -406,7 +407,7 @@ void Rankings::Remove(CacheRankingsBlock* node, List list) {
 // but the net effect is just an assert on debug when attempting to remove the
 // entry. Otherwise we'll need reentrant transactions, which is an overkill.
 void Rankings::UpdateRank(CacheRankingsBlock* node, bool modified, List list) {
-  Time start = Time::Now();
+  TimeTicks start = TimeTicks::Now();
   Remove(node, list);
   Insert(node, modified, list);
   CACHE_UMA(AGE_MS, "UpdateRank", 0, start);
```
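
Worth noting in the histogram_macros.h hunk above: only UMA_HISTOGRAM_AGE_MS switches to TimeTicks. UMA_HISTOGRAM_AGE keeps Time::Now() because, per its own comment, it measures ages of a few months against wall-clock timestamps, whereas AGE_MS times short in-process operations, where the monotonic clock is the right tool. A hedged usage sketch under those assumptions (the calling functions and the "DiskCache.EntryAge" histogram name are illustrative, not from this CL; include paths are the 2010-era ones):

```cpp
#include "base/histogram.h"  // UMA_HISTOGRAM_* base macros (2010-era path).
#include "base/time.h"
#include "net/disk_cache/histogram_macros.h"

// The macros expand to unqualified Time::Now() / TimeTicks::Now(), so the
// including file needs these using declarations, just like the patched .cc
// files above.
using base::Time;
using base::TimeTicks;

// Wall-clock age of a timestamp persisted with a cache entry: months-scale,
// so the non-monotonic Time matches the stored value. Illustrative name.
void ReportEntryAge(const Time& last_used) {
  UMA_HISTOGRAM_AGE("DiskCache.EntryAge", last_used);
}

// Short in-process duration: measured against the monotonic TimeTicks,
// matching the AGE_MS macro after this change. Illustrative caller.
void ReportUpdateLatency(const TimeTicks& operation_start) {
  UMA_HISTOGRAM_AGE_MS("DiskCache.UpdateRank", operation_start);
}
```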