summaryrefslogtreecommitdiffstats
path: root/net/disk_cache
diff options
context:
space:
mode:
authorerg@google.com <erg@google.com@0039d316-1c4b-4281-b951-d872f2087c98>2011-01-26 22:47:11 +0000
committererg@google.com <erg@google.com@0039d316-1c4b-4281-b951-d872f2087c98>2011-01-26 22:47:11 +0000
commitd100e44f64d4abb2cc244cb61bb736c602146767 (patch)
treebfdd81d5424b2335e8543044dd726b0d30666663 /net/disk_cache
parent5d8054efc1e1f26ea806e46869df5e0a84e41a4c (diff)
downloadchromium_src-d100e44f64d4abb2cc244cb61bb736c602146767.zip
chromium_src-d100e44f64d4abb2cc244cb61bb736c602146767.tar.gz
chromium_src-d100e44f64d4abb2cc244cb61bb736c602146767.tar.bz2
More net/ method ordering.
BUG=68682 TEST=compiles Review URL: http://codereview.chromium.org/6339012 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@72710 0039d316-1c4b-4281-b951-d872f2087c98
Diffstat (limited to 'net/disk_cache')
-rw-r--r--net/disk_cache/backend_impl.cc56
-rw-r--r--net/disk_cache/file_posix.cc36
-rw-r--r--net/disk_cache/mapped_file_posix.cc20
-rw-r--r--net/disk_cache/rankings.cc324
-rw-r--r--net/disk_cache/stats.cc178
5 files changed, 306 insertions, 308 deletions
diff --git a/net/disk_cache/backend_impl.cc b/net/disk_cache/backend_impl.cc
index 89890e9..0a6ad778 100644
--- a/net/disk_cache/backend_impl.cc
+++ b/net/disk_cache/backend_impl.cc
@@ -344,34 +344,6 @@ int PreferedCacheSize(int64 available) {
// ------------------------------------------------------------------------
-// If the initialization of the cache fails, and force is true, we will discard
-// the whole cache and create a new one. In order to process a potentially large
-// number of files, we'll rename the cache folder to old_ + original_name +
-// number, (located on the same parent folder), and spawn a worker thread to
-// delete all the files on all the stale cache folders. The whole process can
-// still fail if we are not able to rename the cache folder (for instance due to
-// a sharing violation), and in that case a cache for this profile (on the
-// desired path) cannot be created.
-//
-// Static.
-int BackendImpl::CreateBackend(const FilePath& full_path, bool force,
- int max_bytes, net::CacheType type,
- uint32 flags, base::MessageLoopProxy* thread,
- net::NetLog* net_log, Backend** backend,
- CompletionCallback* callback) {
- DCHECK(callback);
- CacheCreator* creator = new CacheCreator(full_path, force, max_bytes, type,
- flags, thread, net_log, backend,
- callback);
- // This object will self-destroy when finished.
- return creator->Run();
-}
-
-int BackendImpl::Init(CompletionCallback* callback) {
- background_queue_.Init(callback);
- return net::ERR_IO_PENDING;
-}
-
BackendImpl::BackendImpl(const FilePath& path,
base::MessageLoopProxy* cache_thread,
net::NetLog* net_log)
@@ -436,7 +408,33 @@ BackendImpl::~BackendImpl() {
}
}
-// ------------------------------------------------------------------------
+// If the initialization of the cache fails, and force is true, we will discard
+// the whole cache and create a new one. In order to process a potentially large
+// number of files, we'll rename the cache folder to old_ + original_name +
+// number, (located on the same parent folder), and spawn a worker thread to
+// delete all the files on all the stale cache folders. The whole process can
+// still fail if we are not able to rename the cache folder (for instance due to
+// a sharing violation), and in that case a cache for this profile (on the
+// desired path) cannot be created.
+//
+// Static.
+int BackendImpl::CreateBackend(const FilePath& full_path, bool force,
+ int max_bytes, net::CacheType type,
+ uint32 flags, base::MessageLoopProxy* thread,
+ net::NetLog* net_log, Backend** backend,
+ CompletionCallback* callback) {
+ DCHECK(callback);
+ CacheCreator* creator = new CacheCreator(full_path, force, max_bytes, type,
+ flags, thread, net_log, backend,
+ callback);
+ // This object will self-destroy when finished.
+ return creator->Run();
+}
+
+int BackendImpl::Init(CompletionCallback* callback) {
+ background_queue_.Init(callback);
+ return net::ERR_IO_PENDING;
+}
int BackendImpl::SyncInit() {
DCHECK(!init_);
diff --git a/net/disk_cache/file_posix.cc b/net/disk_cache/file_posix.cc
index 01dafd3..740d108 100644
--- a/net/disk_cache/file_posix.cc
+++ b/net/disk_cache/file_posix.cc
@@ -189,11 +189,6 @@ bool File::Init(const FilePath& name) {
return true;
}
-File::~File() {
- if (platform_file_)
- close(platform_file_);
-}
-
base::PlatformFile File::platform_file() const {
return platform_file_;
}
@@ -255,19 +250,6 @@ bool File::Write(const void* buffer, size_t buffer_len, size_t offset,
return AsyncWrite(buffer, buffer_len, offset, callback, completed);
}
-bool File::AsyncWrite(const void* buffer, size_t buffer_len, size_t offset,
- FileIOCallback* callback, bool* completed) {
- DCHECK(init_);
- if (buffer_len > ULONG_MAX || offset > ULONG_MAX)
- return false;
-
- GetFileInFlightIO()->PostWrite(this, buffer, buffer_len, offset, callback);
-
- if (completed)
- *completed = false;
- return true;
-}
-
bool File::SetLength(size_t length) {
DCHECK(init_);
if (length > ULONG_MAX)
@@ -290,4 +272,22 @@ void File::WaitForPendingIO(int* num_pending_io) {
DeleteFileInFlightIO();
}
+File::~File() {
+ if (platform_file_)
+ close(platform_file_);
+}
+
+bool File::AsyncWrite(const void* buffer, size_t buffer_len, size_t offset,
+ FileIOCallback* callback, bool* completed) {
+ DCHECK(init_);
+ if (buffer_len > ULONG_MAX || offset > ULONG_MAX)
+ return false;
+
+ GetFileInFlightIO()->PostWrite(this, buffer, buffer_len, offset, callback);
+
+ if (completed)
+ *completed = false;
+ return true;
+}
+
} // namespace disk_cache
diff --git a/net/disk_cache/mapped_file_posix.cc b/net/disk_cache/mapped_file_posix.cc
index f9a361b..9abfa5cd 100644
--- a/net/disk_cache/mapped_file_posix.cc
+++ b/net/disk_cache/mapped_file_posix.cc
@@ -32,16 +32,6 @@ void* MappedFile::Init(const FilePath& name, size_t size) {
return buffer_;
}
-MappedFile::~MappedFile() {
- if (!init_)
- return;
-
- if (buffer_) {
- int ret = munmap(buffer_, view_size_);
- DCHECK(0 == ret);
- }
-}
-
bool MappedFile::Load(const FileBlock* block) {
size_t offset = block->offset() + view_size_;
return Read(block->buffer(), block->size(), offset);
@@ -52,4 +42,14 @@ bool MappedFile::Store(const FileBlock* block) {
return Write(block->buffer(), block->size(), offset);
}
+MappedFile::~MappedFile() {
+ if (!init_)
+ return;
+
+ if (buffer_) {
+ int ret = munmap(buffer_, view_size_);
+ DCHECK(0 == ret);
+ }
+}
+
} // namespace disk_cache
diff --git a/net/disk_cache/rankings.cc b/net/disk_cache/rankings.cc
index 801d387..b10dac6 100644
--- a/net/disk_cache/rankings.cc
+++ b/net/disk_cache/rankings.cc
@@ -228,58 +228,6 @@ void Rankings::Reset() {
control_data_ = NULL;
}
-bool Rankings::GetRanking(CacheRankingsBlock* rankings) {
- if (!rankings->address().is_initialized())
- return false;
-
- TimeTicks start = TimeTicks::Now();
- if (!rankings->Load())
- return false;
-
- if (!SanityCheck(rankings, true)) {
- backend_->CriticalError(ERR_INVALID_LINKS);
- return false;
- }
-
- backend_->OnEvent(Stats::OPEN_RANKINGS);
-
- // "dummy" is the old "pointer" value, so it has to be 0.
- if (!rankings->Data()->dirty && !rankings->Data()->dummy)
- return true;
-
- EntryImpl* entry = backend_->GetOpenEntry(rankings);
- if (backend_->GetCurrentEntryId() != rankings->Data()->dirty || !entry) {
- // We cannot trust this entry, but we cannot initiate a cleanup from this
- // point (we may be in the middle of a cleanup already). Just get rid of
- // the invalid pointer and continue; the entry will be deleted when detected
- // from a regular open/create path.
- rankings->Data()->dummy = 0;
- rankings->Data()->dirty = backend_->GetCurrentEntryId() - 1;
- if (!rankings->Data()->dirty)
- rankings->Data()->dirty--;
- return true;
- }
-
- // Note that we should not leave this module without deleting rankings first.
- rankings->SetData(entry->rankings()->Data());
-
- CACHE_UMA(AGE_MS, "GetRankings", 0, start);
- return true;
-}
-
-void Rankings::ConvertToLongLived(CacheRankingsBlock* rankings) {
- if (rankings->own_data())
- return;
-
- // We cannot return a shared node because we are not keeping a reference
- // to the entry that owns the buffer. Make this node a copy of the one that
- // we have, and let the iterator logic update it when the entry changes.
- CacheRankingsBlock temp(NULL, Addr(0));
- *temp.Data() = *rankings->Data();
- rankings->StopSharingData();
- *rankings->Data() = *temp.Data();
-}
-
void Rankings::Insert(CacheRankingsBlock* node, bool modified, List list) {
Trace("Insert 0x%x l %d", node->address().value(), list);
DCHECK(node->HasData());
@@ -443,116 +391,6 @@ void Rankings::UpdateRank(CacheRankingsBlock* node, bool modified, List list) {
CACHE_UMA(AGE_MS, "UpdateRank", 0, start);
}
-void Rankings::CompleteTransaction() {
- Addr node_addr(static_cast<CacheAddr>(control_data_->transaction));
- if (!node_addr.is_initialized() || node_addr.is_separate_file()) {
- NOTREACHED();
- LOG(ERROR) << "Invalid rankings info.";
- return;
- }
-
- Trace("CompleteTransaction 0x%x", node_addr.value());
-
- CacheRankingsBlock node(backend_->File(node_addr), node_addr);
- if (!node.Load())
- return;
-
- node.Data()->dummy = 0;
- node.Store();
-
- Addr& my_head = heads_[control_data_->operation_list];
- Addr& my_tail = tails_[control_data_->operation_list];
-
- // We want to leave the node inside the list. The entry must me marked as
- // dirty, and will be removed later. Otherwise, we'll get assertions when
- // attempting to remove the dirty entry.
- if (INSERT == control_data_->operation) {
- Trace("FinishInsert h:0x%x t:0x%x", my_head.value(), my_tail.value());
- FinishInsert(&node);
- } else if (REMOVE == control_data_->operation) {
- Trace("RevertRemove h:0x%x t:0x%x", my_head.value(), my_tail.value());
- RevertRemove(&node);
- } else {
- NOTREACHED();
- LOG(ERROR) << "Invalid operation to recover.";
- }
-}
-
-void Rankings::FinishInsert(CacheRankingsBlock* node) {
- control_data_->transaction = 0;
- control_data_->operation = 0;
- Addr& my_head = heads_[control_data_->operation_list];
- Addr& my_tail = tails_[control_data_->operation_list];
- if (my_head.value() != node->address().value()) {
- if (my_tail.value() == node->address().value()) {
- // This part will be skipped by the logic of Insert.
- node->Data()->next = my_tail.value();
- }
-
- Insert(node, true, static_cast<List>(control_data_->operation_list));
- }
-
- // Tell the backend about this entry.
- backend_->RecoveredEntry(node);
-}
-
-void Rankings::RevertRemove(CacheRankingsBlock* node) {
- Addr next_addr(node->Data()->next);
- Addr prev_addr(node->Data()->prev);
- if (!next_addr.is_initialized() || !prev_addr.is_initialized()) {
- // The operation actually finished. Nothing to do.
- control_data_->transaction = 0;
- return;
- }
- if (next_addr.is_separate_file() || prev_addr.is_separate_file()) {
- NOTREACHED();
- LOG(WARNING) << "Invalid rankings info.";
- control_data_->transaction = 0;
- return;
- }
-
- CacheRankingsBlock next(backend_->File(next_addr), next_addr);
- CacheRankingsBlock prev(backend_->File(prev_addr), prev_addr);
- if (!next.Load() || !prev.Load())
- return;
-
- CacheAddr node_value = node->address().value();
- DCHECK(prev.Data()->next == node_value ||
- prev.Data()->next == prev_addr.value() ||
- prev.Data()->next == next.address().value());
- DCHECK(next.Data()->prev == node_value ||
- next.Data()->prev == next_addr.value() ||
- next.Data()->prev == prev.address().value());
-
- if (node_value != prev_addr.value())
- prev.Data()->next = node_value;
- if (node_value != next_addr.value())
- next.Data()->prev = node_value;
-
- List my_list = static_cast<List>(control_data_->operation_list);
- Addr& my_head = heads_[my_list];
- Addr& my_tail = tails_[my_list];
- if (!my_head.is_initialized() || !my_tail.is_initialized()) {
- my_head.set_value(node_value);
- my_tail.set_value(node_value);
- WriteHead(my_list);
- WriteTail(my_list);
- } else if (my_head.value() == next.address().value()) {
- my_head.set_value(node_value);
- prev.Data()->next = next.address().value();
- WriteHead(my_list);
- } else if (my_tail.value() == prev.address().value()) {
- my_tail.set_value(node_value);
- next.Data()->prev = prev.address().value();
- WriteTail(my_list);
- }
-
- next.Store();
- prev.Store();
- control_data_->transaction = 0;
- control_data_->operation = 0;
-}
-
CacheRankingsBlock* Rankings::GetNext(CacheRankingsBlock* node, List list) {
ScopedRankingsBlock next(this);
if (!node) {
@@ -691,6 +529,168 @@ void Rankings::WriteTail(List list) {
control_data_->tails[list] = tails_[list].value();
}
+bool Rankings::GetRanking(CacheRankingsBlock* rankings) {
+ if (!rankings->address().is_initialized())
+ return false;
+
+ TimeTicks start = TimeTicks::Now();
+ if (!rankings->Load())
+ return false;
+
+ if (!SanityCheck(rankings, true)) {
+ backend_->CriticalError(ERR_INVALID_LINKS);
+ return false;
+ }
+
+ backend_->OnEvent(Stats::OPEN_RANKINGS);
+
+ // "dummy" is the old "pointer" value, so it has to be 0.
+ if (!rankings->Data()->dirty && !rankings->Data()->dummy)
+ return true;
+
+ EntryImpl* entry = backend_->GetOpenEntry(rankings);
+ if (backend_->GetCurrentEntryId() != rankings->Data()->dirty || !entry) {
+ // We cannot trust this entry, but we cannot initiate a cleanup from this
+ // point (we may be in the middle of a cleanup already). Just get rid of
+ // the invalid pointer and continue; the entry will be deleted when detected
+ // from a regular open/create path.
+ rankings->Data()->dummy = 0;
+ rankings->Data()->dirty = backend_->GetCurrentEntryId() - 1;
+ if (!rankings->Data()->dirty)
+ rankings->Data()->dirty--;
+ return true;
+ }
+
+ // Note that we should not leave this module without deleting rankings first.
+ rankings->SetData(entry->rankings()->Data());
+
+ CACHE_UMA(AGE_MS, "GetRankings", 0, start);
+ return true;
+}
+
+void Rankings::ConvertToLongLived(CacheRankingsBlock* rankings) {
+ if (rankings->own_data())
+ return;
+
+ // We cannot return a shared node because we are not keeping a reference
+ // to the entry that owns the buffer. Make this node a copy of the one that
+ // we have, and let the iterator logic update it when the entry changes.
+ CacheRankingsBlock temp(NULL, Addr(0));
+ *temp.Data() = *rankings->Data();
+ rankings->StopSharingData();
+ *rankings->Data() = *temp.Data();
+}
+
+void Rankings::CompleteTransaction() {
+ Addr node_addr(static_cast<CacheAddr>(control_data_->transaction));
+ if (!node_addr.is_initialized() || node_addr.is_separate_file()) {
+ NOTREACHED();
+ LOG(ERROR) << "Invalid rankings info.";
+ return;
+ }
+
+ Trace("CompleteTransaction 0x%x", node_addr.value());
+
+ CacheRankingsBlock node(backend_->File(node_addr), node_addr);
+ if (!node.Load())
+ return;
+
+ node.Data()->dummy = 0;
+ node.Store();
+
+ Addr& my_head = heads_[control_data_->operation_list];
+ Addr& my_tail = tails_[control_data_->operation_list];
+
+ // We want to leave the node inside the list. The entry must be marked as
+ // dirty, and will be removed later. Otherwise, we'll get assertions when
+ // attempting to remove the dirty entry.
+ if (INSERT == control_data_->operation) {
+ Trace("FinishInsert h:0x%x t:0x%x", my_head.value(), my_tail.value());
+ FinishInsert(&node);
+ } else if (REMOVE == control_data_->operation) {
+ Trace("RevertRemove h:0x%x t:0x%x", my_head.value(), my_tail.value());
+ RevertRemove(&node);
+ } else {
+ NOTREACHED();
+ LOG(ERROR) << "Invalid operation to recover.";
+ }
+}
+
+void Rankings::FinishInsert(CacheRankingsBlock* node) {
+ control_data_->transaction = 0;
+ control_data_->operation = 0;
+ Addr& my_head = heads_[control_data_->operation_list];
+ Addr& my_tail = tails_[control_data_->operation_list];
+ if (my_head.value() != node->address().value()) {
+ if (my_tail.value() == node->address().value()) {
+ // This part will be skipped by the logic of Insert.
+ node->Data()->next = my_tail.value();
+ }
+
+ Insert(node, true, static_cast<List>(control_data_->operation_list));
+ }
+
+ // Tell the backend about this entry.
+ backend_->RecoveredEntry(node);
+}
+
+void Rankings::RevertRemove(CacheRankingsBlock* node) {
+ Addr next_addr(node->Data()->next);
+ Addr prev_addr(node->Data()->prev);
+ if (!next_addr.is_initialized() || !prev_addr.is_initialized()) {
+ // The operation actually finished. Nothing to do.
+ control_data_->transaction = 0;
+ return;
+ }
+ if (next_addr.is_separate_file() || prev_addr.is_separate_file()) {
+ NOTREACHED();
+ LOG(WARNING) << "Invalid rankings info.";
+ control_data_->transaction = 0;
+ return;
+ }
+
+ CacheRankingsBlock next(backend_->File(next_addr), next_addr);
+ CacheRankingsBlock prev(backend_->File(prev_addr), prev_addr);
+ if (!next.Load() || !prev.Load())
+ return;
+
+ CacheAddr node_value = node->address().value();
+ DCHECK(prev.Data()->next == node_value ||
+ prev.Data()->next == prev_addr.value() ||
+ prev.Data()->next == next.address().value());
+ DCHECK(next.Data()->prev == node_value ||
+ next.Data()->prev == next_addr.value() ||
+ next.Data()->prev == prev.address().value());
+
+ if (node_value != prev_addr.value())
+ prev.Data()->next = node_value;
+ if (node_value != next_addr.value())
+ next.Data()->prev = node_value;
+
+ List my_list = static_cast<List>(control_data_->operation_list);
+ Addr& my_head = heads_[my_list];
+ Addr& my_tail = tails_[my_list];
+ if (!my_head.is_initialized() || !my_tail.is_initialized()) {
+ my_head.set_value(node_value);
+ my_tail.set_value(node_value);
+ WriteHead(my_list);
+ WriteTail(my_list);
+ } else if (my_head.value() == next.address().value()) {
+ my_head.set_value(node_value);
+ prev.Data()->next = next.address().value();
+ WriteHead(my_list);
+ } else if (my_tail.value() == prev.address().value()) {
+ my_tail.set_value(node_value);
+ next.Data()->prev = prev.address().value();
+ WriteTail(my_list);
+ }
+
+ next.Store();
+ prev.Store();
+ control_data_->transaction = 0;
+ control_data_->operation = 0;
+}
+
bool Rankings::CheckEntry(CacheRankingsBlock* rankings) {
if (!rankings->Data()->dummy)
return true;
diff --git a/net/disk_cache/stats.cc b/net/disk_cache/stats.cc
index 5222112..d9a9d12 100644
--- a/net/disk_cache/stats.cc
+++ b/net/disk_cache/stats.cc
@@ -116,6 +116,12 @@ bool CreateStats(BackendImpl* backend, Addr* address, OnDiskStats* stats) {
return StoreStats(backend, *address, stats);
}
+Stats::Stats() : backend_(NULL) {
+}
+
+Stats::~Stats() {
+}
+
bool Stats::Init(BackendImpl* backend, uint32* storage_addr) {
OnDiskStats stats;
Addr address(*storage_addr);
@@ -153,86 +159,6 @@ bool Stats::Init(BackendImpl* backend, uint32* storage_addr) {
return true;
}
-Stats::Stats() : backend_(NULL) {
-}
-
-Stats::~Stats() {
-}
-
-// The array will be filled this way:
-// index size
-// 0 [0, 1024)
-// 1 [1024, 2048)
-// 2 [2048, 4096)
-// 3 [4K, 6K)
-// ...
-// 10 [18K, 20K)
-// 11 [20K, 24K)
-// 12 [24k, 28K)
-// ...
-// 15 [36k, 40K)
-// 16 [40k, 64K)
-// 17 [64K, 128K)
-// 18 [128K, 256K)
-// ...
-// 23 [4M, 8M)
-// 24 [8M, 16M)
-// 25 [16M, 32M)
-// 26 [32M, 64M)
-// 27 [64M, ...)
-int Stats::GetStatsBucket(int32 size) {
- if (size < 1024)
- return 0;
-
- // 10 slots more, until 20K.
- if (size < 20 * 1024)
- return size / 2048 + 1;
-
- // 5 slots more, from 20K to 40K.
- if (size < 40 * 1024)
- return (size - 20 * 1024) / 4096 + 11;
-
- // From this point on, use a logarithmic scale.
- int result = LogBase2(size) + 1;
-
- COMPILE_ASSERT(kDataSizesLength > 16, update_the_scale);
- if (result >= kDataSizesLength)
- result = kDataSizesLength - 1;
-
- return result;
-}
-
-int Stats::GetBucketRange(size_t i) const {
- if (i < 2)
- return static_cast<int>(1024 * i);
-
- if (i < 12)
- return static_cast<int>(2048 * (i - 1));
-
- if (i < 17)
- return static_cast<int>(4096 * (i - 11)) + 20 * 1024;
-
- int n = 64 * 1024;
- if (i > static_cast<size_t>(kDataSizesLength)) {
- NOTREACHED();
- i = kDataSizesLength;
- }
-
- i -= 17;
- n <<= i;
- return n;
-}
-
-void Stats::Snapshot(StatsHistogram::StatsSamples* samples) const {
- samples->GetCounts()->resize(kDataSizesLength);
- for (int i = 0; i < kDataSizesLength; i++) {
- int count = data_sizes_[i];
- if (count < 0)
- count = 0;
- samples->GetCounts()->at(i) = count;
- }
-}
-
void Stats::ModifyStorageStats(int32 old_size, int32 new_size) {
// We keep a counter of the data block size on an array where each entry is
// the adjusted log base 2 of the size. The first entry counts blocks of 256
@@ -286,15 +212,6 @@ int Stats::GetResurrectRatio() const {
return GetRatio(RESURRECT_HIT, CREATE_HIT);
}
-int Stats::GetRatio(Counters hit, Counters miss) const {
- int64 ratio = GetCounter(hit) * 100;
- if (!ratio)
- return 0;
-
- ratio /= (GetCounter(hit) + GetCounter(miss));
- return static_cast<int>(ratio);
-}
-
void Stats::ResetRatios() {
SetCounter(OPEN_HIT, 0);
SetCounter(OPEN_MISS, 0);
@@ -326,4 +243,87 @@ void Stats::Store() {
StoreStats(backend_, address, &stats);
}
+int Stats::GetBucketRange(size_t i) const {
+ if (i < 2)
+ return static_cast<int>(1024 * i);
+
+ if (i < 12)
+ return static_cast<int>(2048 * (i - 1));
+
+ if (i < 17)
+ return static_cast<int>(4096 * (i - 11)) + 20 * 1024;
+
+ int n = 64 * 1024;
+ if (i > static_cast<size_t>(kDataSizesLength)) {
+ NOTREACHED();
+ i = kDataSizesLength;
+ }
+
+ i -= 17;
+ n <<= i;
+ return n;
+}
+
+void Stats::Snapshot(StatsHistogram::StatsSamples* samples) const {
+ samples->GetCounts()->resize(kDataSizesLength);
+ for (int i = 0; i < kDataSizesLength; i++) {
+ int count = data_sizes_[i];
+ if (count < 0)
+ count = 0;
+ samples->GetCounts()->at(i) = count;
+ }
+}
+
+// The array will be filled this way:
+// index size
+// 0 [0, 1024)
+// 1 [1024, 2048)
+// 2 [2048, 4096)
+// 3 [4K, 6K)
+// ...
+// 10 [18K, 20K)
+// 11 [20K, 24K)
+// 12 [24k, 28K)
+// ...
+// 15 [36k, 40K)
+// 16 [40k, 64K)
+// 17 [64K, 128K)
+// 18 [128K, 256K)
+// ...
+// 23 [4M, 8M)
+// 24 [8M, 16M)
+// 25 [16M, 32M)
+// 26 [32M, 64M)
+// 27 [64M, ...)
+int Stats::GetStatsBucket(int32 size) {
+ if (size < 1024)
+ return 0;
+
+ // 10 slots more, until 20K.
+ if (size < 20 * 1024)
+ return size / 2048 + 1;
+
+ // 5 slots more, from 20K to 40K.
+ if (size < 40 * 1024)
+ return (size - 20 * 1024) / 4096 + 11;
+
+ // From this point on, use a logarithmic scale.
+ int result = LogBase2(size) + 1;
+
+ COMPILE_ASSERT(kDataSizesLength > 16, update_the_scale);
+ if (result >= kDataSizesLength)
+ result = kDataSizesLength - 1;
+
+ return result;
+}
+
+int Stats::GetRatio(Counters hit, Counters miss) const {
+ int64 ratio = GetCounter(hit) * 100;
+ if (!ratio)
+ return 0;
+
+ ratio /= (GetCounter(hit) + GetCounter(miss));
+ return static_cast<int>(ratio);
+}
+
} // namespace disk_cache