-rw-r--r--  base/time/time.h                                            8
-rw-r--r--  base/time/time_unittest.cc                                  9
-rw-r--r--  content/browser/dom_storage/dom_storage_area.cc           125
-rw-r--r--  content/browser/dom_storage/dom_storage_area.h             38
-rw-r--r--  content/browser/dom_storage/dom_storage_area_unittest.cc   47
-rw-r--r--  content/browser/dom_storage/dom_storage_namespace.cc       17
-rw-r--r--  content/common/dom_storage/dom_storage_map.cc              21
-rw-r--r--  content/common/dom_storage/dom_storage_map.h                2
-rw-r--r--  tools/metrics/histograms/histograms.xml                    26
9 files changed, 245 insertions(+), 48 deletions(-)
diff --git a/base/time/time.h b/base/time/time.h
index 18de085..6d61861 100644
--- a/base/time/time.h
+++ b/base/time/time.h
@@ -178,6 +178,14 @@ class BASE_EXPORT TimeDelta {
return delta_ / a.delta_;
}
+ // Multiplicative computations with floats.
+ TimeDelta multiply_by(double a) const {
+ return TimeDelta(delta_ * a);
+ }
+ TimeDelta divide_by(double a) const {
+ return TimeDelta(delta_ / a);
+ }
+
// Defined below because it depends on the definition of the other classes.
Time operator+(Time t) const;
TimeTicks operator+(TimeTicks t) const;
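
A minimal usage sketch for the new helpers (illustration only; ExampleFractionalScaling is a hypothetical function and the snippet assumes a Chromium checkout so that base/time/time.h and base/logging.h are available):

#include "base/logging.h"
#include "base/time/time.h"

// Scale a delay by a fractional factor, which the existing integer
// operator* / operator/ overloads cannot express.
void ExampleFractionalScaling() {
  base::TimeDelta base_delay = base::TimeDelta::FromSeconds(4);
  base::TimeDelta grown = base_delay.multiply_by(1.5);  // 6 seconds
  base::TimeDelta halved = base_delay.divide_by(2.0);   // 2 seconds
  DCHECK_EQ(base::TimeDelta::FromSeconds(6), grown);
  DCHECK_EQ(base::TimeDelta::FromSeconds(2), halved);
}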
diff --git a/base/time/time_unittest.cc b/base/time/time_unittest.cc
index fdac59d..6387ec7 100644
--- a/base/time/time_unittest.cc
+++ b/base/time/time_unittest.cc
@@ -867,6 +867,15 @@ TEST(TimeDelta, Magnitude) {
TimeDelta::FromMicroseconds(min_int64_plus_two).magnitude());
}
+
+TEST(TimeDelta, multiply_by) {
+ double d = 0.5;
+ EXPECT_EQ(TimeDelta::FromMilliseconds(500),
+ TimeDelta::FromMilliseconds(1000).multiply_by(d));
+ EXPECT_EQ(TimeDelta::FromMilliseconds(2000),
+ TimeDelta::FromMilliseconds(1000).divide_by(d));
+}
+
TEST(TimeDeltaLogging, DCheckEqCompiles) {
DCHECK_EQ(TimeDelta(), TimeDelta());
}
diff --git a/content/browser/dom_storage/dom_storage_area.cc b/content/browser/dom_storage/dom_storage_area.cc
index 90a55a0..a2611eb 100644
--- a/content/browser/dom_storage/dom_storage_area.cc
+++ b/content/browser/dom_storage/dom_storage_area.cc
@@ -4,10 +4,13 @@
#include "content/browser/dom_storage/dom_storage_area.h"
+#include <algorithm>
+
#include "base/bind.h"
#include "base/location.h"
#include "base/logging.h"
#include "base/metrics/histogram.h"
+#include "base/process/process_info.h"
#include "base/strings/utf_string_conversions.h"
#include "base/time/time.h"
#include "content/browser/dom_storage/dom_storage_namespace.h"
@@ -25,13 +28,44 @@ using storage::DatabaseUtil;
namespace content {
-static const int kCommitTimerSeconds = 1;
+namespace {
+
+// Delay for a moment after a value is set in anticipation
+// of other values being set, so changes are batched.
+const int kCommitDefaultDelaySecs = 5;
+
+// To avoid excessive IO we apply limits to the amount of data being written
+// and the frequency of writes. The specific values used are somewhat arbitrary.
+const int kMaxBytesPerDay = kPerStorageAreaQuota * 2;
+const int kMaxCommitsPerHour = 6;
+
+} // namespace
+
+DOMStorageArea::RateLimiter::RateLimiter(size_t desired_rate,
+ base::TimeDelta time_quantum)
+ : rate_(desired_rate), samples_(0), time_quantum_(time_quantum) {
+ DCHECK_GT(desired_rate, 0ul);
+}
+
+base::TimeDelta DOMStorageArea::RateLimiter::ComputeTimeNeeded() const {
+ return time_quantum_.multiply_by(samples_ / rate_);
+}
+
+base::TimeDelta DOMStorageArea::RateLimiter::ComputeDelayNeeded(
+ const base::TimeDelta elapsed_time) const {
+ base::TimeDelta time_needed = ComputeTimeNeeded();
+ if (time_needed > elapsed_time)
+ return time_needed - elapsed_time;
+ return base::TimeDelta();
+}
-DOMStorageArea::CommitBatch::CommitBatch()
- : clear_all_first(false) {
+DOMStorageArea::CommitBatch::CommitBatch() : clear_all_first(false) {
}
DOMStorageArea::CommitBatch::~CommitBatch() {}
+size_t DOMStorageArea::CommitBatch::GetDataSize() const {
+ return DOMStorageMap::CountBytes(changed_values);
+}
// static
const base::FilePath::CharType DOMStorageArea::kDatabaseFileExtension[] =
@@ -55,17 +89,21 @@ GURL DOMStorageArea::OriginFromDatabaseFileName(const base::FilePath& name) {
return storage::GetOriginFromIdentifier(origin_id);
}
-DOMStorageArea::DOMStorageArea(
- const GURL& origin, const base::FilePath& directory,
- DOMStorageTaskRunner* task_runner)
- : namespace_id_(kLocalStorageNamespaceId), origin_(origin),
+DOMStorageArea::DOMStorageArea(const GURL& origin,
+ const base::FilePath& directory,
+ DOMStorageTaskRunner* task_runner)
+ : namespace_id_(kLocalStorageNamespaceId),
+ origin_(origin),
directory_(directory),
task_runner_(task_runner),
map_(new DOMStorageMap(kPerStorageAreaQuota +
kPerStorageAreaOverQuotaAllowance)),
is_initial_import_done_(true),
is_shutdown_(false),
- commit_batches_in_flight_(0) {
+ commit_batches_in_flight_(0),
+ start_time_(base::TimeTicks::Now()),
+ data_rate_limiter_(kMaxBytesPerDay, base::TimeDelta::FromHours(24)),
+ commit_rate_limiter_(kMaxCommitsPerHour, base::TimeDelta::FromHours(1)) {
if (!directory.empty()) {
base::FilePath path = directory.Append(DatabaseFileNameFromOrigin(origin_));
backing_.reset(new LocalStorageDatabaseAdapter(path));
@@ -73,12 +111,11 @@ DOMStorageArea::DOMStorageArea(
}
}
-DOMStorageArea::DOMStorageArea(
- int64 namespace_id,
- const std::string& persistent_namespace_id,
- const GURL& origin,
- SessionStorageDatabase* session_storage_backing,
- DOMStorageTaskRunner* task_runner)
+DOMStorageArea::DOMStorageArea(int64 namespace_id,
+ const std::string& persistent_namespace_id,
+ const GURL& origin,
+ SessionStorageDatabase* session_storage_backing,
+ DOMStorageTaskRunner* task_runner)
: namespace_id_(namespace_id),
persistent_namespace_id_(persistent_namespace_id),
origin_(origin),
@@ -88,7 +125,10 @@ DOMStorageArea::DOMStorageArea(
session_storage_backing_(session_storage_backing),
is_initial_import_done_(true),
is_shutdown_(false),
- commit_batches_in_flight_(0) {
+ commit_batches_in_flight_(0),
+ start_time_(base::TimeTicks::Now()),
+ data_rate_limiter_(kMaxBytesPerDay, base::TimeDelta::FromHours(24)),
+ commit_rate_limiter_(kMaxCommitsPerHour, base::TimeDelta::FromHours(1)) {
DCHECK(namespace_id != kLocalStorageNamespaceId);
if (session_storage_backing) {
backing_.reset(new SessionStorageDatabaseAdapter(
@@ -137,7 +177,8 @@ bool DOMStorageArea::SetItem(const base::string16& key,
if (!map_->HasOneRef())
map_ = map_->DeepCopy();
bool success = map_->SetItem(key, value, old_value);
- if (success && backing_) {
+ if (success && backing_ &&
+ (old_value->is_null() || old_value->string() != value)) {
CommitBatch* commit_batch = CreateCommitBatchIfNeeded();
commit_batch->changed_values[key] = base::NullableString16(value, false);
}
@@ -212,19 +253,21 @@ DOMStorageArea* DOMStorageArea::ShallowCopy(
copy->is_initial_import_done_ = true;
// All the uncommitted changes to this area need to happen before the actual
- // shallow copy is made (scheduled by the upper layer). Another OnCommitTimer
- // call might be in the event queue at this point, but it's handled gracefully
- // when it fires.
+ // shallow copy is made (scheduled by the upper layer sometime after return).
if (commit_batch_)
- OnCommitTimer();
+ ScheduleImmediateCommit();
return copy;
}
bool DOMStorageArea::HasUncommittedChanges() const {
- DCHECK(!is_shutdown_);
return commit_batch_.get() || commit_batches_in_flight_;
}
+void DOMStorageArea::ScheduleImmediateCommit() {
+ DCHECK(HasUncommittedChanges());
+ PostCommitTask();
+}
+
void DOMStorageArea::DeleteOrigin() {
DCHECK(!is_shutdown_);
// This function shouldn't be called for sessionStorage.
@@ -327,25 +370,44 @@ DOMStorageArea::CommitBatch* DOMStorageArea::CreateCommitBatchIfNeeded() {
// started after the commits have happened.
if (!commit_batches_in_flight_) {
task_runner_->PostDelayedTask(
- FROM_HERE,
- base::Bind(&DOMStorageArea::OnCommitTimer, this),
- base::TimeDelta::FromSeconds(kCommitTimerSeconds));
+ FROM_HERE, base::Bind(&DOMStorageArea::OnCommitTimer, this),
+ ComputeCommitDelay());
}
}
return commit_batch_.get();
}
+base::TimeDelta DOMStorageArea::ComputeCommitDelay() const {
+ base::TimeDelta elapsed_time = base::TimeTicks::Now() - start_time_;
+ base::TimeDelta delay = std::max(
+ base::TimeDelta::FromSeconds(kCommitDefaultDelaySecs),
+ std::max(commit_rate_limiter_.ComputeDelayNeeded(elapsed_time),
+ data_rate_limiter_.ComputeDelayNeeded(elapsed_time)));
+ UMA_HISTOGRAM_LONG_TIMES("LocalStorage.CommitDelay", delay);
+ return delay;
+}
+
void DOMStorageArea::OnCommitTimer() {
if (is_shutdown_)
return;
- DCHECK(backing_.get());
-
- // It's possible that there is nothing to commit, since a shallow copy occured
- // before the timer fired.
+ // It's possible that there is nothing to commit if an immediate
+ // commit occurred after the timer was scheduled but before it fired.
if (!commit_batch_)
return;
+ PostCommitTask();
+}
+
+void DOMStorageArea::PostCommitTask() {
+ if (is_shutdown_ || !commit_batch_)
+ return;
+
+ DCHECK(backing_.get());
+
+ commit_rate_limiter_.add_samples(1);
+ data_rate_limiter_.add_samples(commit_batch_->GetDataSize());
+
// This method executes on the primary sequence, we schedule
// a task for immediate execution on the commit sequence.
DCHECK(task_runner_->IsRunningOnPrimarySequence());
@@ -362,7 +424,7 @@ void DOMStorageArea::CommitChanges(const CommitBatch* commit_batch) {
// This method executes on the commit sequence.
DCHECK(task_runner_->IsRunningOnCommitSequence());
backing_->CommitChanges(commit_batch->clear_all_first,
- commit_batch->changed_values);
+ commit_batch->changed_values);
// TODO(michaeln): what if CommitChanges returns false (e.g., we're trying to
// commit to a DB which is in an inconsistent state?)
task_runner_->PostTask(
@@ -379,9 +441,8 @@ void DOMStorageArea::OnCommitComplete() {
if (commit_batch_.get() && !commit_batches_in_flight_) {
// More changes have accrued, restart the timer.
task_runner_->PostDelayedTask(
- FROM_HERE,
- base::Bind(&DOMStorageArea::OnCommitTimer, this),
- base::TimeDelta::FromSeconds(kCommitTimerSeconds));
+ FROM_HERE, base::Bind(&DOMStorageArea::OnCommitTimer, this),
+ ComputeCommitDelay());
}
}
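
A standalone sketch of the throttling math above (plain C++ with doubles measured in seconds instead of base::TimeDelta; kAssumedQuotaBytes and the sample sizes are made-up values for illustration, not the real kPerStorageAreaQuota):

#include <algorithm>
#include <cstdio>

// Mirrors RateLimiter::ComputeTimeNeeded() / ComputeDelayNeeded() above,
// but with doubles in seconds rather than base::TimeDelta.
struct RateLimiterSketch {
  double rate;          // samples allowed per time quantum
  double quantum_secs;  // length of the time quantum, in seconds
  double samples;       // samples seen so far

  void add_samples(double s) { samples += s; }
  // Total time the samples seen should have taken at the desired rate.
  double TimeNeeded() const { return quantum_secs * (samples / rate); }
  // Extra wait required so the observed rate does not exceed the target.
  double DelayNeeded(double elapsed_secs) const {
    return std::max(0.0, TimeNeeded() - elapsed_secs);
  }
};

int main() {
  const double kAssumedQuotaBytes = 10 * 1024 * 1024;  // stand-in value
  RateLimiterSketch data{kAssumedQuotaBytes * 2, 24 * 3600, 0};  // kMaxBytesPerDay
  RateLimiterSketch commits{6, 3600, 0};                         // kMaxCommitsPerHour

  // Suppose ten commits of 10 KB each happen in the first minute.
  commits.add_samples(10);
  data.add_samples(10 * 10 * 1024);
  const double elapsed = 60;
  const double delay =
      std::max({5.0,  // kCommitDefaultDelaySecs
                commits.DelayNeeded(elapsed), data.DelayNeeded(elapsed)});
  std::printf("next commit delayed by %.0f seconds\n", delay);  // 5940
  return 0;
}

Here the commit-count limiter dominates: six commits per hour means ten commits "cost" 6000 seconds, so with only 60 seconds elapsed the next commit is pushed out by 5940 seconds, well beyond the 5-second default delay.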
diff --git a/content/browser/dom_storage/dom_storage_area.h b/content/browser/dom_storage/dom_storage_area.h
index ca28be1..d094789 100644
--- a/content/browser/dom_storage/dom_storage_area.h
+++ b/content/browser/dom_storage/dom_storage_area.h
@@ -5,6 +5,8 @@
#ifndef CONTENT_BROWSER_DOM_STORAGE_DOM_STORAGE_AREA_H_
#define CONTENT_BROWSER_DOM_STORAGE_DOM_STORAGE_AREA_H_
+#include <string>
+
#include "base/files/file_path.h"
#include "base/gtest_prod_util.h"
#include "base/memory/ref_counted.h"
@@ -65,6 +67,7 @@ class CONTENT_EXPORT DOMStorageArea
const std::string& destination_persistent_namespace_id);
bool HasUncommittedChanges() const;
+ void ScheduleImmediateCommit();
// Similar to Clear() but more optimized for just deleting
// without raising events.
@@ -92,14 +95,42 @@ class CONTENT_EXPORT DOMStorageArea
FRIEND_TEST_ALL_PREFIXES(DOMStorageAreaTest, CommitChangesAtShutdown);
FRIEND_TEST_ALL_PREFIXES(DOMStorageAreaTest, DeleteOrigin);
FRIEND_TEST_ALL_PREFIXES(DOMStorageAreaTest, PurgeMemory);
+ FRIEND_TEST_ALL_PREFIXES(DOMStorageAreaTest, RateLimiter);
FRIEND_TEST_ALL_PREFIXES(DOMStorageContextImplTest, PersistentIds);
friend class base::RefCountedThreadSafe<DOMStorageArea>;
+ // Used to rate limit commits.
+ class CONTENT_EXPORT RateLimiter {
+ public:
+ RateLimiter(size_t desired_rate, base::TimeDelta time_quantum);
+
+ void add_samples(size_t samples) {
+ samples_ += samples;
+ }
+
+ // Computes the total time needed to process the total samples seen
+ // at the desired rate.
+ base::TimeDelta ComputeTimeNeeded() const;
+
+ // Given the elapsed time since the start of the rate limiting session,
+ // computes the delay needed to mimic having processed the total samples
+ // seen at the desired rate.
+ base::TimeDelta ComputeDelayNeeded(
+ const base::TimeDelta elapsed_time) const;
+
+ private:
+ float rate_;
+ float samples_;
+ base::TimeDelta time_quantum_;
+ };
+
struct CommitBatch {
bool clear_all_first;
DOMStorageValuesMap changed_values;
+
CommitBatch();
~CommitBatch();
+ size_t GetDataSize() const;
};
~DOMStorageArea();
@@ -114,8 +145,10 @@ class CONTENT_EXPORT DOMStorageArea
// task sequence when complete.
CommitBatch* CreateCommitBatchIfNeeded();
void OnCommitTimer();
+ void PostCommitTask();
void CommitChanges(const CommitBatch* commit_batch);
void OnCommitComplete();
+ base::TimeDelta ComputeCommitDelay() const;
void ShutdownInCommitSequence();
@@ -131,6 +164,11 @@ class CONTENT_EXPORT DOMStorageArea
bool is_shutdown_;
scoped_ptr<CommitBatch> commit_batch_;
int commit_batches_in_flight_;
+ base::TimeTicks start_time_;
+ RateLimiter data_rate_limiter_;
+ RateLimiter commit_rate_limiter_;
+
+ DISALLOW_COPY_AND_ASSIGN(DOMStorageArea);
};
} // namespace content
diff --git a/content/browser/dom_storage/dom_storage_area_unittest.cc b/content/browser/dom_storage/dom_storage_area_unittest.cc
index c074cc0..1115590 100644
--- a/content/browser/dom_storage/dom_storage_area_unittest.cc
+++ b/content/browser/dom_storage/dom_storage_area_unittest.cc
@@ -472,4 +472,51 @@ TEST_F(DOMStorageAreaTest, DatabaseFileNames) {
base::FilePath().AppendASCII(".extensiononly")));
}
+TEST_F(DOMStorageAreaTest, RateLimiter) {
+ // Limit to 1000 samples per second.
+ DOMStorageArea::RateLimiter rate_limiter(
+ 1000, base::TimeDelta::FromSeconds(1));
+
+ // No samples have been added so no time/delay should be needed.
+ EXPECT_EQ(base::TimeDelta(),
+ rate_limiter.ComputeTimeNeeded());
+ EXPECT_EQ(base::TimeDelta(),
+ rate_limiter.ComputeDelayNeeded(base::TimeDelta()));
+ EXPECT_EQ(base::TimeDelta(),
+ rate_limiter.ComputeDelayNeeded(base::TimeDelta::FromDays(1)));
+
+ // Add a second's worth of samples.
+ rate_limiter.add_samples(1000);
+ EXPECT_EQ(base::TimeDelta::FromSeconds(1),
+ rate_limiter.ComputeTimeNeeded());
+ EXPECT_EQ(base::TimeDelta::FromSeconds(1),
+ rate_limiter.ComputeDelayNeeded(base::TimeDelta()));
+ EXPECT_EQ(base::TimeDelta(),
+ rate_limiter.ComputeDelayNeeded(base::TimeDelta::FromSeconds(1)));
+ EXPECT_EQ(base::TimeDelta::FromMilliseconds(250),
+ rate_limiter.ComputeDelayNeeded(
+ base::TimeDelta::FromMilliseconds(750)));
+ EXPECT_EQ(base::TimeDelta(),
+ rate_limiter.ComputeDelayNeeded(
+ base::TimeDelta::FromDays(1)));
+
+ // And another half-second's worth.
+ rate_limiter.add_samples(500);
+ EXPECT_EQ(base::TimeDelta::FromMilliseconds(1500),
+ rate_limiter.ComputeTimeNeeded());
+ EXPECT_EQ(base::TimeDelta::FromMilliseconds(1500),
+ rate_limiter.ComputeDelayNeeded(base::TimeDelta()));
+ EXPECT_EQ(base::TimeDelta::FromMilliseconds(500),
+ rate_limiter.ComputeDelayNeeded(base::TimeDelta::FromSeconds(1)));
+ EXPECT_EQ(base::TimeDelta::FromMilliseconds(750),
+ rate_limiter.ComputeDelayNeeded(
+ base::TimeDelta::FromMilliseconds(750)));
+ EXPECT_EQ(base::TimeDelta(),
+ rate_limiter.ComputeDelayNeeded(
+ base::TimeDelta::FromMilliseconds(1500)));
+ EXPECT_EQ(base::TimeDelta(),
+ rate_limiter.ComputeDelayNeeded(
+ base::TimeDelta::FromDays(1)));
+}
+
} // namespace content
diff --git a/content/browser/dom_storage/dom_storage_namespace.cc b/content/browser/dom_storage/dom_storage_namespace.cc
index 75bfdaa..fc85d83 100644
--- a/content/browser/dom_storage/dom_storage_namespace.cc
+++ b/content/browser/dom_storage/dom_storage_namespace.cc
@@ -123,16 +123,23 @@ void DOMStorageNamespace::PurgeMemory(PurgeOption option) {
return; // We can't purge w/o backing on disk.
AreaMap::iterator it = areas_.begin();
while (it != areas_.end()) {
- // Leave it alone if changes are pending
- if (it->second.area_->HasUncommittedChanges()) {
+ const AreaHolder& holder = it->second;
+
+ // We can't purge if there are changes pending.
+ if (holder.area_->HasUncommittedChanges()) {
+ if (holder.open_count_ == 0) {
+ // Schedule an immediate commit so the next time we're asked to purge,
+ // we can drop it from memory.
+ holder.area_->ScheduleImmediateCommit();
+ }
++it;
continue;
}
// If not in use, we can shut it down and remove
// it from our collection entirely.
- if (it->second.open_count_ == 0) {
- it->second.area_->Shutdown();
+ if (holder.open_count_ == 0) {
+ holder.area_->Shutdown();
areas_.erase(it++);
continue;
}
@@ -140,7 +147,7 @@ void DOMStorageNamespace::PurgeMemory(PurgeOption option) {
if (option == PURGE_AGGRESSIVE) {
// If aggressive is true, we clear caches and such
// for opened areas.
- it->second.area_->PurgeMemory();
+ holder.area_->PurgeMemory();
}
++it;
diff --git a/content/common/dom_storage/dom_storage_map.cc b/content/common/dom_storage/dom_storage_map.cc
index de33a52..71368bd 100644
--- a/content/common/dom_storage/dom_storage_map.cc
+++ b/content/common/dom_storage/dom_storage_map.cc
@@ -14,17 +14,6 @@ size_t size_of_item(const base::string16& key, const base::string16& value) {
return (key.length() + value.length()) * sizeof(base::char16);
}
-size_t CountBytes(const DOMStorageValuesMap& values) {
- if (values.size() == 0)
- return 0;
-
- size_t count = 0;
- DOMStorageValuesMap::const_iterator it = values.begin();
- for (; it != values.end(); ++it)
- count += size_of_item(it->first, it->second.string());
- return count;
-}
-
} // namespace
DOMStorageMap::DOMStorageMap(size_t quota)
@@ -119,4 +108,14 @@ void DOMStorageMap::ResetKeyIterator() {
last_key_index_ = 0;
}
+size_t DOMStorageMap::CountBytes(const DOMStorageValuesMap& values) {
+ if (values.empty())
+ return 0;
+
+ size_t count = 0;
+ for (const auto& pair : values)
+ count += size_of_item(pair.first, pair.second.string());
+ return count;
+}
+
} // namespace content
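
For reference, a standalone sketch of the byte accounting behind CountBytes() (std::u16string and std::map stand in here for base::string16 and DOMStorageValuesMap; every UTF-16 code unit of every key and value counts as sizeof a 16-bit character):

#include <cstddef>
#include <cstdio>
#include <map>
#include <string>

int main() {
  // Stand-in for a DOMStorageValuesMap with two entries.
  std::map<std::u16string, std::u16string> values = {
      {u"theme", u"dark"},         // 5 + 4 code units
      {u"sidebar", u"collapsed"},  // 7 + 9 code units
  };
  std::size_t count = 0;
  for (const auto& pair : values)
    count += (pair.first.length() + pair.second.length()) * sizeof(char16_t);
  std::printf("%zu bytes\n", count);  // (9 + 16) * 2 = 50
  return 0;
}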
diff --git a/content/common/dom_storage/dom_storage_map.h b/content/common/dom_storage/dom_storage_map.h
index 140de07..7130d27 100644
--- a/content/common/dom_storage/dom_storage_map.h
+++ b/content/common/dom_storage/dom_storage_map.h
@@ -46,6 +46,8 @@ class CONTENT_EXPORT DOMStorageMap
size_t quota() const { return quota_; }
void set_quota(size_t quota) { quota_ = quota; }
+ static size_t CountBytes(const DOMStorageValuesMap& values);
+
private:
friend class base::RefCountedThreadSafe<DOMStorageMap>;
~DOMStorageMap();
diff --git a/tools/metrics/histograms/histograms.xml b/tools/metrics/histograms/histograms.xml
index c292101..21b307d 100644
--- a/tools/metrics/histograms/histograms.xml
+++ b/tools/metrics/histograms/histograms.xml
@@ -5618,6 +5618,9 @@ Therefore, the affected-histogram name has to have at least one dot in it.
</histogram>
<histogram name="DOMStorage.clear" units="milliseconds">
+ <obsolete>
+ Deprecated 2012.
+ </obsolete>
<owner>michaeln@chromium.org</owner>
<summary>
Duration to execute localStorage.clear() or sessionStorage.clear().
@@ -5625,6 +5628,9 @@ Therefore, the affected-histogram name has to have at least one dot in it.
</histogram>
<histogram name="DOMStorage.getItem" units="milliseconds">
+ <obsolete>
+ Deprecated 2012.
+ </obsolete>
<owner>michaeln@chromium.org</owner>
<summary>
Duration to execute localStorage.getItem() or sessionStorage.getItem().
@@ -5632,6 +5638,9 @@ Therefore, the affected-histogram name has to have at least one dot in it.
</histogram>
<histogram name="DOMStorage.key" units="milliseconds">
+ <obsolete>
+ Deprecated 2012.
+ </obsolete>
<owner>michaeln@chromium.org</owner>
<summary>
Duration to execute localStorage.key() or sessionStorage.key().
@@ -5639,6 +5648,9 @@ Therefore, the affected-histogram name has to have at least one dot in it.
</histogram>
<histogram name="DOMStorage.length" units="milliseconds">
+ <obsolete>
+ Deprecated 2012.
+ </obsolete>
<owner>michaeln@chromium.org</owner>
<summary>
Duration to execute localStorage.length() or sessionStorage.length().
@@ -5646,6 +5658,9 @@ Therefore, the affected-histogram name has to have at least one dot in it.
</histogram>
<histogram name="DOMStorage.removeItem" units="milliseconds">
+ <obsolete>
+ Deprecated 2012.
+ </obsolete>
<owner>michaeln@chromium.org</owner>
<summary>
Duration to execute localStorage.removeItem() or
@@ -5654,6 +5669,9 @@ Therefore, the affected-histogram name has to have at least one dot in it.
</histogram>
<histogram name="DOMStorage.setItem" units="milliseconds">
+ <obsolete>
+ Deprecated 2012.
+ </obsolete>
<owner>michaeln@chromium.org</owner>
<summary>
Duration to execute localStorage.setItem() or sessionStorage.setItem().
@@ -12531,6 +12549,14 @@ Therefore, the affected-histogram name has to have at least one dot in it.
</summary>
</histogram>
+<histogram name="LocalStorage.CommitDelay">
+ <owner>michaeln@chromium.org</owner>
+ <summary>
+ Delay between a page making changes and those changes being written to the
+ DB.
+ </summary>
+</histogram>
+
<histogram name="LocalStorage.RendererLocalStorageSizeInKB" units="KB">
<owner>michaeln@chromium.org</owner>
<summary>