summaryrefslogtreecommitdiffstats
path: root/base
diff options
context:
space:
mode:
authorpkasting@chromium.org <pkasting@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>2009-09-30 20:42:27 +0000
committerpkasting@chromium.org <pkasting@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>2009-09-30 20:42:27 +0000
commit57a336aa8b9a591e42fadcae359b20d596af7526 (patch)
tree74c2068e69aaa70c2040e62d8d14dc3583246c2b /base
parent0003f509cd63c23518dd20e62e871807ae973065 (diff)
downloadchromium_src-57a336aa8b9a591e42fadcae359b20d596af7526.zip
chromium_src-57a336aa8b9a591e42fadcae359b20d596af7526.tar.gz
chromium_src-57a336aa8b9a591e42fadcae359b20d596af7526.tar.bz2
Remove all MemoryModel-related code, since we're unlikely to use it again. Also changes some sample code to use a random generic name ("PruningAlgorithm") instead of MemoryModel in hopes that no one will be confused.
This does not remove IdleTimer, even though it now has no users. I do plan to remove it, but I want to do that in a separate commit so it's easy to reference and revert if at some later point we want this functionality. BUG=none TEST=none Review URL: http://codereview.chromium.org/242079 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@27652 0039d316-1c4b-4281-b951-d872f2087c98
Diffstat (limited to 'base')
-rw-r--r--base/field_trial.h6
-rw-r--r--base/process.h18
-rw-r--r--base/process_posix.cc15
-rw-r--r--base/process_win.cc71
4 files changed, 3 insertions, 107 deletions
diff --git a/base/field_trial.h b/base/field_trial.h
index 82bf3b36..0779981 100644
--- a/base/field_trial.h
+++ b/base/field_trial.h
@@ -22,7 +22,7 @@
//------------------------------------------------------------------------------
// Example: Suppose we have an experiment involving memory, such as determining
-// the impact of memory model command line flags actual memory use.
+// the impact of some pruning algorithm.
// We assume that we already have a histogram of memory usage, such as:
// HISTOGRAM_COUNTS("Memory.RendererTotal", count);
@@ -38,9 +38,9 @@
// int group2 = trial->AppendGroup("_low_mem", 20); // 2% in _low_mem group.
// // Take action depending of which group we randomly land in.
// if (trial->group() == group1)
-// SetMemoryModel(HIGH); // Sample setting of browser state.
+// SetPruningAlgorithm(kType1); // Sample setting of browser state.
// else if (trial->group() == group2)
-// SetMemoryModel(LOW); // Sample alternate setting.
+// SetPruningAlgorithm(kType2); // Sample alternate setting.
// We then modify any histograms we wish to correlate with our experiment to
// have slightly different names, depending on what group the trial instance
diff --git a/base/process.h b/base/process.h
index 459ad58..fa076b9 100644
--- a/base/process.h
+++ b/base/process.h
@@ -66,24 +66,6 @@ class Process {
// Returns true if the priority was changed, false otherwise.
bool SetProcessBackgrounded(bool value);
- // Reduces the working set of memory used by the process.
- // The algorithm used by this function is intentionally vague. Repeated calls
- // to this function consider the process' previous required Working Set sizes
- // to determine a reasonable reduction. This helps give memory back to the OS
- // in increments without over releasing memory.
- // When the WorkingSet is reduced, it is permanent, until the caller calls
- // UnReduceWorkingSet.
- // Returns true if successful, false otherwise.
- bool ReduceWorkingSet();
-
- // Undoes the effects of prior calls to ReduceWorkingSet().
- // Returns true if successful, false otherwise.
- bool UnReduceWorkingSet();
-
- // Releases as much of the working set back to the OS as possible.
- // Returns true if successful, false otherwise.
- bool EmptyWorkingSet();
-
private:
ProcessHandle process_;
size_t last_working_set_size_;
diff --git a/base/process_posix.cc b/base/process_posix.cc
index c9a7dfc..f0e019a 100644
--- a/base/process_posix.cc
+++ b/base/process_posix.cc
@@ -36,21 +36,6 @@ bool Process::SetProcessBackgrounded(bool value) {
return true;
}
-bool Process::ReduceWorkingSet() {
- // http://code.google.com/p/chromium/issues/detail?id=8083
- return false;
-}
-
-bool Process::UnReduceWorkingSet() {
- // http://code.google.com/p/chromium/issues/detail?id=8083
- return false;
-}
-
-bool Process::EmptyWorkingSet() {
- // http://code.google.com/p/chromium/issues/detail?id=8083
- return false;
-}
-
ProcessId Process::pid() const {
if (process_ == 0)
return 0;
diff --git a/base/process_win.cc b/base/process_win.cc
index 263e577..469e7cd 100644
--- a/base/process_win.cc
+++ b/base/process_win.cc
@@ -38,77 +38,6 @@ bool Process::SetProcessBackgrounded(bool value) {
return (SetPriorityClass(process_, priority) != 0);
}
-// According to MSDN, these are the default values which XP
-// uses to govern working set soft limits.
-// http://msdn.microsoft.com/en-us/library/ms686234.aspx
-static const int kWinDefaultMinSet = 50 * 4096;
-static const int kWinDefaultMaxSet = 345 * 4096;
-static const int kDampingFactor = 2;
-
-bool Process::ReduceWorkingSet() {
- if (!process_)
- return false;
- // The idea here is that when the process' working set has gone
- // down, we want to release those pages to the OS quickly. However,
- // when it is not going down, we want to be careful not to release
- // too much back to the OS, as it could cause additional paging.
-
- // We use a damping function to lessen the working set over time.
- // As the process grows/shrinks, this algorithm will lag with
- // working set reduction.
- //
- // The intended algorithm is:
- // TargetWorkingSetSize = (LastWorkingSet/2 + CurrentWorkingSet) /2
-
- scoped_ptr<ProcessMetrics> metrics(
- ProcessMetrics::CreateProcessMetrics(process_));
- WorkingSetKBytes working_set;
- if (!metrics->GetWorkingSetKBytes(&working_set))
- return false;
-
-
- // We want to compute the amount of working set that the process
- // needs to keep in memory. Since other processes contain the
- // pages which are shared, we don't need to reserve them in our
- // process, the system already has them tagged. Keep in mind, we
- // don't get to control *which* pages get released, but if we
- // assume reasonable distribution of pages, this should generally
- // be the right value.
- size_t current_working_set_size = working_set.priv +
- working_set.shareable;
-
- size_t max_size = current_working_set_size;
- if (last_working_set_size_)
- max_size = (max_size + last_working_set_size_) / 2; // Average.
- max_size *= 1024; // Convert to KBytes.
- last_working_set_size_ = current_working_set_size / kDampingFactor;
-
- BOOL rv = SetProcessWorkingSetSize(process_, kWinDefaultMinSet, max_size);
- return rv == TRUE;
-}
-
-bool Process::UnReduceWorkingSet() {
- if (!process_)
- return false;
-
- if (!last_working_set_size_)
- return true; // There was nothing to undo.
-
- // We've had a reduced working set. Make sure we have lots of
- // headroom now that we're active again.
- size_t limit = last_working_set_size_ * kDampingFactor * 2 * 1024;
- BOOL rv = SetProcessWorkingSetSize(process_, kWinDefaultMinSet, limit);
- return rv == TRUE;
-}
-
-bool Process::EmptyWorkingSet() {
- if (!process_)
- return false;
-
- BOOL rv = SetProcessWorkingSetSize(process_, -1, -1);
- return rv == TRUE;
-}
-
ProcessId Process::pid() const {
if (process_ == 0)
return 0;