path: root/third_party/tcmalloc/page_heap.cc
author	mbelshe@google.com <mbelshe@google.com@0039d316-1c4b-4281-b951-d872f2087c98>	2009-09-02 17:33:23 +0000
committer	mbelshe@google.com <mbelshe@google.com@0039d316-1c4b-4281-b951-d872f2087c98>	2009-09-02 17:33:23 +0000
commit	c585892d2c42a47c95d06a684a6685156c545403 (patch)
tree	6f8ad5bafe16b73852706ad599770b99c18a3db2 /third_party/tcmalloc/page_heap.cc
parent	b80c6a3c6d7a50dc359e4552249d79d394a77f02 (diff)
Landing for Anton Muhin's tcmalloc patch:
http://codereview.chromium.org/180021/show

Restore decommitting in IncrementalScavenge and draft a Scavenge method to be
invoked periodically to reduce the amount of committed pages.

BUG=none
TEST=none
Review URL: http://codereview.chromium.org/187008

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@25188 0039d316-1c4b-4281-b951-d872f2087c98
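The heart of the patch is the decommit budget that the new Scavenge method computes: keep a floor of committed free pages, and decommit at most a fixed fraction of the pool per pass. A minimal standalone sketch of that computation follows; the constant values are hypothetical stand-ins, since the real definitions live in page_heap.h and are not part of this diff.

    #include <stdint.h>
    #include <algorithm>

    // Hypothetical stand-ins for the tuning constants referenced by the patch;
    // their real definitions are in page_heap.h and may differ.
    static const uint64_t kMinimumFreeCommittedPageCount = 512;
    static const uint64_t kMaxScavengeAmountFactor = 4;

    // Mirrors the budget computed in PageHeap::Scavenge(): never dip below the
    // floor of committed free pages, and never decommit more than
    // 1/kMaxScavengeAmountFactor of the pool in a single pass.
    uint64_t ScavengeBudget(uint64_t free_committed_pages) {
      if (free_committed_pages <= kMinimumFreeCommittedPageCount)
        return 0;
      return std::min(free_committed_pages - kMinimumFreeCommittedPageCount,
                      free_committed_pages / kMaxScavengeAmountFactor);
    }

    int main() {
      // With 10000 committed free pages the budget is 2500 (one quarter),
      // since the excess over the floor (9488) is larger.
      return ScavengeBudget(10000) == 2500 ? 0 : 1;
    }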
Diffstat (limited to 'third_party/tcmalloc/page_heap.cc')
-rw-r--r--	third_party/tcmalloc/page_heap.cc	| 142
1 file changed, 128 insertions(+), 14 deletions(-)
diff --git a/third_party/tcmalloc/page_heap.cc b/third_party/tcmalloc/page_heap.cc
index afe9e34..7e54bf5 100644
--- a/third_party/tcmalloc/page_heap.cc
+++ b/third_party/tcmalloc/page_heap.cc
@@ -51,7 +51,12 @@ PageHeap::PageHeap()
pagemap_cache_(0),
free_pages_(0),
system_bytes_(0),
+#if DEFER_DECOMMIT
+ free_committed_pages_(0),
+ pages_committed_since_last_scavenge_(0),
+#else
scavenge_counter_(0),
+#endif
// Start scavenging at kMaxPages list
scavenge_index_(kMaxPages-1) {
COMPILE_ASSERT(kNumClasses <= (1 << PageMapCache::kValuebits), valuebits);
@@ -150,6 +155,16 @@ Span* PageHeap::Split(Span* span, Length n) {
return leftover;
}
+void PageHeap::CommitSpan(Span* span) {
+ TCMalloc_SystemCommit(
+ reinterpret_cast<void*>(span->start << kPageShift),
+ static_cast<size_t>(span->length << kPageShift)
+ );
+#if DEFER_DECOMMIT
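+ // Record the commit so the next Scavenge() pass will back off.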
+ pages_committed_since_last_scavenge_ += span->length;
+#endif
+}
+
Span* PageHeap::Carve(Span* span, Length n) {
ASSERT(n > 0);
ASSERT(span->location != Span::IN_USE);
@@ -175,14 +190,21 @@ Span* PageHeap::Carve(Span* span, Length n) {
span->length = n;
pagemap_.set(span->start + n - 1, span);
}
- ASSERT(Check());
- free_pages_ -= n;
if (old_location == Span::ON_RETURNED_FREELIST) {
// We need to recommit this address space.
- TCMalloc_SystemCommit(
- reinterpret_cast<void*>(span->start << kPageShift),
- static_cast<size_t>(span->length << kPageShift));
+ CommitSpan(span);
+ } else {
+#if DEFER_DECOMMIT
+ // The newly allocated memory is from a span that's already committed.
+ // Update the free_committed_pages_ count.
+ ASSERT(free_committed_pages_ >= n);
+ free_committed_pages_ -= n;
+#endif
}
+ ASSERT(span->location == Span::IN_USE);
+ ASSERT(span->length == n);
+ ASSERT(Check());
+ free_pages_ -= n;
return span;
}
@@ -201,10 +223,9 @@ void PageHeap::Delete(Span* span) {
// care about the pagemap entries for the boundaries.
//
// Note that the spans we merge into "span" may come out of
- // a "normal" list. For simplicity, we move these into the
- // "returned" list of the appropriate size class. We do this
- // so that we can maximize large, continuous blocks of freed
- // space.
+ // a "returned" list. We move those into "normal" list
+ // as for 'immediate' operations we favour committing over
+ // decommitting (decommitting is performed offline).
const PageID p = span->start;
const Length n = span->length;
Span* prev = GetDescriptor(p-1);
@@ -212,6 +233,12 @@ void PageHeap::Delete(Span* span) {
// Merge preceding span into this span
ASSERT(prev->start + prev->length == p);
const Length len = prev->length;
+ if (prev->location == Span::ON_RETURNED_FREELIST) {
+ CommitSpan(prev);
+#if DEFER_DECOMMIT
+ free_committed_pages_ += len;
+#endif
+ }
DLL_Remove(prev);
DeleteSpan(prev);
span->start -= len;
@@ -224,6 +251,12 @@ void PageHeap::Delete(Span* span) {
// Merge next span into this span
ASSERT(next->start == p+n);
const Length len = next->length;
+ if (next->location == Span::ON_RETURNED_FREELIST) {
+ CommitSpan(next);
+#if DEFER_DECOMMIT
+ free_committed_pages_ += len;
+#endif
+ }
DLL_Remove(next);
DeleteSpan(next);
span->length += len;
@@ -232,19 +265,93 @@ void PageHeap::Delete(Span* span) {
}
Event(span, 'D', span->length);
- span->location = Span::ON_RETURNED_FREELIST;
- TCMalloc_SystemRelease(reinterpret_cast<void*>(span->start << kPageShift),
- static_cast<size_t>(span->length << kPageShift));
+ span->location = Span::ON_NORMAL_FREELIST;
if (span->length < kMaxPages)
- DLL_Prepend(&free_[span->length].returned, span);
+ DLL_Prepend(&free_[span->length].normal, span);
else
- DLL_Prepend(&large_.returned, span);
+ DLL_Prepend(&large_.normal, span);
free_pages_ += n;
+#if DEFER_DECOMMIT
+ free_committed_pages_ += n;
+#endif
+#if DEFER_DECOMMIT
+ // TODO(antonm): notify that could start scavenging
+#else
IncrementalScavenge(n);
+#endif
ASSERT(Check());
}
+
+void PageHeap::Scavenge() {
+#if DEFER_DECOMMIT
+ // If we had to commit memory since the last scavenge, we don't have
+ // enough free committed pages of the necessary sizes for the volume of
+ // allocations we are doing, so hold off on releasing memory back to the system.
+ if (pages_committed_since_last_scavenge_ > 0) {
+ pages_committed_since_last_scavenge_ = 0;
+ return;
+ }
+
+ if (free_committed_pages_ <= kMinimumFreeCommittedPageCount) {
+ return;
+ }
+
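+ // Decommit only the excess over the floor, and at most a
+ // 1/kMaxScavengeAmountFactor fraction of the committed pool per pass.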
+ uint64_t to_decommit = std::min(
+ free_committed_pages_ - kMinimumFreeCommittedPageCount,
+ free_committed_pages_ / kMaxScavengeAmountFactor);
+ to_decommit = DecommitFromSpanList(&large_, to_decommit);
+ for (int i = kMaxPages - 1; i >= 0; i--) {
+ to_decommit = DecommitFromSpanList(&free_[i], to_decommit);
+ }
+
+ // Force at least one decommit from the large list, otherwise big blocks
+ // sitting there might keep us from releasing the smaller blocks behind them.
+ if (to_decommit > 0) {
+ if (!DLL_IsEmpty(&large_.normal)) {
+ DecommitLastSpan(&large_, large_.normal.prev);
+ }
+ }
+#endif
+}
+
+#if DEFER_DECOMMIT
+Length PageHeap::DecommitLastSpan(SpanList* span_list, Span* span) {
+ ASSERT(!DLL_IsEmpty(&span_list->normal));
+ ASSERT(span_list->normal.prev == span);
+
+ Length length = span->length;
+
+ DLL_Remove(span);
+
+ TCMalloc_SystemRelease(
+     reinterpret_cast<void*>(span->start << kPageShift),
+     static_cast<size_t>(span->length << kPageShift));
+ span->location = Span::ON_RETURNED_FREELIST;
+ ASSERT(free_committed_pages_ >= length);
+ free_committed_pages_ -= length;
+
+ DLL_Prepend(&span_list->returned, span);
+
+ return length;
+}
+
+uint64_t PageHeap::DecommitFromSpanList(SpanList* span_list, uint64_t to_decommit) {
+ while (!DLL_IsEmpty(&span_list->normal)) {
+ // Release the last span on the normal portion of this list.
+ Span* span = span_list->normal.prev;
+
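+ // Spans are decommitted whole: stop once the tail span is larger than
+ // the remaining budget.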
+ if (span->length > to_decommit) {
+ return to_decommit;
+ }
+
+ to_decommit -= DecommitLastSpan(span_list, span);
+ }
+
+ return to_decommit;
+}
+
+#else
+
void PageHeap::IncrementalScavenge(Length n) {
// Fast path; not yet time to release memory
scavenge_counter_ -= n;
@@ -304,6 +411,7 @@ void PageHeap::IncrementalScavenge(Length n) {
// Nothing to scavenge, delay for a while
scavenge_counter_ = kDefaultReleaseDelay;
}
+#endif
void PageHeap::RegisterSizeClass(Span* span, size_t sc) {
// Associate span object with all interior pages as well
@@ -405,6 +513,9 @@ bool PageHeap::GrowHeap(Length n) {
if (ptr == NULL) return false;
}
ask = actual_size >> kPageShift;
+#if DEFER_DECOMMIT
+ pages_committed_since_last_scavenge_ += ask;
+#endif
RecordGrowth(ask << kPageShift);
uint64_t old_system_bytes = system_bytes_;
@@ -490,6 +601,9 @@ void PageHeap::ReleaseFreePages() {
}
ReleaseFreeList(&large_.normal, &large_.returned);
ASSERT(Check());
+#if DEFER_DECOMMIT
+ free_committed_pages_ = 0;
+#endif
}
} // namespace tcmalloc
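
The patch deliberately leaves the periodic trigger unwired (see the TODO(antonm) in Delete). For illustration only, a caller might drive the new method as sketched below; the function name and lock parameter are hypothetical, while SpinLock/SpinLockHolder are tcmalloc's own (base/spinlock.h).

    #include "base/spinlock.h"  // tcmalloc's SpinLock / SpinLockHolder.
    #include "page_heap.h"

    // Hypothetical periodic driver for PageHeap::Scavenge().
    void OnScavengeTimer(tcmalloc::PageHeap* heap, SpinLock* pageheap_lock) {
      SpinLockHolder h(pageheap_lock);  // Scavenge walks and mutates free lists.
      heap->Scavenge();  // Compiles to an empty body unless DEFER_DECOMMIT is set.
    }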