author     mbelshe@google.com <mbelshe@google.com@0039d316-1c4b-4281-b951-d872f2087c98>   2009-09-02 17:33:23 +0000
committer  mbelshe@google.com <mbelshe@google.com@0039d316-1c4b-4281-b951-d872f2087c98>   2009-09-02 17:33:23 +0000
commit     c585892d2c42a47c95d06a684a6685156c545403 (patch)
tree       6f8ad5bafe16b73852706ad599770b99c18a3db2 /third_party/tcmalloc
parent     b80c6a3c6d7a50dc359e4552249d79d394a77f02 (diff)
Landing for Anton Muhin's tcmalloc patch:
http://codereview.chromium.org/180021/show

Restore decommitting in IncrementalScavenge and draft a Scavenge method
to be invoked periodically to reduce the amount of committed pages.

BUG=none
TEST=none
Review URL: http://codereview.chromium.org/187008

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@25188 0039d316-1c4b-4281-b951-d872f2087c98
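For orientation, the deferred path sizes each scavenge pass from the count of committed free pages. A minimal standalone sketch (ours, not part of the patch) of that budget computation, using the constants and the min() expression the patch introduces in Scavenge():

    #include <algorithm>
    #include <stdint.h>

    static const uint64_t kMinimumFreeCommittedPageCount = 512;  // floor kept committed
    static const uint64_t kMaxScavengeAmountFactor = 2;          // non-Windows value

    // Pages one Scavenge() pass may decommit: everything above the floor,
    // capped at free_committed_pages / kMaxScavengeAmountFactor.
    uint64_t ScavengeBudget(uint64_t free_committed_pages) {
      if (free_committed_pages <= kMinimumFreeCommittedPageCount)
        return 0;
      return std::min(free_committed_pages - kMinimumFreeCommittedPageCount,
                      free_committed_pages / kMaxScavengeAmountFactor);
    }

    // Example: 2048 committed free pages -> min(2048 - 512, 2048 / 2) = 1024.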
Diffstat (limited to 'third_party/tcmalloc')
-rw-r--r--   third_party/tcmalloc/config_linux.h  |    6
-rw-r--r--   third_party/tcmalloc/config_win.h    |    6
-rw-r--r--   third_party/tcmalloc/page_heap.cc    |  142
-rw-r--r--   third_party/tcmalloc/page_heap.h     |  259
-rw-r--r--   third_party/tcmalloc/tcmalloc.gyp    |    1
5 files changed, 400 insertions(+), 14 deletions(-)
diff --git a/third_party/tcmalloc/config_linux.h b/third_party/tcmalloc/config_linux.h
index b359ed5..af12784 100644
--- a/third_party/tcmalloc/config_linux.h
+++ b/third_party/tcmalloc/config_linux.h
@@ -31,6 +31,12 @@
*/
#define HAVE_DECL_VALLOC 1
+/* Define to 1 if you prefer to defer decommitting pages
+ (on OSes which support it) so that it can be performed offline
+ (either by a background thread or in idle time).
+ */
+#define DEFER_DECOMMIT 0
+
/* Define to 1 if you have the <dlfcn.h> header file. */
#define HAVE_DLFCN_H 1
diff --git a/third_party/tcmalloc/config_win.h b/third_party/tcmalloc/config_win.h
index 9f4efa8..51c73c5 100644
--- a/third_party/tcmalloc/config_win.h
+++ b/third_party/tcmalloc/config_win.h
@@ -61,6 +61,12 @@
*/
#undef HAVE_DECL_VALLOC
+/* Define to 1 if you prefer to defer decommitting pages
+ (on OSes which support it) so that it can be performed offline
+ (either by a background thread or in idle time).
+ */
+#define DEFER_DECOMMIT 0
+
/* Define to 1 if you have the <dlfcn.h> header file. */
#undef HAVE_DLFCN_H
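Both configs leave DEFER_DECOMMIT at 0, so the existing immediate-release path stays active. As a reading aid, a compilable toy (ours, not part of the patch) of the two strategies the flag selects in page_heap.cc below:

    #include <stdint.h>

    #ifndef DEFER_DECOMMIT
    #define DEFER_DECOMMIT 0  // both configs in this patch default to 0
    #endif

    // Toy model of the tail of PageHeap::Delete() under each setting.
    void OnSpanFreed(uint64_t pages) {
    #if DEFER_DECOMMIT
      // Deferred: only counters are updated here; a periodic Scavenge()
      // pass decommits later, off the free fast path.
      (void)pages;
    #else
      // Immediate: IncrementalScavenge(pages) runs inline and may call
      // TCMalloc_SystemRelease before returning to the caller.
      (void)pages;
    #endif
    }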
diff --git a/third_party/tcmalloc/page_heap.cc b/third_party/tcmalloc/page_heap.cc
index afe9e34..7e54bf5 100644
--- a/third_party/tcmalloc/page_heap.cc
+++ b/third_party/tcmalloc/page_heap.cc
@@ -51,7 +51,12 @@ PageHeap::PageHeap()
pagemap_cache_(0),
free_pages_(0),
system_bytes_(0),
+#if DEFER_DECOMMIT
+ free_committed_pages_(0),
+ pages_committed_since_last_scavenge_(0),
+#else
scavenge_counter_(0),
+#endif
// Start scavenging at kMaxPages list
scavenge_index_(kMaxPages-1) {
COMPILE_ASSERT(kNumClasses <= (1 << PageMapCache::kValuebits), valuebits);
@@ -150,6 +155,16 @@ Span* PageHeap::Split(Span* span, Length n) {
return leftover;
}
+void PageHeap::CommitSpan(Span* span) {
+ TCMalloc_SystemCommit(
+ reinterpret_cast<void*>(span->start << kPageShift),
+ static_cast<size_t>(span->length << kPageShift)
+ );
+#if DEFER_DECOMMIT
+ pages_committed_since_last_scavenge_ += span->length;
+#endif
+}
+
Span* PageHeap::Carve(Span* span, Length n) {
ASSERT(n > 0);
ASSERT(span->location != Span::IN_USE);
@@ -175,14 +190,21 @@ Span* PageHeap::Carve(Span* span, Length n) {
span->length = n;
pagemap_.set(span->start + n - 1, span);
}
- ASSERT(Check());
- free_pages_ -= n;
if (old_location == Span::ON_RETURNED_FREELIST) {
// We need to recommit this address space.
- TCMalloc_SystemCommit(
- reinterpret_cast<void*>(span->start << kPageShift),
- static_cast<size_t>(span->length << kPageShift));
+ CommitSpan(span);
+ } else {
+#if DEFER_DECOMMIT
+ // The newly allocated memory is from a span that's already committed.
+ // Update the free_committed_pages_ count.
+ ASSERT(free_committed_pages_ >= n);
+ free_committed_pages_ -= n;
+#endif
}
+ ASSERT(span->location == Span::IN_USE);
+ ASSERT(span->length == n);
+ ASSERT(Check());
+ free_pages_ -= n;
return span;
}
@@ -201,10 +223,9 @@ void PageHeap::Delete(Span* span) {
// care about the pagemap entries for the boundaries.
//
// Note that the spans we merge into "span" may come out of
- // a "normal" list. For simplicity, we move these into the
- // "returned" list of the appropriate size class. We do this
- // so that we can maximize large, continuous blocks of freed
- // space.
+ // a "returned" list. We move those into "normal" list
+ // as for 'immediate' operations we favour committing over
+ // decommitting (decommitting is performed offline).
const PageID p = span->start;
const Length n = span->length;
Span* prev = GetDescriptor(p-1);
@@ -212,6 +233,12 @@ void PageHeap::Delete(Span* span) {
// Merge preceding span into this span
ASSERT(prev->start + prev->length == p);
const Length len = prev->length;
+ if (prev->location == Span::ON_RETURNED_FREELIST) {
+ CommitSpan(prev);
+#if DEFER_DECOMMIT
+ free_committed_pages_ += len;
+#endif
+ }
DLL_Remove(prev);
DeleteSpan(prev);
span->start -= len;
@@ -224,6 +251,12 @@ void PageHeap::Delete(Span* span) {
// Merge next span into this span
ASSERT(next->start == p+n);
const Length len = next->length;
+ if (next->location == Span::ON_RETURNED_FREELIST) {
+ CommitSpan(next);
+#if DEFER_DECOMMIT
+ free_committed_pages_ += len;
+#endif
+ }
DLL_Remove(next);
DeleteSpan(next);
span->length += len;
@@ -232,19 +265,93 @@ void PageHeap::Delete(Span* span) {
}
Event(span, 'D', span->length);
- span->location = Span::ON_RETURNED_FREELIST;
- TCMalloc_SystemRelease(reinterpret_cast<void*>(span->start << kPageShift),
- static_cast<size_t>(span->length << kPageShift));
+ span->location = Span::ON_NORMAL_FREELIST;
if (span->length < kMaxPages)
- DLL_Prepend(&free_[span->length].returned, span);
+ DLL_Prepend(&free_[span->length].normal, span);
else
- DLL_Prepend(&large_.returned, span);
+ DLL_Prepend(&large_.normal, span);
free_pages_ += n;
+#if DEFER_DECOMMIT
+ free_committed_pages_ += n;
+#endif
+#if DEFER_DECOMMIT
+ // TODO(antonm): notify that scavenging could be started.
+#else
IncrementalScavenge(n);
+#endif
ASSERT(Check());
}
+
+void PageHeap::Scavenge() {
+#if DEFER_DECOMMIT
+ // If we had to commit memory since the last scavenge, we don't have
+ // enough free committed pages of the necessary sizes for the amount of
+ // allocation we do, so hold off on releasing memory back to the system.
+ if (pages_committed_since_last_scavenge_ > 0) {
+ pages_committed_since_last_scavenge_ = 0;
+ return;
+ }
+
+ if (free_committed_pages_ <= kMinimumFreeCommittedPageCount) {
+ return;
+ }
+
+ uint64_t to_decommit = std::min(
+ free_committed_pages_ - kMinimumFreeCommittedPageCount,
+ free_committed_pages_ / kMaxScavengeAmountFactor);
+ to_decommit = DecommitFromSpanList(&large_, to_decommit);
+ for (int i = kMaxPages - 1; i >= 0; i--) {
+ to_decommit = DecommitFromSpanList(&free_[i], to_decommit);
+ }
+
+ // Force at least one decommit from the large list; otherwise a big span
+ // sitting at its tail could keep smaller spans behind it from being released.
+ if (to_decommit > 0) {
+ if (!DLL_IsEmpty(&large_.normal)) {
+ DecommitLastSpan(&large_, large_.normal.prev);
+ }
+ }
+#endif
+}
+
+#if DEFER_DECOMMIT
+Length PageHeap::DecommitLastSpan(SpanList* span_list, Span* span) {
+ ASSERT(!DLL_IsEmpty(&span_list->normal));
+ ASSERT(span_list->normal.prev == span);
+
+ Length length = span->length;
+
+ DLL_Remove(span);
+
+ TCMalloc_SystemRelease(reinterpret_cast<void*>(span->start << kPageShift), span->length << kPageShift);
+ span->location = Span::ON_RETURNED_FREELIST;
+ ASSERT(free_committed_pages_ >= length);
+ free_committed_pages_ -= length;
+
+ DLL_Prepend(&span_list->returned, span);
+
+ return length;
+}
+
+uint64_t PageHeap::DecommitFromSpanList(SpanList* span_list, uint64_t to_decommit) {
+ while (!DLL_IsEmpty(&span_list->normal)) {
+ // Release the last span on the normal portion of this list.
+ Span* span = span_list->normal.prev;
+
+ if (span->length > to_decommit) {
+ return to_decommit;
+ }
+
+ to_decommit -= DecommitLastSpan(span_list, span);
+ }
+
+ return to_decommit;
+}
+
+#else
+
void PageHeap::IncrementalScavenge(Length n) {
// Fast path; not yet time to release memory
scavenge_counter_ -= n;
@@ -304,6 +411,7 @@ void PageHeap::IncrementalScavenge(Length n) {
// Nothing to scavenge, delay for a while
scavenge_counter_ = kDefaultReleaseDelay;
}
+#endif
void PageHeap::RegisterSizeClass(Span* span, size_t sc) {
// Associate span object with all interior pages as well
@@ -405,6 +513,9 @@ bool PageHeap::GrowHeap(Length n) {
if (ptr == NULL) return false;
}
ask = actual_size >> kPageShift;
+#if DEFER_DECOMMIT
+ pages_committed_since_last_scavenge_ += ask;
+#endif
RecordGrowth(ask << kPageShift);
uint64_t old_system_bytes = system_bytes_;
@@ -490,6 +601,9 @@ void PageHeap::ReleaseFreePages() {
}
ReleaseFreeList(&large_.normal, &large_.returned);
ASSERT(Check());
+#if DEFER_DECOMMIT
+ free_committed_pages_ = 0;
+#endif
}
} // namespace tcmalloc
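The deferred path leaves its trigger open (the TODO in Delete() above only notes that scavenging could start). Purely as an illustration, a timer-driven caller might look like the following; the hook name and the commented-out locking are our assumptions, not part of the patch:

    #include "page_heap.h"

    // Hypothetical periodic hook. PageHeap is guarded by tcmalloc's global
    // page-heap spinlock, so a real caller must hold it around Scavenge().
    void OnScavengeTimer(tcmalloc::PageHeap* heap) {
      // SpinLockHolder h(Static::pageheap_lock());  // assumed guard
      heap->Scavenge();  // decommits committed free pages above the floor
    }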
diff --git a/third_party/tcmalloc/page_heap.h b/third_party/tcmalloc/page_heap.h
new file mode 100644
index 0000000..242623b
--- /dev/null
+++ b/third_party/tcmalloc/page_heap.h
@@ -0,0 +1,259 @@
+// Copyright (c) 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// ---
+// Author: Sanjay Ghemawat <opensource@google.com>
+
+#ifndef TCMALLOC_PAGE_HEAP_H_
+#define TCMALLOC_PAGE_HEAP_H_
+
+#include <config.h>
+#include "common.h"
+#include "packed-cache-inl.h"
+#include "pagemap.h"
+#include "span.h"
+
+// This #ifdef should almost never be set. Set NO_TCMALLOC_SAMPLES if
+// you're porting to a system where you really can't get a stacktrace.
+#ifdef NO_TCMALLOC_SAMPLES
+ // We use #define so code compiles even if you #include stacktrace.h somehow.
+# define GetStackTrace(stack, depth, skip) (0)
+#else
+# include <google/stacktrace.h>
+#endif
+
+
+namespace tcmalloc {
+
+// -------------------------------------------------------------------------
+// Map from page-id to per-page data
+// -------------------------------------------------------------------------
+
+// We use PageMap2<> for 32-bit and PageMap3<> for 64-bit machines.
+// We also use a simple one-level cache for hot PageID-to-sizeclass mappings,
+// because sometimes the sizeclass is all the information we need.
+
+// Selector class -- general selector uses 3-level map
+template <int BITS> class MapSelector {
+ public:
+ typedef TCMalloc_PageMap3<BITS-kPageShift> Type;
+ typedef PackedCache<BITS-kPageShift, uint64_t> CacheType;
+};
+
+// A two-level map for 32-bit machines
+template <> class MapSelector<32> {
+ public:
+ typedef TCMalloc_PageMap2<32-kPageShift> Type;
+ typedef PackedCache<32-kPageShift, uint16_t> CacheType;
+};
+
+// -------------------------------------------------------------------------
+// Page-level allocator
+// * Eager coalescing
+//
+// Heap for page-level allocation.  We allow allocating and freeing a
+// contiguous run of pages (called a "span").
+// -------------------------------------------------------------------------
+
+class PageHeap {
+ public:
+ PageHeap();
+
+ // Allocate a run of "n" pages. Returns zero if out of memory.
+ // Caller should not pass "n == 0" -- instead, n should have
+ // been rounded up already.
+ Span* New(Length n);
+
+ // Delete the span "[p, p+n-1]".
+ // REQUIRES: span was returned by earlier call to New() and
+ // has not yet been deleted.
+ void Delete(Span* span);
+
+ // Mark an allocated span as being used for small objects of the
+ // specified size-class.
+ // REQUIRES: span was returned by an earlier call to New()
+ // and has not yet been deleted.
+ void RegisterSizeClass(Span* span, size_t sc);
+
+ // Split an allocated span into two spans: one of length "n" pages
+ // followed by another span of length "span->length - n" pages.
+ // Modifies "*span" to point to the first span of length "n" pages.
+ // Returns a pointer to the second span.
+ //
+ // REQUIRES: "0 < n < span->length"
+ // REQUIRES: span->location == IN_USE
+ // REQUIRES: span->sizeclass == 0
+ Span* Split(Span* span, Length n);
+
+ // Return the descriptor for the specified page.
+ inline Span* GetDescriptor(PageID p) const {
+ return reinterpret_cast<Span*>(pagemap_.get(p));
+ }
+
+ // Dump state to stderr
+ void Dump(TCMalloc_Printer* out);
+
+ // Return number of bytes allocated from system
+ inline uint64_t SystemBytes() const { return system_bytes_; }
+
+ // Return number of free bytes in heap
+ uint64_t FreeBytes() const {
+ return (static_cast<uint64_t>(free_pages_) << kPageShift);
+ }
+
+ bool Check();
+ // Like Check() but does some more comprehensive checking.
+ bool CheckExpensive();
+ bool CheckList(Span* list, Length min_pages, Length max_pages,
+ int freelist); // ON_NORMAL_FREELIST or ON_RETURNED_FREELIST
+
+ // Release all pages on the free list for reuse by the OS:
+ void ReleaseFreePages();
+
+ // Return 0 if we have no information, or else the correct sizeclass for p.
+ // Reads and writes to pagemap_cache_ do not require locking.
+ // The entries are 64 bits on 64-bit hardware and 16 bits on
+ // 32-bit hardware, and we don't mind raciness as long as each read of
+ // an entry yields a valid entry, not a partially updated entry.
+ size_t GetSizeClassIfCached(PageID p) const {
+ return pagemap_cache_.GetOrDefault(p, 0);
+ }
+ void CacheSizeClass(PageID p, size_t cl) const { pagemap_cache_.Put(p, cl); }
+
+ // Attempt to free some free pages currently not used.
+ void Scavenge();
+
+ private:
+ // Allocates a big block of memory for the pagemap once we reach more than
+ // 128MB
+ static const size_t kPageMapBigAllocationThreshold = 128 << 20;
+
+ // Minimum number of pages to fetch from system at a time. Must be
+ // significantly bigger than kBlockSize to amortize system-call
+ // overhead, and also to reduce external fragmentation. Also, we
+ // should keep this value big because various incarnations of Linux
+ // have small limits on the number of mmap() regions per
+ // address-space.
+ static const int kMinSystemAlloc = 1 << (20 - kPageShift);
+
+ // For all span-lengths < kMaxPages we keep an exact-size list.
+ // REQUIRED: kMaxPages >= kMinSystemAlloc;
+ static const size_t kMaxPages = kMinSystemAlloc;
+
+ // Pick the appropriate map and cache types based on pointer size
+ typedef MapSelector<8*sizeof(uintptr_t)>::Type PageMap;
+ typedef MapSelector<8*sizeof(uintptr_t)>::CacheType PageMapCache;
+ PageMap pagemap_;
+ mutable PageMapCache pagemap_cache_;
+
+ // We segregate spans of a given size into two circular linked
+ // lists: one for normal spans, and one for spans whose memory
+ // has been returned to the system.
+ struct SpanList {
+ Span normal;
+ Span returned;
+ };
+
+ // List of free spans of length >= kMaxPages
+ SpanList large_;
+
+ // Array mapping from span length to a doubly linked list of free spans
+ SpanList free_[kMaxPages];
+
+ // Number of pages kept in free lists
+ uintptr_t free_pages_;
+
+ // Bytes allocated from system
+ uint64_t system_bytes_;
+
+ bool GrowHeap(Length n);
+
+ // REQUIRES: span->length >= n
+ // REQUIRES: span->location != IN_USE
+ // Remove span from its free list, and move any leftover part of
+ // span into appropriate free lists. Also update "span" to have
+ // length exactly "n" and mark it as non-free so it can be returned
+ // to the client. After all that, decrease free_pages_ by n and
+ // return span.
+ Span* Carve(Span* span, Length n);
+
+ void RecordSpan(Span* span) {
+ pagemap_.set(span->start, span);
+ if (span->length > 1) {
+ pagemap_.set(span->start + span->length - 1, span);
+ }
+ }
+
+ // Allocate a large span of length == n. If successful, returns a
+ // span of exactly the specified length. Else, returns NULL.
+ Span* AllocLarge(Length n);
+
+ // Commits the span.
+ void CommitSpan(Span* span);
+
+#if DEFER_DECOMMIT
+ // Number of free committed pages that we want to keep around.
+ static const size_t kMinimumFreeCommittedPageCount = 512; // 2MB (2**21 bytes) with 4KB pages
+
+ // During a scavenge, we'll release up to a fraction of the free committed pages.
+#ifdef _WIN32
+ // We are slightly less aggressive in releasing memory on Windows for performance reasons.
+ static const int kMaxScavengeAmountFactor = 3;
+#else
+ static const int kMaxScavengeAmountFactor = 2;
+#endif
+
+ // Decommits up to to_decommit pages from the tail of span_list's normal list; returns the unused budget.
+ uint64_t DecommitFromSpanList(SpanList* span_list, uint64_t to_decommit);
+
+ // Decommits the given span, which must be the last span on span_list's normal list, and moves it to the returned list. Returns its length.
+ Length DecommitLastSpan(SpanList* span_list, Span* span);
+
+ // Number of pages kept in free lists that are still committed, i.e. the
+ // total number of pages on the "normal" lists.
+ uint64_t free_committed_pages_;
+
+ // Number of pages that we committed in the last scavenge wait interval.
+ uint64_t pages_committed_since_last_scavenge_;
+#else
+ // Incrementally release some memory to the system.
+ // IncrementalScavenge(n) is called whenever n pages are freed.
+ void IncrementalScavenge(Length n);
+
+ // Number of pages to deallocate before doing more scavenging
+ int64_t scavenge_counter_;
+#endif
+
+ // Index of last free list we scavenged
+ int scavenge_index_;
+};
+
+} // namespace tcmalloc
+
+#endif // TCMALLOC_PAGE_HEAP_H_
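To make the header's size arithmetic concrete: assuming 4KB pages (kPageShift = 12, consistent with the "2MB ... 4KB pages" comment above), the constants work out as follows; the typedef mimics the COMPILE_ASSERT idiom used elsewhere in this code:

    #include <stddef.h>

    static const size_t kPageShift = 12;                           // assumed: 4KB pages
    static const size_t kMinSystemAlloc = 1 << (20 - kPageShift);  // 256 pages = 1MB
    static const size_t kMaxPages = kMinSystemAlloc;               // exact-size lists below 1MB
    static const size_t kMinimumFreeCommittedPageCount = 512;      // 512 * 4KB = 2MB floor

    // Compile-time check of the header's "kMaxPages >= kMinSystemAlloc" requirement.
    typedef char kMaxPagesCheck[(kMaxPages >= kMinSystemAlloc) ? 1 : -1];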
diff --git a/third_party/tcmalloc/tcmalloc.gyp b/third_party/tcmalloc/tcmalloc.gyp
index 86aad51..ddc85ee 100644
--- a/third_party/tcmalloc/tcmalloc.gyp
+++ b/third_party/tcmalloc/tcmalloc.gyp
@@ -121,6 +121,7 @@
'allocator_shim.cc',
'generic_allocators.cc',
'page_heap.cc',
+ 'page_heap.h',
'port.cc',
'system-alloc.h',
'tcmalloc.cc',