author     sgk@chromium.org <sgk@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2009-12-01 23:27:45 +0000
committer  sgk@chromium.org <sgk@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2009-12-01 23:27:45 +0000
commit     695410449699e7069c4b3f68bdb6fa2a01cb3e20 (patch)
tree       042b88a20c39e52eda262c61f70d42f29c0eb9d3
parent     e096f2fc452868e29be42f30c21238d8a3e40896 (diff)
Remove the old locations of forked tcmalloc files.
Remove the tcmalloc line from DEPS in favor of our vendor branch. Remove
the old jemalloc checked in underneath tcmalloc.

BUG=27911
TEST=none
Review URL: http://codereview.chromium.org/457002

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@33506 0039d316-1c4b-4281-b951-d872f2087c98
-rw-r--r--  .gitignore                                 |     1
-rw-r--r--  DEPS                                       |     3
-rw-r--r--  third_party/tcmalloc/README                |    60
-rw-r--r--  third_party/tcmalloc/allocator_shim.cc     |   266
-rw-r--r--  third_party/tcmalloc/config.h              |    20
-rw-r--r--  third_party/tcmalloc/config_linux.h        |   228
-rw-r--r--  third_party/tcmalloc/config_win.h          |   270
-rw-r--r--  third_party/tcmalloc/generic_allocators.cc |   137
-rw-r--r--  third_party/tcmalloc/google/tcmalloc.h     |    80
-rw-r--r--  third_party/tcmalloc/heap-checker.h        |   416
-rw-r--r--  third_party/tcmalloc/jemalloc/jemalloc.c   |  7276
-rw-r--r--  third_party/tcmalloc/jemalloc/jemalloc.h   |   222
-rw-r--r--  third_party/tcmalloc/jemalloc/ql.h         |   115
-rw-r--r--  third_party/tcmalloc/jemalloc/qr.h         |    99
-rw-r--r--  third_party/tcmalloc/jemalloc/rb.h         |   983
-rw-r--r--  third_party/tcmalloc/page_heap.cc          |   530
-rw-r--r--  third_party/tcmalloc/page_heap.h           |   241
-rw-r--r--  third_party/tcmalloc/port.cc               |   302
-rw-r--r--  third_party/tcmalloc/prep_libc.sh          |    35
-rw-r--r--  third_party/tcmalloc/symbolize_linux.cc    |   188
-rw-r--r--  third_party/tcmalloc/system-alloc.cc       |   505
-rw-r--r--  third_party/tcmalloc/system-alloc.h        |   119
-rw-r--r--  third_party/tcmalloc/tcmalloc.cc           |  1312
-rw-r--r--  third_party/tcmalloc/tcmalloc_linux.cc     |  1417
-rw-r--r--  third_party/tcmalloc/tcmalloc_unittests.cc |   490
-rw-r--r--  third_party/tcmalloc/unittest_utils.cc     |    18
-rw-r--r--  third_party/tcmalloc/win_allocator.cc      |    50
27 files changed, 0 insertions(+), 15383 deletions(-)
diff --git a/.gitignore b/.gitignore
index b8a4562..db56b2f 100644
--- a/.gitignore
+++ b/.gitignore
@@ -56,7 +56,6 @@
/third_party/python_24
/third_party/pywebsocket
/third_party/skia
-/third_party/tcmalloc/tcmalloc
/third_party/xdg-utils
/third_party/xulrunner-sdk
/third_party/yasm/source/patched-yasm
diff --git a/DEPS b/DEPS
index 26ffeb3..7e795c0 100644
--- a/DEPS
+++ b/DEPS
@@ -67,9 +67,6 @@ deps = {
"src/third_party/ots":
"http://ots.googlecode.com/svn/trunk@19",
- "src/third_party/tcmalloc/tcmalloc":
- "http://google-perftools.googlecode.com/svn/trunk@77",
-
"src/tools/page_cycler/acid3":
"/trunk/deps/page_cycler/acid3@19546",
diff --git a/third_party/tcmalloc/README b/third_party/tcmalloc/README
deleted file mode 100644
index b72502c..0000000
--- a/third_party/tcmalloc/README
+++ /dev/null
@@ -1,60 +0,0 @@
-Notes about the Chrome port of tcmalloc & jemalloc.
-
-Background
-----------
-We use this library as a generic way to fork into any of several allocators.
-Currently we can, at runtime, switch between:
- the default windows allocator
- the windows low-fragmentation-heap
- tcmalloc
- jemalloc (the heap used most notably within Mozilla Firefox)
-
-The mechanism for hooking LIBCMT on Windows is rather tricky. The core
-problem is that, by default, the Windows library does not declare malloc and
-free as weak symbols, so they cannot be overridden. To work around this, we
-start with LIBCMT.LIB and manually remove all allocator-related functions
-from it using the Visual Studio library tool. Once they are removed, we can
-link against the library and provide custom versions of the allocator-related
-functionality.
-
-
-Source code
------------
-Everything within the directory tcmalloc/tcmalloc is pulled directly from the
-google-perftools repository. For the most part, tcmalloc is a stock build
-from there.
-
-We have forked a few files. We always push our changes upstream, so over
-time the forked files should disappear. Currently forked files include:
- page_heap.cc
- port.cc
- system-alloc.cc
- system-alloc.h
- tcmalloc.cc
-
-Adding a new allocator requires definition of the following five functions:
- extern "C" {
- bool init();
- void* malloc(size_t s);
- void* realloc(void* p, size_t s);
- void free(void* s);
- size_t msize(void* p);
- }
-
-All other allocation related functions (new/delete/calloc/etc) have been
-implemented generically to work across all allocators.
-
-
-Usage
------
-You can use the different allocators by setting the environment variable
-CHROME_ALLOCATOR to:
- "tcmalloc" - TC Malloc (default)
- "jemalloc" - JE Malloc
- "winheap" - Windows default heap
- "winlfh" - Windows Low-Fragmentation heap
-
-
-Local modifications
--------------------
-jemalloc has been modified slightly to work within the Chromium build.
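For illustration, here is a minimal sketch of the five-function interface the
README above describes, written as a simple pass-through to the CRT heap. The
my_ names are hypothetical; a real port would plug in its own heap:

  #include <malloc.h>  // _msize (Windows CRT)
  #include <stdlib.h>

  extern "C" {
    // One-time setup; returning false would mean the heap is unusable.
    bool my_init() { return true; }
    void* my_malloc(size_t s) { return ::malloc(s); }
    void* my_realloc(void* p, size_t s) { return ::realloc(p, s); }
    void my_free(void* p) { ::free(p); }
    // Report the usable size of an allocated block.
    size_t my_msize(void* p) { return _msize(p); }
  }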
diff --git a/third_party/tcmalloc/allocator_shim.cc b/third_party/tcmalloc/allocator_shim.cc
deleted file mode 100644
index 39161d4..0000000
--- a/third_party/tcmalloc/allocator_shim.cc
+++ /dev/null
@@ -1,266 +0,0 @@
-// Copyright (c) 2009 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <config.h>
-
-// When defined, different heap allocators can be used via an environment
-// variable set before running the program. This may reduce the amount
-// of inlining that we get with malloc/free/etc. Disabling makes it
-// so that only tcmalloc can be used.
-#define ENABLE_DYNAMIC_ALLOCATOR_SWITCHING
-
-// TODO(mbelshe): Ensure that all calls to tcmalloc have the proper call depth
-// from the "user code" so that debugging tools (HeapChecker) can work.
-
-// __THROW is defined in glibc systems. It means, counter-intuitively,
-// "This function will never throw an exception." It's an optional
-// optimization tool, but we may need to use it to match glibc prototypes.
-#ifndef __THROW // I guess we're not on a glibc system
-# define __THROW // __THROW is just an optimization, so ok to make it ""
-#endif
-
-// new_mode behaves similarly to MSVC's _set_new_mode.
-// If flag is 0 (default), calls to malloc will behave normally.
-// If flag is 1, calls to malloc will behave like calls to new,
-// and the std_new_handler will be invoked on failure.
-// Can be set by calling _set_new_mode().
-static int new_mode = 0;
-
-typedef enum {
- TCMALLOC, // TCMalloc is the default allocator.
- JEMALLOC, // JEMalloc
- WINDEFAULT, // Windows Heap
- WINLFH, // Windows LFH Heap
-} Allocator;
-
-// This is the default allocator.
-static Allocator allocator = TCMALLOC;
-
-// We include tcmalloc and the win_allocator to get as much inlining as
-// possible.
-#include "tcmalloc.cc"
-#include "win_allocator.cc"
-
-// Forward declarations from jemalloc.
-extern "C" {
-void* je_malloc(size_t s);
-void* je_realloc(void* p, size_t s);
-void je_free(void* s);
-size_t je_msize(void* p);
-bool je_malloc_init_hard();
-}
-
-extern "C" {
-
-// Call the new handler, if one has been set.
-// Returns true on successfully calling the handler, false otherwise.
-inline bool call_new_handler(bool nothrow) {
- // Get the current new handler. NB: this function is not
- // thread-safe. We make a feeble stab at making it so here, but
- // this lock only protects against tcmalloc interfering with
- // itself, not with other libraries calling set_new_handler.
- std::new_handler nh;
- {
- SpinLockHolder h(&set_new_handler_lock);
- nh = std::set_new_handler(0);
- (void) std::set_new_handler(nh);
- }
-#if (defined(__GNUC__) && !defined(__EXCEPTIONS)) || (defined(_HAS_EXCEPTIONS) && !_HAS_EXCEPTIONS)
- if (!nh)
- return false;
- // Since exceptions are disabled, we don't really know if new_handler
- // failed. Assume it will abort if it fails.
- (*nh)();
- return false; // break out of the retry loop.
-#else
-  // If no new_handler is established, the allocation failed.
-  if (!nh) {
-    if (nothrow)
-      return false;
-    throw std::bad_alloc();
-  }
-  // Otherwise, try the new_handler. If it returns, retry the
-  // allocation. If it throws std::bad_alloc, fail the allocation.
-  // If it throws something else, don't interfere.
-  try {
-    (*nh)();
-  } catch (const std::bad_alloc&) {
-    if (!nothrow)
-      throw;
-    return false;  // Fail the allocation rather than retrying.
-  }
-  return true;  // The handler ran; the caller should retry the allocation.
-#endif // (defined(__GNUC__) && !defined(__EXCEPTIONS)) || (defined(_HAS_EXCEPTIONS) && !_HAS_EXCEPTIONS)
-}
-
-void* malloc(size_t size) __THROW {
- void* ptr;
- for (;;) {
-#ifdef ENABLE_DYNAMIC_ALLOCATOR_SWITCHING
- switch (allocator) {
- case JEMALLOC:
- ptr = je_malloc(size);
- break;
- case WINDEFAULT:
- case WINLFH:
- ptr = win_heap_malloc(size);
- break;
- case TCMALLOC:
- default:
- ptr = do_malloc(size);
- break;
- }
-#else
- // TCMalloc case.
- ptr = do_malloc(size);
-#endif
- if (ptr)
- return ptr;
-
- if (!new_mode || !call_new_handler(true))
- break;
- }
- return ptr;
-}
-
-void free(void* p) __THROW {
-#ifdef ENABLE_DYNAMIC_ALLOCATOR_SWITCHING
- switch (allocator) {
- case JEMALLOC:
- je_free(p);
- return;
- case WINDEFAULT:
- case WINLFH:
- win_heap_free(p);
- return;
- }
-#endif
- // TCMalloc case.
- do_free(p);
-}
-
-void* realloc(void* ptr, size_t size) __THROW {
-  // WebKit is brittle with allocators that return NULL for malloc(0). The
- // realloc(0, 0) code path does not guarantee a non-NULL return, so be sure
- // to call malloc for this case.
- if (!ptr)
- return malloc(size);
-
- void* new_ptr;
- for (;;) {
-#ifdef ENABLE_DYNAMIC_ALLOCATOR_SWITCHING
- switch (allocator) {
- case JEMALLOC:
- new_ptr = je_realloc(ptr, size);
- break;
- case WINDEFAULT:
- case WINLFH:
- new_ptr = win_heap_realloc(ptr, size);
- break;
- case TCMALLOC:
- default:
- new_ptr = do_realloc(ptr, size);
- break;
- }
-#else
- // TCMalloc case.
- new_ptr = do_realloc(ptr, size);
-#endif
-
-  // Subtle warning: NULL return does not always indicate out-of-memory. If
- // the requested new size is zero, realloc should free the ptr and return
- // NULL.
- if (new_ptr || !size)
- return new_ptr;
- if (!new_mode || !call_new_handler(true))
- break;
- }
- return new_ptr;
-}
-
-// TODO(mbelshe): Implement this for other allocators.
-void malloc_stats(void) __THROW {
-#ifdef ENABLE_DYNAMIC_ALLOCATOR_SWITCHING
- switch (allocator) {
- case JEMALLOC:
- // No stats.
- return;
- case WINDEFAULT:
- case WINLFH:
- // No stats.
- return;
- }
-#endif
- tc_malloc_stats();
-}
-
-#ifdef WIN32
-
-extern "C" size_t _msize(void* p) {
-#ifdef ENABLE_DYNAMIC_ALLOCATOR_SWITCHING
- switch (allocator) {
- case JEMALLOC:
- return je_msize(p);
- case WINDEFAULT:
- case WINLFH:
- return win_heap_msize(p);
- }
-#endif
- return MallocExtension::instance()->GetAllocatedSize(p);
-}
-
-// This is included to resolve references from libcmt.
-extern "C" intptr_t _get_heap_handle() {
- return 0;
-}
-
-// The CRT heap initialization stub.
-extern "C" int _heap_init() {
-#ifdef ENABLE_DYNAMIC_ALLOCATOR_SWITCHING
- const char* override = GetenvBeforeMain("CHROME_ALLOCATOR");
- if (override) {
- if (!stricmp(override, "jemalloc"))
- allocator = JEMALLOC;
- else if (!stricmp(override, "winheap"))
- allocator = WINDEFAULT;
- else if (!stricmp(override, "winlfh"))
- allocator = WINLFH;
- else if (!stricmp(override, "tcmalloc"))
- allocator = TCMALLOC;
- }
-
- switch (allocator) {
- case JEMALLOC:
- return je_malloc_init_hard() ? 0 : 1;
- case WINDEFAULT:
- return win_heap_init(false) ? 1 : 0;
- case WINLFH:
- return win_heap_init(true) ? 1 : 0;
- case TCMALLOC:
- default:
- // fall through
- break;
- }
-#endif
- // Initializing tcmalloc.
- // We intentionally leak this object. It lasts for the process
- // lifetime. Trying to teardown at _heap_term() is so late that
- // you can't do anything useful anyway.
- new TCMallocGuard();
- return 1;
-}
-
-// The CRT heap cleanup stub.
-extern "C" void _heap_term() {}
-
-// We set this to 1 because part of the CRT uses a check of _crtheap != 0
-// to test whether the CRT has been initialized. Once we've ripped out
-// the allocators from libcmt, we need to provide this definition so that
-// the rest of the CRT is still usable.
-extern "C" void* _crtheap = reinterpret_cast<void*>(1);
-
-#endif // WIN32
-
-#include "generic_allocators.cc"
-
-} // extern C
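The retry loops above consult the new handler only when new_mode is 1. Below
is a standalone sketch of that contract under the shim's semantics, assuming
an MSVC-style CRT that also provides _set_new_mode (generic_allocators.cc,
further down, is where this shim defines it):

  #include <new>      // std::set_new_handler
  #include <new.h>    // _set_new_mode (MSVC CRT)
  #include <cstdio>
  #include <cstdlib>

  static void on_alloc_failure() {
    std::fputs("malloc failed; new handler invoked\n", stderr);
    std::set_new_handler(0);  // Uninstall so the retry loop gives up.
  }

  int main() {
    std::set_new_handler(on_alloc_failure);
    _set_new_mode(1);  // malloc failures now route through the new handler.
    void* p = std::malloc(static_cast<size_t>(-1));  // Bound to fail.
    std::printf("malloc returned %p\n", p);          // NULL, after one retry.
    return 0;
  }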
diff --git a/third_party/tcmalloc/config.h b/third_party/tcmalloc/config.h
deleted file mode 100644
index 6155a86..0000000
--- a/third_party/tcmalloc/config.h
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright (c) 2009 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef CONFIG_H_
-
-#include "build/build_config.h"
-
-#define TC_VERSION_MAJOR 1
-#define TC_VERSION_MINOR 4
-#define TC_VERSION_PATCH ""
-#define TC_VERSION_STRING "google-perftools 1.4"
-
-#if defined(OS_WIN)
-#include "third_party/tcmalloc/config_win.h"
-#elif defined(OS_LINUX)
-#include "third_party/tcmalloc/config_linux.h"
-#endif
-
-#endif // CONFIG_H_
diff --git a/third_party/tcmalloc/config_linux.h b/third_party/tcmalloc/config_linux.h
deleted file mode 100644
index 398f303..0000000
--- a/third_party/tcmalloc/config_linux.h
+++ /dev/null
@@ -1,228 +0,0 @@
-/* src/config.h. Generated from config.h.in by configure. */
-/* src/config.h.in. Generated from configure.ac by autoheader. */
-
-/* Define to 1 if compiler supports __builtin_stack_pointer */
-/* #undef HAVE_BUILTIN_STACK_POINTER */
-
-/* Define to 1 if you have the <conflict-signal.h> header file. */
-/* #undef HAVE_CONFLICT_SIGNAL_H */
-
-/* Define to 1 if you have the declaration of `cfree', and to 0 if you don't.
- */
-#define HAVE_DECL_CFREE 1
-
-/* Define to 1 if you have the declaration of `memalign', and to 0 if you
- don't. */
-#define HAVE_DECL_MEMALIGN 1
-
-/* Define to 1 if you have the declaration of `posix_memalign', and to 0 if
- you don't. */
-#define HAVE_DECL_POSIX_MEMALIGN 1
-
-/* Define to 1 if you have the declaration of `pvalloc', and to 0 if you
- don't. */
-#define HAVE_DECL_PVALLOC 1
-
-/* Define to 1 if you have the declaration of `uname', and to 0 if you don't.
- */
-#define HAVE_DECL_UNAME 1
-
-/* Define to 1 if you have the declaration of `valloc', and to 0 if you don't.
- */
-#define HAVE_DECL_VALLOC 1
-
-/* Define to 1 if you have the <dlfcn.h> header file. */
-#define HAVE_DLFCN_H 1
-
-/* Define to 1 if the system has the type `Elf32_Versym'. */
-#define HAVE_ELF32_VERSYM 1
-
-/* Define to 1 if you have the <execinfo.h> header file. */
-#define HAVE_EXECINFO_H 1
-
-/* Define to 1 if you have the <fcntl.h> header file. */
-#define HAVE_FCNTL_H 1
-
-/* Define to 1 if you have the <features.h> header file. */
-#define HAVE_FEATURES_H 1
-
-/* Define to 1 if you have the `geteuid' function. */
-#define HAVE_GETEUID 1
-
-/* Define to 1 if you have the `getpagesize' function. */
-#define HAVE_GETPAGESIZE 1
-
-/* Define to 1 if you have the <glob.h> header file. */
-#define HAVE_GLOB_H 1
-
-/* Define to 1 if you have the <grp.h> header file. */
-#define HAVE_GRP_H 1
-
-/* Define to 1 if you have the <inttypes.h> header file. */
-#define HAVE_INTTYPES_H 1
-
-/* Define to 1 if you have the <libunwind.h> header file. */
-/* #undef HAVE_LIBUNWIND_H */
-
-/* Define to 1 if you have the <linux/ptrace.h> header file. */
-#define HAVE_LINUX_PTRACE_H 1
-
-/* Define to 1 if you have the <malloc.h> header file. */
-#define HAVE_MALLOC_H 1
-
-/* Define to 1 if you have the <memory.h> header file. */
-#define HAVE_MEMORY_H 1
-
-/* Define to 1 if you have a working `mmap' system call. */
-#define HAVE_MMAP 1
-
-/* define if the compiler implements namespaces */
-#define HAVE_NAMESPACES 1
-
-/* Define to 1 if you have the <poll.h> header file. */
-#define HAVE_POLL_H 1
-
-/* define if libc has program_invocation_name */
-#define HAVE_PROGRAM_INVOCATION_NAME 1
-
-/* Define if you have POSIX threads libraries and header files. */
-#define HAVE_PTHREAD 1
-
-/* Define to 1 if you have the <pwd.h> header file. */
-#define HAVE_PWD_H 1
-
-/* Define to 1 if you have the `sbrk' function. */
-#define HAVE_SBRK 1
-
-/* Define to 1 if you have the <sched.h> header file. */
-#define HAVE_SCHED_H 1
-
-/* Define to 1 if you have the <stdint.h> header file. */
-#define HAVE_STDINT_H 1
-
-/* Define to 1 if you have the <stdlib.h> header file. */
-#define HAVE_STDLIB_H 1
-
-/* Define to 1 if you have the <strings.h> header file. */
-#define HAVE_STRINGS_H 1
-
-/* Define to 1 if you have the <string.h> header file. */
-#define HAVE_STRING_H 1
-
-/* Define to 1 if the system has the type `struct mallinfo'. */
-#define HAVE_STRUCT_MALLINFO 1
-
-/* Define to 1 if you have the <sys/prctl.h> header file. */
-#define HAVE_SYS_PRCTL_H 1
-
-/* Define to 1 if you have the <sys/resource.h> header file. */
-#define HAVE_SYS_RESOURCE_H 1
-
-/* Define to 1 if you have the <sys/socket.h> header file. */
-#define HAVE_SYS_SOCKET_H 1
-
-/* Define to 1 if you have the <sys/stat.h> header file. */
-#define HAVE_SYS_STAT_H 1
-
-/* Define to 1 if you have the <sys/syscall.h> header file. */
-#define HAVE_SYS_SYSCALL_H 1
-
-/* Define to 1 if you have the <sys/types.h> header file. */
-#define HAVE_SYS_TYPES_H 1
-
-/* Define to 1 if you have the <sys/wait.h> header file. */
-#define HAVE_SYS_WAIT_H 1
-
-/* Define to 1 if compiler supports __thread */
-#define HAVE_TLS 1
-
-/* Define to 1 if you have the <ucontext.h> header file. */
-#define HAVE_UCONTEXT_H 1
-
-/* Define to 1 if you have the <unistd.h> header file. */
-#define HAVE_UNISTD_H 1
-
-/* Define to 1 if you have the <unwind.h> header file. */
-#define HAVE_UNWIND_H 1
-
-/* define if your compiler has __attribute__ */
-#define HAVE___ATTRIBUTE__ 1
-
-/* Define to 1 if the system has the type `__int64'. */
-/* #undef HAVE___INT64 */
-
-/* prefix where we look for installed files */
-#define INSTALL_PREFIX "/usr/local"
-
-/* Define to 1 if int32_t is equivalent to intptr_t */
-/* #undef INT32_EQUALS_INTPTR */
-
-/* Define to 1 if your C compiler doesn't accept -c and -o together. */
-/* #undef NO_MINUS_C_MINUS_O */
-
-/* Name of package */
-#define PACKAGE "google-perftools"
-
-/* Define to the address where bug reports for this package should be sent. */
-#define PACKAGE_BUGREPORT "opensource@google.com"
-
-/* Define to the full name of this package. */
-#define PACKAGE_NAME "google-perftools"
-
-/* Define to the full name and version of this package. */
-#define PACKAGE_STRING "google-perftools 1.4"
-
-/* Define to the one symbol short name of this package. */
-#define PACKAGE_TARNAME "google-perftools"
-
-/* Define to the version of this package. */
-#define PACKAGE_VERSION "1.4"
-
-/* How to access the PC from a struct ucontext */
-#define PC_FROM_UCONTEXT uc_mcontext.gregs[REG_RIP]
-
-/* Always the empty-string on non-windows systems. On windows, should be
- "__declspec(dllexport)". This way, when we compile the dll, we export our
- functions/classes. It's safe to define this here because config.h is only
- used internally, to compile the DLL, and every DLL source file #includes
- "config.h" before anything else. */
-#define PERFTOOLS_DLL_DECL
-
-/* printf format code for printing a size_t and ssize_t */
-#define PRIdS "zd"
-
-/* printf format code for printing a size_t and ssize_t */
-#define PRIuS "zu"
-
-/* printf format code for printing a size_t and ssize_t */
-#define PRIxS "zx"
-
-/* Define to necessary symbol if this constant uses a non-standard name on
- your system. */
-/* #undef PTHREAD_CREATE_JOINABLE */
-
-/* Define to 1 if you have the ANSI C header files. */
-#define STDC_HEADERS 1
-
-/* the namespace where STL code like vector<> is defined */
-#define STL_NAMESPACE std
-
-/* Version number of package */
-#define VERSION "1.4"
-
-/* C99 says: define this to get the PRI... macros from stdint.h */
-#ifndef __STDC_FORMAT_MACROS
-# define __STDC_FORMAT_MACROS 1
-#endif
-
-/* Define to `__inline__' or `__inline' if that's what the C compiler
- calls it, or to nothing if 'inline' is not supported under any name. */
-#ifndef __cplusplus
-/* #undef inline */
-#endif
-
-
-#ifdef __MINGW32__
-#include "windows/mingw.h"
-#endif
-
diff --git a/third_party/tcmalloc/config_win.h b/third_party/tcmalloc/config_win.h
deleted file mode 100644
index 30daf4f..0000000
--- a/third_party/tcmalloc/config_win.h
+++ /dev/null
@@ -1,270 +0,0 @@
-/* A manual version of config.h fit for windows machines. */
-
-/* Sometimes we accidentally #include this config.h instead of the one
- in .. -- this is particularly true for msys/mingw, which uses the
- unix config.h but also runs code in the windows directory.
- */
-#ifdef __MINGW32__
-#include "../config.h"
-#define GOOGLE_PERFTOOLS_WINDOWS_CONFIG_H_
-#endif
-
-#ifndef GOOGLE_PERFTOOLS_WINDOWS_CONFIG_H_
-#define GOOGLE_PERFTOOLS_WINDOWS_CONFIG_H_
-
-/* define this if you are linking tcmalloc statically and overriding the
- * default allocators.
- * For instructions on how to use this mode, see
- * http://groups.google.com/group/google-perftools/browse_thread/thread/41cd3710af85e57b
- */
-#define WIN32_OVERRIDE_ALLOCATORS
-
-/* the location of <hash_map> */
-#define HASH_MAP_H <hash_map>
-
-/* the namespace of hash_map/hash_set */
-#define HASH_NAMESPACE stdext
-
-/* the location of <hash_set> */
-#define HASH_SET_H <hash_set>
-
-/* Define to 1 if your libc has a snprintf implementation */
-#define HAVE_SNPRINTF
-
-/* Define to 1 if compiler supports __builtin_stack_pointer */
-#undef HAVE_BUILTIN_STACK_POINTER
-
-/* Define to 1 if you have the <conflict-signal.h> header file. */
-#undef HAVE_CONFLICT_SIGNAL_H
-
-/* Define to 1 if you have the declaration of `cfree', and to 0 if you don't.
- */
-#undef HAVE_DECL_CFREE
-
-/* Define to 1 if you have the declaration of `memalign', and to 0 if you
- don't. */
-#undef HAVE_DECL_MEMALIGN
-
-/* Define to 1 if you have the declaration of `posix_memalign', and to 0 if
- you don't. */
-#undef HAVE_DECL_POSIX_MEMALIGN
-
-/* Define to 1 if you have the declaration of `pvalloc', and to 0 if you
- don't. */
-#undef HAVE_DECL_PVALLOC
-
-/* Define to 1 if you have the declaration of `uname', and to 0 if you don't.
- */
-#undef HAVE_DECL_UNAME
-
-/* Define to 1 if you have the declaration of `valloc', and to 0 if you don't.
- */
-#undef HAVE_DECL_VALLOC
-
-/* Define to 1 if you have the <dlfcn.h> header file. */
-#undef HAVE_DLFCN_H
-
-/* Define to 1 if the system has the type `Elf32_Versym'. */
-#undef HAVE_ELF32_VERSYM
-
-/* Define to 1 if you have the <execinfo.h> header file. */
-#undef HAVE_EXECINFO_H
-
-/* Define to 1 if you have the <fcntl.h> header file. */
-#undef HAVE_FCNTL_H
-
-/* Define to 1 if you have the `geteuid' function. */
-#undef HAVE_GETEUID
-
-/* Define to 1 if you have the `getpagesize' function. */
-#define HAVE_GETPAGESIZE 1 /* we define it in windows/port.cc */
-
-/* Define to 1 if you have the <glob.h> header file. */
-#undef HAVE_GLOB_H
-
-/* Define to 1 if you have the <grp.h> header file. */
-#undef HAVE_GRP_H
-
-/* define if the compiler has hash_map */
-#define HAVE_HASH_MAP 1
-
-/* define if the compiler has hash_set */
-#define HAVE_HASH_SET 1
-
-/* Define to 1 if you have the <inttypes.h> header file. */
-#undef HAVE_INTTYPES_H
-
-/* Define to 1 if you have the <libunwind.h> header file. */
-#undef HAVE_LIBUNWIND_H
-
-/* Define to 1 if you have the <linux/ptrace.h> header file. */
-#undef HAVE_LINUX_PTRACE_H
-
-/* Define to 1 if you have the <malloc.h> header file. */
-#undef HAVE_MALLOC_H
-
-/* Define to 1 if you have the <memory.h> header file. */
-#undef HAVE_MEMORY_H
-
-/* Define to 1 if you have a working `mmap' system call. */
-#undef HAVE_MMAP
-
-/* define if the compiler implements namespaces */
-#define HAVE_NAMESPACES 1
-
-/* define if libc has program_invocation_name */
-#undef HAVE_PROGRAM_INVOCATION_NAME
-
-/* Define if you have POSIX threads libraries and header files. */
-#undef HAVE_PTHREAD
-
-/* Define to 1 if you have the <pwd.h> header file. */
-#undef HAVE_PWD_H
-
-/* Define to 1 if you have the `sbrk' function. */
-#undef HAVE_SBRK
-
-/* Define to 1 if you have the <stdint.h> header file. */
-#undef HAVE_STDINT_H
-
-/* Define to 1 if you have the <stdlib.h> header file. */
-#define HAVE_STDLIB_H 1
-
-/* Define to 1 if you have the <strings.h> header file. */
-#undef HAVE_STRINGS_H
-
-/* Define to 1 if you have the <string.h> header file. */
-#define HAVE_STRING_H 1
-
-/* Define to 1 if the system has the type `struct mallinfo'. */
-#undef HAVE_STRUCT_MALLINFO
-
-/* Define to 1 if you have the <sys/prctl.h> header file. */
-#undef HAVE_SYS_PRCTL_H
-
-/* Define to 1 if you have the <sys/resource.h> header file. */
-#undef HAVE_SYS_RESOURCE_H
-
-/* Define to 1 if you have the <sys/socket.h> header file. */
-#undef HAVE_SYS_SOCKET_H
-
-/* Define to 1 if you have the <sys/stat.h> header file. */
-#define HAVE_SYS_STAT_H 1
-
-/* Define to 1 if you have the <sys/syscall.h> header file. */
-#undef HAVE_SYS_SYSCALL_H
-
-/* Define to 1 if you have the <sys/types.h> header file. */
-#define HAVE_SYS_TYPES_H 1
-
-/* Define to 1 if you have the <sys/wait.h> header file. */
-#undef HAVE_SYS_WAIT_H
-
-/* Define to 1 if compiler supports __thread */
-#undef HAVE_TLS
-
-/* Define to 1 if you have the <ucontext.h> header file. */
-#undef HAVE_UCONTEXT_H
-
-/* Define to 1 if you have the <unistd.h> header file. */
-#undef HAVE_UNISTD_H
-
-/* Define to 1 if you have the <unwind.h> header file. */
-#undef HAVE_UNWIND_H
-
-/* define if your compiler has __attribute__ */
-#undef HAVE___ATTRIBUTE__
-
-/* Define to 1 if the system has the type `__int64'. */
-#define HAVE___INT64 1
-
-/* prefix where we look for installed files */
-#undef INSTALL_PREFIX
-
-/* Define to 1 if int32_t is equivalent to intptr_t */
-#undef INT32_EQUALS_INTPTR
-
-/* Define to 1 if your C compiler doesn't accept -c and -o together. */
-#undef NO_MINUS_C_MINUS_O
-
-/* Name of package */
-#undef PACKAGE
-
-/* Define to the address where bug reports for this package should be sent. */
-#undef PACKAGE_BUGREPORT
-
-/* Define to the full name of this package. */
-#undef PACKAGE_NAME
-
-/* Define to the full name and version of this package. */
-#undef PACKAGE_STRING
-
-/* Define to the one symbol short name of this package. */
-#undef PACKAGE_TARNAME
-
-/* Define to the version of this package. */
-#undef PACKAGE_VERSION
-
-/* How to access the PC from a struct ucontext */
-#undef PC_FROM_UCONTEXT
-
-/* Always the empty-string on non-windows systems. On windows, should be
- "__declspec(dllexport)". This way, when we compile the dll, we export our
- functions/classes. It's safe to define this here because config.h is only
- used internally, to compile the DLL, and every DLL source file #includes
- "config.h" before anything else. */
-#ifndef PERFTOOLS_DLL_DECL
-# define PERFTOOLS_IS_A_DLL 1 /* not set if you're statically linking */
-# define PERFTOOLS_DLL_DECL __declspec(dllexport)
-# define PERFTOOLS_DLL_DECL_FOR_UNITTESTS __declspec(dllimport)
-#endif
-
-/* printf format code for printing a size_t and ssize_t */
-#define PRIdS "Id"
-
-/* printf format code for printing a size_t and ssize_t */
-#define PRIuS "Iu"
-
-/* printf format code for printing a size_t and ssize_t */
-#define PRIxS "Ix"
-
-/* Define to necessary symbol if this constant uses a non-standard name on
- your system. */
-#undef PTHREAD_CREATE_JOINABLE
-
-/* Define to 1 if you have the ANSI C header files. */
-#define STDC_HEADERS 1
-
-/* the namespace where STL code like vector<> is defined */
-#define STL_NAMESPACE std
-
-/* Version number of package */
-#undef VERSION
-
-/* C99 says: define this to get the PRI... macros from stdint.h */
-#ifndef __STDC_FORMAT_MACROS
-# define __STDC_FORMAT_MACROS 1
-#endif
-
-/* Define to `__inline__' or `__inline' if that's what the C compiler
- calls it, or to nothing if 'inline' is not supported under any name. */
-#ifndef __cplusplus
-#undef inline
-#endif
-
-// ---------------------------------------------------------------------
-// Extra stuff not found in config.h.in
-
-// This must be defined before the windows.h is included. It's needed
-// for mutex.h, to give access to the TryLock method.
-#ifndef _WIN32_WINNT
-# define _WIN32_WINNT 0x0400
-#endif
-
-// We want to make sure not to ever try to #include heap-checker.h
-#define NO_HEAP_CHECK 1
-
-// TODO(csilvers): include windows/port.h in every relevant source file instead?
-#include "windows/port.h"
-
-#endif /* GOOGLE_PERFTOOLS_WINDOWS_CONFIG_H_ */
diff --git a/third_party/tcmalloc/generic_allocators.cc b/third_party/tcmalloc/generic_allocators.cc
deleted file mode 100644
index 124cfc2..0000000
--- a/third_party/tcmalloc/generic_allocators.cc
+++ /dev/null
@@ -1,137 +0,0 @@
-// Copyright (c) 2009 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// When possible, we implement allocator functions on top of the basic
-// low-level functions malloc() and free(). This way, including a new
-// allocator is as simple as providing just a small interface.
-//
-// As such, this file should not contain any allocator-specific code.
-
-// Implement a C++ style allocation, which always calls the new_handler
-// on failure.
-inline void* generic_cpp_alloc(size_t size, bool nothrow) {
- void* ptr;
- for (;;) {
- ptr = malloc(size);
- if (ptr)
- return ptr;
- if (!call_new_handler(nothrow))
- break;
- }
- return ptr;
-}
-
-extern "C++" {
-
-void* __cdecl operator new(size_t size) {
- return generic_cpp_alloc(size, false);
-}
-
-void operator delete(void* p) __THROW {
- free(p);
-}
-
-void* operator new[](size_t size) {
- return generic_cpp_alloc(size, false);
-}
-
-void operator delete[](void* p) __THROW {
- free(p);
-}
-
-void* operator new(size_t size, const std::nothrow_t& nt) __THROW {
- return generic_cpp_alloc(size, true);
-}
-
-void* operator new[](size_t size, const std::nothrow_t& nt) __THROW {
- return generic_cpp_alloc(size, true);
-}
-
-} // extern "C++"
-
-extern "C" {
-
-// This function behaves similarly to MSVC's _set_new_mode.
-// If flag is 0 (default), calls to malloc will behave normally.
-// If flag is 1, calls to malloc will behave like calls to new,
-// and the std_new_handler will be invoked on failure.
-// Returns the previous mode.
-int _set_new_mode(int flag) __THROW {
- int old_mode = new_mode;
- new_mode = flag;
- return old_mode;
-}
-
-void* calloc(size_t n, size_t elem_size) __THROW {
- // Overflow check
- const size_t size = n * elem_size;
- if (elem_size != 0 && size / elem_size != n) return NULL;
-
- void* result = malloc(size);
- if (result != NULL) {
- memset(result, 0, size);
- }
- return result;
-}
-
-void cfree(void* p) __THROW {
- free(p);
-}
-
-#ifdef WIN32
-
-void* _recalloc(void* p, size_t n, size_t elem_size) {
- if (!p)
- return calloc(n, elem_size);
-
- // This API is a bit odd.
- // Note: recalloc only guarantees zeroed memory when p is NULL.
- // Generally, calls to malloc() have padding. So a request
- // to malloc N bytes actually malloc's N+x bytes. Later, if
- // that buffer is passed to recalloc, we don't know what N
- // was anymore. We only know what N+x is. As such, there is
- // no way to know what to zero out.
- const size_t size = n * elem_size;
- if (elem_size != 0 && size / elem_size != n) return NULL;
- return realloc(p, size);
-}
-
-void* _calloc_impl(size_t n, size_t size) {
- return calloc(n, size);
-}
-
-#ifndef NDEBUG
-#undef malloc
-#undef free
-#undef calloc
-int _CrtDbgReport(int, const char*, int, const char*, const char*, ...) {
- return 0;
-}
-
-int _CrtDbgReportW(int, const wchar_t*, int, const wchar_t*,
- const wchar_t*, ...) {
- return 0;
-}
-
-int _CrtSetReportMode(int, int) {
- return 0;
-}
-
-void* _malloc_dbg(size_t size, int , const char*, int) {
- return malloc(size);
-}
-
-void _free_dbg(void* ptr, int) {
- free(ptr);
-}
-
-void* _calloc_dbg(size_t n, size_t size, int, const char*, int) {
- return calloc(n, size);
-}
-#endif // NDEBUG
-
-#endif // WIN32
-
-} // extern C
-
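The overflow check in calloc() above works because when n * elem_size wraps
modulo 2^N, dividing the wrapped product by elem_size can no longer recover
n. A standalone sketch of the same check:

  #include <cstddef>
  #include <cstdio>

  // True when n * elem_size overflows size_t (mirrors the calloc check).
  static bool multiply_overflows(size_t n, size_t elem_size) {
    const size_t size = n * elem_size;  // Wraps on overflow.
    return elem_size != 0 && size / elem_size != n;
  }

  int main() {
    // With a 32-bit size_t, 0x10000 * 0x10001 wraps to 0x10000, and
    // 0x10000 / 0x10001 == 0 != 0x10000, so the wrap is caught.
    // (With a 64-bit size_t the product fits and this prints 0.)
    const size_t n = static_cast<size_t>(1) << 16;
    std::printf("%d\n", multiply_overflows(n, n + 1));
    return 0;
  }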
diff --git a/third_party/tcmalloc/google/tcmalloc.h b/third_party/tcmalloc/google/tcmalloc.h
deleted file mode 100644
index 90dc0fe..0000000
--- a/third_party/tcmalloc/google/tcmalloc.h
+++ /dev/null
@@ -1,80 +0,0 @@
-/* Copyright (c) 2003, Google Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * ---
- * Author: Sanjay Ghemawat <opensource@google.com>
- * .h file by Craig Silverstein <opensource@google.com>
- */
-
-#ifndef TCMALLOC_TCMALLOC_H_
-#define TCMALLOC_TCMALLOC_H_
-
-// __THROW is defined in glibc systems. It means, counter-intuitively,
-// "This function will never throw an exception." It's an optional
-// optimization tool, but we may need to use it to match glibc prototypes.
-#ifndef __THROW /* I guess we're not on a glibc system */
-# define __THROW /* __THROW is just an optimization, so ok to make it "" */
-#endif
-
-#include <stdlib.h> // for struct mallinfo, if it's defined
-
-#ifdef __cplusplus
-#include <new> // for nothrow_t
-extern "C" {
-#endif
- void* tc_malloc(size_t size) __THROW;
- void tc_free(void* ptr) __THROW;
- void* tc_realloc(void* ptr, size_t size) __THROW;
- void* tc_calloc(size_t nmemb, size_t size) __THROW;
- void tc_cfree(void* ptr) __THROW;
-
- void* tc_memalign(size_t __alignment, size_t __size) __THROW;
- int tc_posix_memalign(void** ptr, size_t align, size_t size) __THROW;
- void* tc_valloc(size_t __size) __THROW;
- void* tc_pvalloc(size_t __size) __THROW;
-
- void tc_malloc_stats(void) __THROW;
- int tc_mallopt(int cmd, int value) __THROW;
-// TODO(willchan): Fork this for Linux/Windows if we need this on Linux.
-#if 0
- struct mallinfo tc_mallinfo(void) __THROW;
-#endif
-
-#ifdef __cplusplus
- void* tc_new(size_t size);
- void tc_delete(void* p) __THROW;
- void* tc_newarray(size_t size);
- void tc_deletearray(void* p) __THROW;
-
- void* tc_new_nothrow(size_t size, const std::nothrow_t&) __THROW;
- void* tc_newarray_nothrow(size_t size, const std::nothrow_t&) __THROW;
-}
-#endif
-
-#endif // #ifndef TCMALLOC_TCMALLOC_H_
diff --git a/third_party/tcmalloc/heap-checker.h b/third_party/tcmalloc/heap-checker.h
deleted file mode 100644
index 751eb9f..0000000
--- a/third_party/tcmalloc/heap-checker.h
+++ /dev/null
@@ -1,416 +0,0 @@
-// Copyright (c) 2005, Google Inc.
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// ---
-// Author: Maxim Lifantsev (with design ideas by Sanjay Ghemawat)
-//
-//
-// Module for detecting heap (memory) leaks.
-//
-// For full(er) information, see doc/heap_checker.html
-//
-// This module can be linked into programs with no slowdown caused
-// by it unless you activate the leak-checker:
-//
-// 1. Set the environment variable HEAPCHECK to _type_ before
-// running the program.
-//
-// _type_ is usually "normal" but can also be "minimal", "strict", or
-// "draconian". (See the html file for other options, like 'local'.)
-//
-// After that, just run your binary. If the heap-checker detects
-// a memory leak at program-exit, it will print instructions on how
-// to track down the leak.
-
-#ifndef BASE_HEAP_CHECKER_H_
-#define BASE_HEAP_CHECKER_H_
-
-#include "config.h"
-
-#include <sys/types.h> // for size_t
-#ifdef HAVE_STDINT_H
-#include <stdint.h> // for uintptr_t
-#endif
-#include <stdarg.h> // for va_list
-#include <vector>
-
-// Annoying stuff for windows -- makes sure clients can import these functions
-#ifndef PERFTOOLS_DLL_DECL
-# ifdef _WIN32
-# define PERFTOOLS_DLL_DECL __declspec(dllimport)
-# else
-# define PERFTOOLS_DLL_DECL
-# endif
-#endif
-
-
-// The class is thread-safe with respect to all the provided static methods,
-// as well as HeapLeakChecker objects: they can be accessed by multiple threads.
-class PERFTOOLS_DLL_DECL HeapLeakChecker {
- public:
-
- // ----------------------------------------------------------------------- //
- // Static functions for working with (whole-program) leak checking.
-
- // If heap leak checking is currently active in some mode
- // e.g. if leak checking was started (and is still active now)
- // due to HEAPCHECK=... defined in the environment.
-  // The return value reflects whether HeapLeakChecker objects manually
-  // constructed right now will be doing leak checking or nothing.
- // Note that we can go from active to inactive state during InitGoogle()
- // if FLAGS_heap_check gets set to "" by some code before/during InitGoogle().
- static bool IsActive();
-
- // Return pointer to the whole-program checker if it has been created
- // and NULL otherwise.
- // Once GlobalChecker() returns non-NULL that object will not disappear and
- // will be returned by all later GlobalChecker calls.
- // This is mainly to access BytesLeaked() and ObjectsLeaked() (see below)
- // for the whole-program checker after one calls NoGlobalLeaks()
- // or similar and gets false.
- static HeapLeakChecker* GlobalChecker();
-
- // Do whole-program leak check now (if it was activated for this binary);
- // return false only if it was activated and has failed.
- // The mode of the check is controlled by the command-line flags.
- // This method can be called repeatedly.
- // Things like GlobalChecker()->SameHeap() can also be called explicitly
- // to do the desired flavor of the check.
- static bool NoGlobalLeaks();
-
-  // If the whole-program checker is active,
- // cancel its automatic execution after main() exits.
- // This requires that some leak check (e.g. NoGlobalLeaks())
- // has been called at least once on the whole-program checker.
- static void CancelGlobalCheck();
-
- // ----------------------------------------------------------------------- //
- // Non-static functions for starting and doing leak checking.
-
- // Start checking and name the leak check performed.
- // The name is used in naming dumped profiles
- // and needs to be unique only within your binary.
- // It must also be a string that can be a part of a file name,
- // in particular not contain path expressions.
- explicit HeapLeakChecker(const char *name);
-
- // Destructor (verifies that some *NoLeaks or *SameHeap method
- // has been called at least once).
- ~HeapLeakChecker();
-
- // These used to be different but are all the same now: they return
- // true iff all memory allocated since this HeapLeakChecker object
-  // was constructed is still reachable from global state.
- //
- // Because we fork to convert addresses to symbol-names, and forking
- // is not thread-safe, and we may be called in a threaded context,
- // we do not try to symbolize addresses when called manually.
- bool NoLeaks() { return DoNoLeaks(DO_NOT_SYMBOLIZE); }
-
- // These forms are obsolete; use NoLeaks() instead.
- // TODO(csilvers): mark with ATTRIBUTE_DEPRECATED.
- bool QuickNoLeaks() { return NoLeaks(); }
- bool BriefNoLeaks() { return NoLeaks(); }
- bool SameHeap() { return NoLeaks(); }
- bool QuickSameHeap() { return NoLeaks(); }
- bool BriefSameHeap() { return NoLeaks(); }
-
- // Detailed information about the number of leaked bytes and objects
- // (both of these can be negative as well).
- // These are available only after a *SameHeap or *NoLeaks
- // method has been called.
- // Note that it's possible for both of these to be zero
- // while SameHeap() or NoLeaks() returned false in case
- // of a heap state change that is significant
- // but preserves the byte and object counts.
- ssize_t BytesLeaked() const;
- ssize_t ObjectsLeaked() const;
-
- // ----------------------------------------------------------------------- //
- // Static helpers to make us ignore certain leaks.
-
- // Scoped helper class. Should be allocated on the stack inside a
- // block of code. Any heap allocations done in the code block
- // covered by the scoped object (including in nested function calls
- // done by the code block) will not be reported as leaks. This is
- // the recommended replacement for the GetDisableChecksStart() and
- // DisableChecksToHereFrom() routines below.
- //
- // Example:
- // void Foo() {
- // HeapLeakChecker::Disabler disabler;
- // ... code that allocates objects whose leaks should be ignored ...
- // }
- //
- // REQUIRES: Destructor runs in same thread as constructor
- class Disabler {
- public:
- Disabler();
- ~Disabler();
- private:
- Disabler(const Disabler&); // disallow copy
- void operator=(const Disabler&); // and assign
- };
-
- // Ignore an object located at 'ptr' (can go at the start or into the object)
- // as well as all heap objects (transitively) referenced from it
- // for the purposes of heap leak checking.
- // If 'ptr' does not point to an active allocated object
- // at the time of this call, it is ignored;
- // but if it does, the object must not get deleted from the heap later on;
- // it must also be not already ignored at the time of this call.
- //
- // See also HiddenPointer, below, if you need to prevent a pointer from
- // being traversed by the heap checker but do not wish to transitively
- // whitelist objects referenced through it.
- static void IgnoreObject(const void* ptr);
-
- // Undo what an earlier IgnoreObject() call promised and asked to do.
- // At the time of this call 'ptr' must point at or inside of an active
- // allocated object which was previously registered with IgnoreObject().
- static void UnIgnoreObject(const void* ptr);
-
- // ----------------------------------------------------------------------- //
- // Initialization; to be called from main() only.
-
- // Full starting of recommended whole-program checking.
- static void InternalInitStart();
-
- // ----------------------------------------------------------------------- //
- // Internal types defined in .cc
-
- class Allocator;
- struct RangeValue;
-
- private:
-
- // ----------------------------------------------------------------------- //
- // Various helpers
-
- // Create the name of the heap profile file.
- // Should be deleted via Allocator::Free().
- char* MakeProfileNameLocked();
-
- // Helper for constructors
- void Create(const char *name, bool make_start_snapshot);
-
- enum ShouldSymbolize { SYMBOLIZE, DO_NOT_SYMBOLIZE };
-
- // Helper for *NoLeaks and *SameHeap
- bool DoNoLeaks(ShouldSymbolize should_symbolize);
-
- // These used to be public, but they are now deprecated.
- // Will remove entirely when all internal uses are fixed.
- // In the meantime, use friendship so the unittest can still test them.
- static void* GetDisableChecksStart();
- static void DisableChecksToHereFrom(const void* start_address);
- static void DisableChecksIn(const char* pattern);
- friend void RangeDisabledLeaks();
- friend void NamedTwoDisabledLeaks();
- friend void* RunNamedDisabledLeaks(void*);
- friend void TestHeapLeakCheckerNamedDisabling();
- friend int main(int, char**);
-
-
- // Helper for DisableChecksIn
- static void DisableChecksInLocked(const char* pattern);
-
- // Disable checks based on stack trace entry at a depth <=
- // max_depth. Used to hide allocations done inside some special
- // libraries.
- static void DisableChecksFromToLocked(const void* start_address,
- const void* end_address,
- int max_depth);
-
- // Helper for DoNoLeaks to ignore all objects reachable from all live data
- static void IgnoreAllLiveObjectsLocked(const void* self_stack_top);
-
- // Callback we pass to ListAllProcessThreads (see thread_lister.h)
- // that is invoked when all threads of our process are found and stopped.
-  // The callback does the things needed to ignore live data reachable from
- // thread stacks and registers for all our threads
- // as well as do other global-live-data ignoring
- // (via IgnoreNonThreadLiveObjectsLocked)
- // during the quiet state of all threads being stopped.
- // For the argument meaning see the comment by ListAllProcessThreads.
- // Here we only use num_threads and thread_pids, that ListAllProcessThreads
- // fills for us with the number and pids of all the threads of our process
- // it found and attached to.
- static int IgnoreLiveThreadsLocked(void* parameter,
- int num_threads,
- pid_t* thread_pids,
- va_list ap);
-
- // Helper for IgnoreAllLiveObjectsLocked and IgnoreLiveThreadsLocked
- // that we prefer to execute from IgnoreLiveThreadsLocked
- // while all threads are stopped.
- // This helper does live object discovery and ignoring
- // for all objects that are reachable from everything
- // not related to thread stacks and registers.
- static void IgnoreNonThreadLiveObjectsLocked();
-
- // Helper for IgnoreNonThreadLiveObjectsLocked and IgnoreLiveThreadsLocked
- // to discover and ignore all heap objects
- // reachable from currently considered live objects
-  // (live_objects static global variable in our .cc file).
- // "name", "name2" are two strings that we print one after another
- // in a debug message to describe what kind of live object sources
- // are being used.
- static void IgnoreLiveObjectsLocked(const char* name, const char* name2);
-
- // Runs REGISTER_HEAPCHECK_CLEANUP cleanups and potentially
- // calls DoMainHeapCheck
- static void RunHeapCleanups();
-
- // Do the overall whole-program heap leak check if needed;
- // returns true when did the leak check.
- static bool DoMainHeapCheck();
-
- // Type of task for UseProcMapsLocked
- enum ProcMapsTask {
- RECORD_GLOBAL_DATA,
- DISABLE_LIBRARY_ALLOCS
- };
-
- // Success/Error Return codes for UseProcMapsLocked.
- enum ProcMapsResult {
- PROC_MAPS_USED,
- CANT_OPEN_PROC_MAPS,
- NO_SHARED_LIBS_IN_PROC_MAPS
- };
-
- // Read /proc/self/maps, parse it, and do the 'proc_maps_task' for each line.
- static ProcMapsResult UseProcMapsLocked(ProcMapsTask proc_maps_task);
-
- // A ProcMapsTask to disable allocations from 'library'
- // that is mapped to [start_address..end_address)
- // (only if library is a certain system library).
- static void DisableLibraryAllocsLocked(const char* library,
- uintptr_t start_address,
- uintptr_t end_address);
-
- // Return true iff "*ptr" points to a heap object
- // ("*ptr" can point at the start or inside of a heap object
- // so that this works e.g. for pointers to C++ arrays, C++ strings,
- // multiple-inherited objects, or pointers to members).
- // We also fill *object_size for this object then
- // and we move "*ptr" to point to the very start of the heap object.
- static inline bool HaveOnHeapLocked(const void** ptr, size_t* object_size);
-
- // Helper to shutdown heap leak checker when it's not needed
- // or can't function properly.
- static void TurnItselfOffLocked();
-
- // Internally-used c-tor to start whole-executable checking.
- HeapLeakChecker();
-
- // ----------------------------------------------------------------------- //
- // Friends and externally accessed helpers.
-
- // Helper for VerifyHeapProfileTableStackGet in the unittest
- // to get the recorded allocation caller for ptr,
- // which must be a heap object.
- static const void* GetAllocCaller(void* ptr);
- friend void VerifyHeapProfileTableStackGet();
-
- // This gets to execute before constructors for all global objects
- static void BeforeConstructorsLocked();
- friend void HeapLeakChecker_BeforeConstructors();
-
- // This gets to execute after destructors for all global objects
- friend void HeapLeakChecker_AfterDestructors();
-
- // ----------------------------------------------------------------------- //
- // Member data.
-
- class SpinLock* lock_; // to make HeapLeakChecker objects thread-safe
- const char* name_; // our remembered name (we own it)
- // NULL means this leak checker is a noop
-
- // Snapshot taken when the checker was created. May be NULL
- // for the global heap checker object. We use void* instead of
- // HeapProfileTable::Snapshot* to avoid including heap-profile-table.h.
- void* start_snapshot_;
-
- bool has_checked_; // if we have done the leak check, so these are ready:
- ssize_t inuse_bytes_increase_; // bytes-in-use increase for this checker
- ssize_t inuse_allocs_increase_; // allocations-in-use increase
- // for this checker
- bool keep_profiles_; // iff we should keep the heap profiles we've made
-
- // ----------------------------------------------------------------------- //
-
- // Disallow "evil" constructors.
- HeapLeakChecker(const HeapLeakChecker&);
- void operator=(const HeapLeakChecker&);
-};
-
-
-// Holds a pointer that will not be traversed by the heap checker.
-// Contrast with HeapLeakChecker::IgnoreObject(o), in which o and
-// all objects reachable from o are ignored by the heap checker.
-template <class T>
-class HiddenPointer {
- public:
- explicit HiddenPointer(T* t)
- : masked_t_(reinterpret_cast<uintptr_t>(t) ^ kHideMask) {
- }
- // Returns unhidden pointer. Be careful where you save the result.
- T* get() const { return reinterpret_cast<T*>(masked_t_ ^ kHideMask); }
-
- private:
- // Arbitrary value, but not such that xor'ing with it is likely
- // to map one valid pointer to another valid pointer:
- static const uintptr_t kHideMask =
- static_cast<uintptr_t>(0xF03A5F7BF03A5F7Bll);
- uintptr_t masked_t_;
-};
-
-// A class that exists solely to run its destructor. This class should not be
-// used directly, but instead by the REGISTER_HEAPCHECK_CLEANUP macro below.
-class PERFTOOLS_DLL_DECL HeapCleaner {
- public:
- typedef void (*void_function)(void);
- HeapCleaner(void_function f);
- static void RunHeapCleanups();
- private:
- static std::vector<void_function>* heap_cleanups_;
-};
-
-// A macro to declare module heap check cleanup tasks
-// (they run only if we are doing heap leak checking.)
-// 'body' should be the cleanup code to run. 'name' doesn't matter,
-// but must be unique amongst all REGISTER_HEAPCHECK_CLEANUP calls.
-#define REGISTER_HEAPCHECK_CLEANUP(name, body) \
- namespace { \
- void heapcheck_cleanup_##name() { body; } \
- static HeapCleaner heapcheck_cleaner_##name(&heapcheck_cleanup_##name); \
- }
-
-#endif // BASE_HEAP_CHECKER_H_
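A hedged sketch of how a caller would use the scoped checker declared above,
assuming the binary was started with HEAPCHECK set (e.g. HEAPCHECK=local, per
the options mentioned in the header comment) so that checking is active:

  #include <cstdlib>
  #include "heap-checker.h"  // The (since removed) header above.

  static void CheckForLeaks() {
    HeapLeakChecker checker("my_phase");  // Name must be file-name safe.
    int* leak = new int[100];             // Deliberately leaked for the demo.
    (void)leak;
    if (!checker.NoLeaks())               // Fails: ~400 bytes still in use.
      std::abort();
  }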
diff --git a/third_party/tcmalloc/jemalloc/jemalloc.c b/third_party/tcmalloc/jemalloc/jemalloc.c
deleted file mode 100644
index 65eb0b3..0000000
--- a/third_party/tcmalloc/jemalloc/jemalloc.c
+++ /dev/null
@@ -1,7276 +0,0 @@
-/* -*- Mode: C; tab-width: 8; c-basic-offset: 8 -*- */
-/* vim:set softtabstop=8 shiftwidth=8: */
-/*-
- * Copyright (C) 2006-2008 Jason Evans <jasone@FreeBSD.org>.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice(s), this list of conditions and the following disclaimer as
- * the first lines of this file unmodified other than the possible
- * addition of one or more copyright notices.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice(s), this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
- * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *******************************************************************************
- *
- * This allocator implementation is designed to provide scalable performance
- * for multi-threaded programs on multi-processor systems. The following
- * features are included for this purpose:
- *
- * + Multiple arenas are used if there are multiple CPUs, which reduces lock
- * contention and cache sloshing.
- *
- * + Cache line sharing between arenas is avoided for internal data
- * structures.
- *
- * + Memory is managed in chunks and runs (chunks can be split into runs),
- * rather than as individual pages. This provides a constant-time
- * mechanism for associating allocations with particular arenas.
- *
- * Allocation requests are rounded up to the nearest size class, and no record
- * of the original request size is maintained. Allocations are broken into
- * categories according to size class. Assuming runtime defaults, 4 kB pages
- * and a 16 byte quantum on a 32-bit system, the size classes in each category
- * are as follows:
- *
- * |=====================================|
- * | Category | Subcategory | Size |
- * |=====================================|
- * | Small | Tiny | 2 |
- * | | | 4 |
- * | | | 8 |
- * | |----------------+---------|
- * | | Quantum-spaced | 16 |
- * | | | 32 |
- * | | | 48 |
- * | | | ... |
- * | | | 480 |
- * | | | 496 |
- * | | | 512 |
- * | |----------------+---------|
- * | | Sub-page | 1 kB |
- * | | | 2 kB |
- * |=====================================|
- * | Large | 4 kB |
- * | | 8 kB |
- * | | 12 kB |
- * | | ... |
- * | | 1012 kB |
- * | | 1016 kB |
- * | | 1020 kB |
- * |=====================================|
- * | Huge | 1 MB |
- * | | 2 MB |
- * | | 3 MB |
- * | | ... |
- * |=====================================|
- *
- * A different mechanism is used for each category:
- *
- * Small : Each size class is segregated into its own set of runs. Each run
- * maintains a bitmap of which regions are free/allocated.
- *
- * Large : Each allocation is backed by a dedicated run. Metadata are stored
- * in the associated arena chunk header maps.
- *
- * Huge : Each allocation is backed by a dedicated contiguous set of chunks.
- * Metadata are stored in a separate red-black tree.
- *
- *******************************************************************************
- */
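-
-/*
- * Worked example of the rounding rules above (an illustrative sketch
- * assuming the stated defaults: 16-byte quantum, 4 kB pages, 1 MB
- * chunks; this helper is not part of the allocator):
- */
-#if 0	/* Illustrative only. */
-static size_t
-example_size_class(size_t size)
-{
-	const size_t quantum = 16, page = 4096, chunk = 1024 * 1024;
-
-	if (size <= 8) {		/* Tiny: next power of 2, min 2. */
-		size_t r = 2;
-
-		while (r < size)
-			r <<= 1;
-		return (r);
-	}
-	if (size <= 512)		/* Quantum-spaced small classes. */
-		return ((size + quantum - 1) & ~(quantum - 1));
-	if (size <= 2048)		/* Sub-page: 1 kB or 2 kB. */
-		return ((size <= 1024) ? 1024 : 2048);
-	if (size <= chunk - page)	/* Large: page multiples. */
-		return ((size + page - 1) & ~(page - 1));
-	/* Huge: chunk multiples. */
-	return ((size + chunk - 1) & ~(chunk - 1));
-}
-/* example_size_class(100) == 112; example_size_class(5000) == 8192. */
-#endif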
-
-/*
- * NOTE(mbelshe): Added these defines to fit within chromium build system.
- */
-#define MOZ_MEMORY_WINDOWS
-#define MOZ_MEMORY
-#define DONT_OVERRIDE_LIBC
-
-/*
- * MALLOC_PRODUCTION disables assertions and statistics gathering. It also
- * defaults the A and J runtime options to off. These settings are appropriate
- * for production systems.
- */
-#ifndef MOZ_MEMORY_DEBUG
-# define MALLOC_PRODUCTION
-#endif
-
-/*
- * Use only one arena by default. Mozilla does not currently make extensive
- * use of concurrent allocation, so the increased fragmentation associated with
- * multiple arenas is not warranted.
- */
-#define MOZ_MEMORY_NARENAS_DEFAULT_ONE
-
-/*
- * MALLOC_STATS enables statistics calculation, and is required for
- * jemalloc_stats().
- */
-#define MALLOC_STATS
-
-#ifndef MALLOC_PRODUCTION
- /*
- * MALLOC_DEBUG enables assertions and other sanity checks, and disables
- * inline functions.
- */
-# define MALLOC_DEBUG
-
- /* Memory filling (junk/zero). */
-# define MALLOC_FILL
-
- /* Allocation tracing. */
-# ifndef MOZ_MEMORY_WINDOWS
-# define MALLOC_UTRACE
-# endif
-
- /* Support optional abort() on OOM. */
-# define MALLOC_XMALLOC
-
- /* Support SYSV semantics. */
-# define MALLOC_SYSV
-#endif
-
-/*
- * MALLOC_VALIDATE causes malloc_usable_size() to perform some pointer
- * validation. There are many possible errors that validation does not even
- * attempt to detect.
- */
-#define MALLOC_VALIDATE
-
-/* Embed no-op macros that support memory allocation tracking via valgrind. */
-#ifdef MOZ_VALGRIND
-# define MALLOC_VALGRIND
-#endif
-#ifdef MALLOC_VALGRIND
-# include <valgrind/valgrind.h>
-#else
-# define VALGRIND_MALLOCLIKE_BLOCK(addr, sizeB, rzB, is_zeroed)
-# define VALGRIND_FREELIKE_BLOCK(addr, rzB)
-#endif
-
-/*
- * MALLOC_BALANCE enables monitoring of arena lock contention and dynamically
- * re-balances arena load if exponentially averaged contention exceeds a
- * certain threshold.
- */
-/* #define MALLOC_BALANCE */
-
-#if (!defined(MOZ_MEMORY_WINDOWS) && !defined(MOZ_MEMORY_DARWIN))
- /*
- * MALLOC_PAGEFILE causes all mmap()ed memory to be backed by temporary
- * files, so that if a chunk is mapped, it is guaranteed to be swappable.
- * This avoids asynchronous OOM failures that are due to VM over-commit.
- *
- * XXX OS X over-commits, so we should probably use mmap() instead of
- * vm_allocate(), so that MALLOC_PAGEFILE works.
- */
-#define MALLOC_PAGEFILE
-#endif
-
-#ifdef MALLOC_PAGEFILE
-/* Write size when initializing a page file. */
-# define MALLOC_PAGEFILE_WRITE_SIZE 512
-#endif
-
-#ifdef MOZ_MEMORY_LINUX
-#define _GNU_SOURCE /* For mremap(2). */
-#define issetugid() 0
-#if 0 /* Enable in order to test decommit code on Linux. */
-# define MALLOC_DECOMMIT
-#endif
-#endif
-
-#ifndef MOZ_MEMORY_WINCE
-#include <sys/types.h>
-
-#include <errno.h>
-#include <stdlib.h>
-#endif
-#include <limits.h>
-#include <stdarg.h>
-#include <stdio.h>
-#include <string.h>
-
-#ifdef MOZ_MEMORY_WINDOWS
-#ifndef MOZ_MEMORY_WINCE
-//#include <cruntime.h>
-//#include <internal.h>
-#include <io.h>
-#else
-#include <cmnintrin.h>
-#include <crtdefs.h>
-#define SIZE_MAX UINT_MAX
-#endif
-#include <windows.h>
-
-#pragma warning( disable: 4267 4996 4146 )
-
-#define false FALSE
-#define true TRUE
-#define inline __inline
-#define SIZE_T_MAX SIZE_MAX
-#define STDERR_FILENO 2
-#define PATH_MAX MAX_PATH
-#define vsnprintf _vsnprintf
-
-#ifndef NO_TLS
-static unsigned long tlsIndex = 0xffffffff;
-#endif
-
-#define __thread
-#ifdef MOZ_MEMORY_WINCE
-#define _pthread_self() GetCurrentThreadId()
-#else
-#define _pthread_self() __threadid()
-#endif
-#define issetugid() 0
-
-#ifndef MOZ_MEMORY_WINCE
-/* use MSVC intrinsics */
-#pragma intrinsic(_BitScanForward)
-static __forceinline int
-ffs(int x)
-{
- unsigned long i;
-
- if (_BitScanForward(&i, x) != 0)
- return (i + 1);
-
- return (0);
-}
-
-/* Implement getenv without using malloc */
-static char mozillaMallocOptionsBuf[64];
-
-#define getenv xgetenv
-static char *
-getenv(const char *name)
-{
-
- if (GetEnvironmentVariableA(name, (LPSTR)&mozillaMallocOptionsBuf,
- sizeof(mozillaMallocOptionsBuf)) > 0)
- return (mozillaMallocOptionsBuf);
-
- return (NULL);
-}
-
-#else /* WIN CE */
-
-#define ENOMEM 12
-#define EINVAL 22
-
-static __forceinline int
-ffs(int x)
-{
-
- return 32 - _CountLeadingZeros((-x) & x);
-}
-#endif
-
-typedef unsigned char uint8_t;
-typedef unsigned uint32_t;
-typedef unsigned long long uint64_t;
-typedef unsigned long long uintmax_t;
-typedef long ssize_t;
-
-#define MALLOC_DECOMMIT
-#endif
-
-#ifndef MOZ_MEMORY_WINDOWS
-#ifndef MOZ_MEMORY_SOLARIS
-#include <sys/cdefs.h>
-#endif
-#ifndef __DECONST
-# define __DECONST(type, var) ((type)(uintptr_t)(const void *)(var))
-#endif
-#ifndef MOZ_MEMORY
-__FBSDID("$FreeBSD: head/lib/libc/stdlib/malloc.c 180599 2008-07-18 19:35:44Z jasone $");
-#include "libc_private.h"
-#ifdef MALLOC_DEBUG
-# define _LOCK_DEBUG
-#endif
-#include "spinlock.h"
-#include "namespace.h"
-#endif
-#include <sys/mman.h>
-#ifndef MADV_FREE
-# define MADV_FREE MADV_DONTNEED
-#endif
-#ifndef MAP_NOSYNC
-# define MAP_NOSYNC 0
-#endif
-#include <sys/param.h>
-#ifndef MOZ_MEMORY
-#include <sys/stddef.h>
-#endif
-#include <sys/time.h>
-#include <sys/types.h>
-#ifndef MOZ_MEMORY_SOLARIS
-#include <sys/sysctl.h>
-#endif
-#include <sys/uio.h>
-#ifndef MOZ_MEMORY
-#include <sys/ktrace.h> /* Must come after several other sys/ includes. */
-
-#include <machine/atomic.h>
-#include <machine/cpufunc.h>
-#include <machine/vmparam.h>
-#endif
-
-#include <errno.h>
-#include <limits.h>
-#ifndef SIZE_T_MAX
-# define SIZE_T_MAX SIZE_MAX
-#endif
-#include <pthread.h>
-#ifdef MOZ_MEMORY_DARWIN
-#define _pthread_self pthread_self
-#define _pthread_mutex_init pthread_mutex_init
-#define _pthread_mutex_trylock pthread_mutex_trylock
-#define _pthread_mutex_lock pthread_mutex_lock
-#define _pthread_mutex_unlock pthread_mutex_unlock
-#endif
-#include <sched.h>
-#include <stdarg.h>
-#include <stdbool.h>
-#include <stdio.h>
-#include <stdint.h>
-#include <stdlib.h>
-#include <string.h>
-#ifndef MOZ_MEMORY_DARWIN
-#include <strings.h>
-#endif
-#include <unistd.h>
-
-#ifdef MOZ_MEMORY_DARWIN
-#include <libkern/OSAtomic.h>
-#include <mach/mach_error.h>
-#include <mach/mach_init.h>
-#include <mach/vm_map.h>
-#include <malloc/malloc.h>
-#endif
-
-#ifndef MOZ_MEMORY
-#include "un-namespace.h"
-#endif
-
-#endif
-
-#include "jemalloc.h"
-
-#undef bool
-#define bool jemalloc_bool
-
-#ifdef MOZ_MEMORY_DARWIN
-static const bool __isthreaded = true;
-#endif
-
-#if defined(MOZ_MEMORY_SOLARIS) && defined(MAP_ALIGN) && !defined(JEMALLOC_NEVER_USES_MAP_ALIGN)
-#define JEMALLOC_USES_MAP_ALIGN /* Required on Solaris 10. Might improve performance elsewhere. */
-#endif
-
-#if defined(MOZ_MEMORY_WINCE) && !defined(MOZ_MEMORY_WINCE6)
-#define JEMALLOC_USES_MAP_ALIGN /* Required for Windows CE < 6 */
-#endif
-
-#ifndef __DECONST
-# define __DECONST(type, var) ((type)(uintptr_t)(const void *)(var))
-#endif
-
-#include "qr.h"
-#include "ql.h"
-#ifdef MOZ_MEMORY_WINDOWS
- /* MSVC++ does not support C99 variable-length arrays. */
-# define RB_NO_C99_VARARRAYS
-#endif
-#include "rb.h"
-
-#ifdef MALLOC_DEBUG
- /* Disable inlining to make debugging easier. */
-#ifdef inline
-#undef inline
-#endif
-
-# define inline
-#endif
-
-/* Size of stack-allocated buffer passed to strerror_r(). */
-#define STRERROR_BUF 64
-
-/* Minimum alignment of allocations is 2^QUANTUM_2POW_MIN bytes. */
-#define QUANTUM_2POW_MIN 4
-#ifdef MOZ_MEMORY_SIZEOF_PTR_2POW
-# define SIZEOF_PTR_2POW MOZ_MEMORY_SIZEOF_PTR_2POW
-#else
-# define SIZEOF_PTR_2POW 2
-#endif
-#define PIC
-#ifndef MOZ_MEMORY_DARWIN
-static const bool __isthreaded = true;
-#else
-# define NO_TLS
-#endif
-#if 0
-#ifdef __i386__
-# define QUANTUM_2POW_MIN 4
-# define SIZEOF_PTR_2POW 2
-# define CPU_SPINWAIT __asm__ volatile("pause")
-#endif
-#ifdef __ia64__
-# define QUANTUM_2POW_MIN 4
-# define SIZEOF_PTR_2POW 3
-#endif
-#ifdef __alpha__
-# define QUANTUM_2POW_MIN 4
-# define SIZEOF_PTR_2POW 3
-# define NO_TLS
-#endif
-#ifdef __sparc64__
-# define QUANTUM_2POW_MIN 4
-# define SIZEOF_PTR_2POW 3
-# define NO_TLS
-#endif
-#ifdef __amd64__
-# define QUANTUM_2POW_MIN 4
-# define SIZEOF_PTR_2POW 3
-# define CPU_SPINWAIT __asm__ volatile("pause")
-#endif
-#ifdef __arm__
-# define QUANTUM_2POW_MIN 3
-# define SIZEOF_PTR_2POW 2
-# define NO_TLS
-#endif
-#ifdef __mips__
-# define QUANTUM_2POW_MIN 3
-# define SIZEOF_PTR_2POW 2
-# define NO_TLS
-#endif
-#ifdef __powerpc__
-# define QUANTUM_2POW_MIN 4
-# define SIZEOF_PTR_2POW 2
-#endif
-#endif
-
-#define SIZEOF_PTR (1U << SIZEOF_PTR_2POW)
-
-/* sizeof(int) == (1U << SIZEOF_INT_2POW). */
-#ifndef SIZEOF_INT_2POW
-# define SIZEOF_INT_2POW 2
-#endif
-
-/* We can't use TLS in non-PIC programs, since TLS relies on loader magic. */
-#if (!defined(PIC) && !defined(NO_TLS))
-# define NO_TLS
-#endif
-
-#ifdef NO_TLS
- /* MALLOC_BALANCE requires TLS. */
-# ifdef MALLOC_BALANCE
-# undef MALLOC_BALANCE
-# endif
-#endif
-
-/*
- * Size and alignment of memory chunks that are allocated by the OS's virtual
- * memory system.
- */
-#if defined(MOZ_MEMORY_WINCE) && !defined(MOZ_MEMORY_WINCE6)
-#define CHUNK_2POW_DEFAULT 21
-#else
-#define CHUNK_2POW_DEFAULT 20
-#endif
-/* Maximum number of dirty pages per arena. */
-#define DIRTY_MAX_DEFAULT (1U << 10)
-
-/* Default reserve chunks. */
-#define RESERVE_MIN_2POW_DEFAULT 1
-/*
- * Default range (in chunks) between reserve_min and reserve_max, in addition
- * to the mandatory one chunk per arena.
- */
-#ifdef MALLOC_PAGEFILE
-# define RESERVE_RANGE_2POW_DEFAULT 5
-#else
-# define RESERVE_RANGE_2POW_DEFAULT 0
-#endif
-
-/*
- * Maximum size of L1 cache line. This is used to avoid cache line aliasing,
- * so over-estimates are okay (up to a point), but under-estimates will
- * negatively affect performance.
- */
-#define CACHELINE_2POW 6
-#define CACHELINE ((size_t)(1U << CACHELINE_2POW))
-
-/* Smallest size class to support. */
-#define TINY_MIN_2POW 1
-
-/*
- * Maximum size class that is a multiple of the quantum, but not (necessarily)
- * a power of 2. Above this size, allocations are rounded up to the nearest
- * power of 2.
- */
-#define SMALL_MAX_2POW_DEFAULT 9
-#define SMALL_MAX_DEFAULT (1U << SMALL_MAX_2POW_DEFAULT)
-
-/*
- * RUN_MAX_OVRHD indicates maximum desired run header overhead. Runs are sized
- * as small as possible such that this setting is still honored, without
- * violating other constraints. The goal is to make runs as small as possible
- * without exceeding a per run external fragmentation threshold.
- *
- * We use binary fixed point math for overhead computations, where the binary
- * point is implicitly RUN_BFP bits to the left.
- *
- * Note that it is possible to set RUN_MAX_OVRHD low enough that it cannot be
- * honored for some/all object sizes, since there is one bit of header overhead
- * per object (plus a constant). This constraint is relaxed (ignored) for runs
- * that are so small that the per-region overhead is greater than:
- *
- *   (RUN_MAX_OVRHD / (reg_size << (3+RUN_BFP)))
- */
-#define RUN_BFP 12
-/* \/ Implicit binary fixed point. */
-#define RUN_MAX_OVRHD 0x0000003dU
-#define RUN_MAX_OVRHD_RELAX 0x00001800U
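-
-/*
- * Worked example of the fixed-point arithmetic above (illustrative):
- * with RUN_BFP == 12 the implicit denominator is 1 << 12 == 4096, so
- * RUN_MAX_OVRHD == 0x3d == 61 caps run header overhead at 61/4096,
- * roughly 1.5% of the run.  In these fixed-point terms, a header of
- * hdr_size bytes in a run of run_size bytes is within the desired
- * bound when roughly:
- */
-#if 0	/* Illustrative only. */
-static bool
-example_overhead_ok(size_t hdr_size, size_t run_size)
-{
-
-	return (((hdr_size << RUN_BFP) / run_size) <= RUN_MAX_OVRHD);
-}
-#endif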
-
-/* Put a cap on small object run size. This overrides RUN_MAX_OVRHD. */
-#define RUN_MAX_SMALL_2POW 15
-#define RUN_MAX_SMALL (1U << RUN_MAX_SMALL_2POW)
-
-/*
- * Hyper-threaded CPUs may need a special instruction inside spin loops in
- * order to yield to another virtual CPU. If no such instruction is defined
- * above, make CPU_SPINWAIT a no-op.
- */
-#ifndef CPU_SPINWAIT
-# define CPU_SPINWAIT
-#endif
-
-/*
- * Adaptive spinning must eventually switch to blocking, in order to avoid the
- * potential for priority inversion deadlock. Backing off past a certain point
- * can actually waste time.
- */
-#define SPIN_LIMIT_2POW 11
-
-/*
- * Conversion from spinning to blocking is expensive; we use (1U <<
- * BLOCK_COST_2POW) to estimate how many more times costly blocking is than
- * worst-case spinning.
- */
-#define BLOCK_COST_2POW 4
-
-#ifdef MALLOC_BALANCE
- /*
- * We use an exponential moving average to track recent lock contention,
- * where the size of the history window is N, and alpha=2/(N+1).
- *
- * Due to integer math rounding, very small values here can cause
- * substantial degradation in accuracy, thus making the moving average decay
- * faster than it would with precise calculation.
- */
-# define BALANCE_ALPHA_INV_2POW 9
-
- /*
- * Threshold value for the exponential moving contention average at which to
- * re-assign a thread.
- */
-# define BALANCE_THRESHOLD_DEFAULT (1U << (SPIN_LIMIT_2POW-4))
-#endif
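-
-/*
- * Worked example (illustrative): BALANCE_ALPHA_INV_2POW == 9 gives
- * alpha == 1/512, so the effective history window is N == 2/alpha - 1
- * == 1023 samples.  An integer EMA update of this general shape is
- * assumed (not the allocator's exact code):
- */
-#if 0	/* Illustrative only. */
-static uint32_t
-example_ema_update(uint32_t ema, uint32_t sample)
-{
-	int32_t delta = (int32_t)sample - (int32_t)ema;
-
-	/* ema += alpha * (sample - ema), with alpha == 2^-9. */
-	return ((uint32_t)((int32_t)ema + delta / 512));
-}
-#endif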
-
-/******************************************************************************/
-
-/*
- * Mutexes based on spinlocks. We can't use normal pthread spinlocks in all
- * places, because they require malloc()ed memory, which causes bootstrapping
- * issues in some cases.
- */
-#if defined(MOZ_MEMORY_WINDOWS)
-#define malloc_mutex_t CRITICAL_SECTION
-#define malloc_spinlock_t CRITICAL_SECTION
-#elif defined(MOZ_MEMORY_DARWIN)
-typedef struct {
- OSSpinLock lock;
-} malloc_mutex_t;
-typedef struct {
- OSSpinLock lock;
-} malloc_spinlock_t;
-#elif defined(MOZ_MEMORY)
-typedef pthread_mutex_t malloc_mutex_t;
-typedef pthread_mutex_t malloc_spinlock_t;
-#else
-/* XXX these should be #ifdef'd for freebsd (and linux?) only */
-typedef struct {
-	spinlock_t lock;
-} malloc_mutex_t;
-typedef malloc_mutex_t malloc_spinlock_t;
-#endif
-
-/* Set to true once the allocator has been initialized. */
-static bool malloc_initialized = false;
-
-#if defined(MOZ_MEMORY_WINDOWS)
-/* No init lock for Windows. */
-#elif defined(MOZ_MEMORY_DARWIN)
-static malloc_mutex_t init_lock = {OS_SPINLOCK_INIT};
-#elif defined(MOZ_MEMORY_LINUX)
-static malloc_mutex_t init_lock = PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP;
-#elif defined(MOZ_MEMORY)
-static malloc_mutex_t init_lock = PTHREAD_MUTEX_INITIALIZER;
-#else
-static malloc_mutex_t init_lock = {_SPINLOCK_INITIALIZER};
-#endif
-
-/******************************************************************************/
-/*
- * Statistics data structures.
- */
-
-#ifdef MALLOC_STATS
-
-typedef struct malloc_bin_stats_s malloc_bin_stats_t;
-struct malloc_bin_stats_s {
- /*
- * Number of allocation requests that corresponded to the size of this
- * bin.
- */
- uint64_t nrequests;
-
- /* Total number of runs created for this bin's size class. */
- uint64_t nruns;
-
- /*
- * Total number of runs reused by extracting them from the runs tree for
- * this bin's size class.
- */
- uint64_t reruns;
-
- /* High-water mark for this bin. */
- unsigned long highruns;
-
- /* Current number of runs in this bin. */
- unsigned long curruns;
-};
-
-typedef struct arena_stats_s arena_stats_t;
-struct arena_stats_s {
- /* Number of bytes currently mapped. */
- size_t mapped;
-
- /*
- * Total number of purge sweeps, total number of madvise calls made,
- * and total pages purged in order to keep dirty unused memory under
- * control.
- */
- uint64_t npurge;
- uint64_t nmadvise;
- uint64_t purged;
-#ifdef MALLOC_DECOMMIT
- /*
- * Total number of decommit/commit operations, and total number of
- * pages decommitted.
- */
- uint64_t ndecommit;
- uint64_t ncommit;
- uint64_t decommitted;
-#endif
-
- /* Per-size-category statistics. */
- size_t allocated_small;
- uint64_t nmalloc_small;
- uint64_t ndalloc_small;
-
- size_t allocated_large;
- uint64_t nmalloc_large;
- uint64_t ndalloc_large;
-
-#ifdef MALLOC_BALANCE
- /* Number of times this arena reassigned a thread due to contention. */
- uint64_t nbalance;
-#endif
-};
-
-typedef struct chunk_stats_s chunk_stats_t;
-struct chunk_stats_s {
- /* Number of chunks that were allocated. */
- uint64_t nchunks;
-
- /* High-water mark for number of chunks allocated. */
- unsigned long highchunks;
-
- /*
- * Current number of chunks allocated. This value isn't maintained for
- * any other purpose, so keep track of it in order to be able to set
- * highchunks.
- */
- unsigned long curchunks;
-};
-
-#endif /* #ifdef MALLOC_STATS */
-
-/******************************************************************************/
-/*
- * Extent data structures.
- */
-
-/* Tree of extents. */
-typedef struct extent_node_s extent_node_t;
-struct extent_node_s {
- /* Linkage for the size/address-ordered tree. */
- rb_node(extent_node_t) link_szad;
-
- /* Linkage for the address-ordered tree. */
- rb_node(extent_node_t) link_ad;
-
- /* Pointer to the extent that this tree node is responsible for. */
- void *addr;
-
- /* Total region size. */
- size_t size;
-};
-typedef rb_tree(extent_node_t) extent_tree_t;
-
-/******************************************************************************/
-/*
- * Radix tree data structures.
- */
-
-#ifdef MALLOC_VALIDATE
- /*
- * Size of each radix tree node (must be a power of 2). This impacts tree
- * depth.
- */
-# if (SIZEOF_PTR == 4)
-# define MALLOC_RTREE_NODESIZE (1U << 14)
-# else
-# define MALLOC_RTREE_NODESIZE CACHELINE
-# endif
-
-typedef struct malloc_rtree_s malloc_rtree_t;
-struct malloc_rtree_s {
- malloc_spinlock_t lock;
- void **root;
- unsigned height;
- unsigned level2bits[1]; /* Dynamically sized. */
-};
-#endif
-
-/******************************************************************************/
-/*
- * Reserve data structures.
- */
-
-/* Callback registration. */
-typedef struct reserve_reg_s reserve_reg_t;
-struct reserve_reg_s {
- /* Linkage for list of all registered callbacks. */
- ql_elm(reserve_reg_t) link;
-
- /* Callback function pointer. */
- reserve_cb_t *cb;
-
- /* Opaque application data pointer. */
- void *ctx;
-
- /*
- * Sequence number of condition notification most recently sent to this
- * callback.
- */
- uint64_t seq;
-};
-
-/******************************************************************************/
-/*
- * Arena data structures.
- */
-
-typedef struct arena_s arena_t;
-typedef struct arena_bin_s arena_bin_t;
-
-/* Each element of the chunk map corresponds to one page within the chunk. */
-typedef struct arena_chunk_map_s arena_chunk_map_t;
-struct arena_chunk_map_s {
- /*
- * Linkage for run trees. There are two disjoint uses:
- *
- * 1) arena_t's runs_avail tree.
- * 2) arena_run_t conceptually uses this linkage for in-use non-full
- * runs, rather than directly embedding linkage.
- */
- rb_node(arena_chunk_map_t) link;
-
- /*
- * Run address (or size) and various flags are stored together. The bit
- * layout looks like (assuming 32-bit system):
- *
- * ???????? ???????? ????---- --ckdzla
- *
- * ? : Unallocated: Run address for first/last pages, unset for internal
- * pages.
- * Small: Run address.
- * Large: Run size for first page, unset for trailing pages.
- * - : Unused.
- * c : decommitted?
- * k : key?
- * d : dirty?
- * z : zeroed?
- * l : large?
- * a : allocated?
- *
- * Following are example bit patterns for the three types of runs.
- *
- * r : run address
- * s : run size
- * x : don't care
- * - : 0
- * [cdzla] : bit set
- *
- * Unallocated:
- * ssssssss ssssssss ssss---- --c-----
- * xxxxxxxx xxxxxxxx xxxx---- ----d---
- * ssssssss ssssssss ssss---- -----z--
- *
- * Small:
- * rrrrrrrr rrrrrrrr rrrr---- -------a
- * rrrrrrrr rrrrrrrr rrrr---- -------a
- * rrrrrrrr rrrrrrrr rrrr---- -------a
- *
- * Large:
- * ssssssss ssssssss ssss---- ------la
- * -------- -------- -------- ------la
- * -------- -------- -------- ------la
- */
- size_t bits;
-#ifdef MALLOC_DECOMMIT
-#define CHUNK_MAP_DECOMMITTED ((size_t)0x20U)
-#endif
-#define CHUNK_MAP_KEY ((size_t)0x10U)
-#define CHUNK_MAP_DIRTY ((size_t)0x08U)
-#define CHUNK_MAP_ZEROED ((size_t)0x04U)
-#define CHUNK_MAP_LARGE ((size_t)0x02U)
-#define CHUNK_MAP_ALLOCATED ((size_t)0x01U)
-};
-typedef rb_tree(arena_chunk_map_t) arena_avail_tree_t;
-typedef rb_tree(arena_chunk_map_t) arena_run_tree_t;
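-
-/*
- * For illustration, decoding one map entry under the layout above
- * might look like the following sketch (the helper and its
- * pagesize_mask parameter are hypothetical; the flag names are the
- * ones defined here):
- */
-#if 0	/* Illustrative only. */
-static void
-example_describe_mapelm(size_t bits, size_t pagesize_mask)
-{
-	bool allocated = (bits & CHUNK_MAP_ALLOCATED) != 0;
-	bool large = (bits & CHUNK_MAP_LARGE) != 0;
-	/*
-	 * The high bits hold a run address (small), a run size (large
-	 * first page, unallocated first/last page), or nothing.
-	 */
-	size_t size_or_addr = bits & ~pagesize_mask;
-}
-#endif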
-
-/* Arena chunk header. */
-typedef struct arena_chunk_s arena_chunk_t;
-struct arena_chunk_s {
- /* Arena that owns the chunk. */
- arena_t *arena;
-
- /* Linkage for the arena's chunks_dirty tree. */
- rb_node(arena_chunk_t) link_dirty;
-
- /* Number of dirty pages. */
- size_t ndirty;
-
- /* Map of pages within chunk that keeps track of free/large/small. */
- arena_chunk_map_t map[1]; /* Dynamically sized. */
-};
-typedef rb_tree(arena_chunk_t) arena_chunk_tree_t;
-
-typedef struct arena_run_s arena_run_t;
-struct arena_run_s {
-#ifdef MALLOC_DEBUG
- uint32_t magic;
-# define ARENA_RUN_MAGIC 0x384adf93
-#endif
-
- /* Bin this run is associated with. */
- arena_bin_t *bin;
-
- /* Index of first element that might have a free region. */
- unsigned regs_minelm;
-
- /* Number of free regions in run. */
- unsigned nfree;
-
- /* Bitmask of in-use regions (0: in use, 1: free). */
- unsigned regs_mask[1]; /* Dynamically sized. */
-};
-
-struct arena_bin_s {
- /*
- * Current run being used to service allocations of this bin's size
- * class.
- */
- arena_run_t *runcur;
-
- /*
- * Tree of non-full runs. This tree is used when looking for an
- * existing run when runcur is no longer usable. We choose the
- * non-full run that is lowest in memory; this policy tends to keep
- * objects packed well, and it can also help reduce the number of
- * almost-empty chunks.
- */
- arena_run_tree_t runs;
-
- /* Size of regions in a run for this bin's size class. */
- size_t reg_size;
-
- /* Total size of a run for this bin's size class. */
- size_t run_size;
-
- /* Total number of regions in a run for this bin's size class. */
- uint32_t nregs;
-
- /* Number of elements in a run's regs_mask for this bin's size class. */
- uint32_t regs_mask_nelms;
-
- /* Offset of first region in a run for this bin's size class. */
- uint32_t reg0_offset;
-
-#ifdef MALLOC_STATS
- /* Bin statistics. */
- malloc_bin_stats_t stats;
-#endif
-};
-
-struct arena_s {
-#ifdef MALLOC_DEBUG
- uint32_t magic;
-# define ARENA_MAGIC 0x947d3d24
-#endif
-
- /* All operations on this arena require that lock be locked. */
-#ifdef MOZ_MEMORY
- malloc_spinlock_t lock;
-#else
- pthread_mutex_t lock;
-#endif
-
-#ifdef MALLOC_STATS
- arena_stats_t stats;
-#endif
-
- /*
- * Chunk allocation sequence number, used to detect races with other
- * threads during chunk allocation, and then discard unnecessary chunks.
- */
- uint64_t chunk_seq;
-
- /* Tree of dirty-page-containing chunks this arena manages. */
- arena_chunk_tree_t chunks_dirty;
-
- /*
- * In order to avoid rapid chunk allocation/deallocation when an arena
- * oscillates right on the cusp of needing a new chunk, cache the most
- * recently freed chunk. The spare is left in the arena's chunk trees
- * until it is deleted.
- *
- * There is one spare chunk per arena, rather than one spare total, in
- * order to avoid interactions between multiple threads that could make
- * a single spare inadequate.
- */
- arena_chunk_t *spare;
-
- /*
- * Current count of pages within unused runs that are potentially
- * dirty, and for which madvise(... MADV_FREE) has not been called. By
- * tracking this, we can institute a limit on how much dirty unused
- * memory is mapped for each arena.
- */
- size_t ndirty;
-
- /*
- * Size/address-ordered tree of this arena's available runs. This tree
- * is used for first-best-fit run allocation.
- */
- arena_avail_tree_t runs_avail;
-
-#ifdef MALLOC_BALANCE
- /*
- * The arena load balancing machinery needs to keep track of how much
- * lock contention there is. This value is exponentially averaged.
- */
- uint32_t contention;
-#endif
-
- /*
- * bins is used to store rings of free regions of the following sizes,
- * assuming a 16-byte quantum, 4kB pagesize, and default MALLOC_OPTIONS.
- *
- * bins[i] | size |
- * --------+------+
- * 0 | 2 |
- * 1 | 4 |
- * 2 | 8 |
- * --------+------+
- * 3 | 16 |
- * 4 | 32 |
- * 5 | 48 |
- * 6 | 64 |
- * : :
- * : :
- * 33 | 496 |
- * 34 | 512 |
- * --------+------+
- * 35 | 1024 |
- * 36 | 2048 |
- * --------+------+
- */
- arena_bin_t bins[1]; /* Dynamically sized. */
-};
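-
-/*
- * Worked example of the bin layout above (an illustrative sketch
- * assuming the same defaults; this is not the allocator's actual
- * lookup path):
- */
-#if 0	/* Illustrative only. */
-static unsigned
-example_size_to_bin(size_t size)
-{
-
-	if (size <= 8) {			/* Tiny bins 0..2. */
-		unsigned i = 0;
-		size_t c = 2;
-
-		while (c < size) {
-			c <<= 1;
-			i++;
-		}
-		return (i);
-	}
-	if (size <= 512)			/* Quantum bins 3..34. */
-		return (2 + (unsigned)((size + 15) >> 4));
-	return ((size <= 1024) ? 35 : 36);	/* Sub-page bins 35..36. */
-}
-/* example_size_to_bin(100) == 9, the 112-byte bin. */
-#endif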
-
-/******************************************************************************/
-/*
- * Data.
- */
-
-/* Number of CPUs. */
-static unsigned ncpus;
-
-/* VM page size. */
-static size_t pagesize;
-static size_t pagesize_mask;
-static size_t pagesize_2pow;
-
-/* Various bin-related settings. */
-static size_t bin_maxclass; /* Max size class for bins. */
-static unsigned ntbins; /* Number of (2^n)-spaced tiny bins. */
-static unsigned nqbins; /* Number of quantum-spaced bins. */
-static unsigned nsbins; /* Number of (2^n)-spaced sub-page bins. */
-static size_t small_min;
-static size_t small_max;
-
-/* Various quantum-related settings. */
-static size_t quantum;
-static size_t quantum_mask; /* (quantum - 1). */
-
-/* Various chunk-related settings. */
-static size_t chunksize;
-static size_t chunksize_mask; /* (chunksize - 1). */
-static size_t chunk_npages;
-static size_t arena_chunk_header_npages;
-static size_t arena_maxclass; /* Max size class for arenas. */
-
-/********/
-/*
- * Chunks.
- */
-
-#ifdef MALLOC_VALIDATE
-static malloc_rtree_t *chunk_rtree;
-#endif
-
-/* Protects chunk-related data structures. */
-static malloc_mutex_t huge_mtx;
-
-/* Tree of chunks that are stand-alone huge allocations. */
-static extent_tree_t huge;
-
-#ifdef MALLOC_STATS
-/* Huge allocation statistics. */
-static uint64_t huge_nmalloc;
-static uint64_t huge_ndalloc;
-static size_t huge_allocated;
-#endif
-
-/****************/
-/*
- * Memory reserve.
- */
-
-#ifdef MALLOC_PAGEFILE
-static char pagefile_templ[PATH_MAX];
-#endif
-
-/* Protects reserve-related data structures. */
-static malloc_mutex_t reserve_mtx;
-
-/*
- * Bounds on acceptable reserve size, and current reserve size. Reserve
- * depletion may cause (reserve_cur < reserve_min).
- */
-static size_t reserve_min;
-static size_t reserve_cur;
-static size_t reserve_max;
-
-/* List of registered callbacks. */
-static ql_head(reserve_reg_t) reserve_regs;
-
-/*
- * Condition notification sequence number, used to determine whether all
- * registered callbacks have been notified of the most current condition.
- */
-static uint64_t reserve_seq;
-
-/*
- * Trees of chunks currently in the memory reserve. Depending on function,
- * different tree orderings are needed, which is why there are two trees with
- * the same contents.
- */
-static extent_tree_t reserve_chunks_szad;
-static extent_tree_t reserve_chunks_ad;
-
-/****************************/
-/*
- * base (internal allocation).
- */
-
-/*
- * Current pages that are being used for internal memory allocations. These
- * pages are carved up in cacheline-size quanta, so that there is no chance of
- * false cache line sharing.
- */
-static void *base_pages;
-static void *base_next_addr;
-#ifdef MALLOC_DECOMMIT
-static void *base_next_decommitted;
-#endif
-static void *base_past_addr; /* Addr immediately past base_pages. */
-static extent_node_t *base_nodes;
-static reserve_reg_t *base_reserve_regs;
-static malloc_mutex_t base_mtx;
-#ifdef MALLOC_STATS
-static size_t base_mapped;
-#endif
-
-/********/
-/*
- * Arenas.
- */
-
-/*
- * Arenas that are used to service external requests. Not all elements of the
- * arenas array are necessarily used; arenas are created lazily as needed.
- */
-static arena_t **arenas;
-static unsigned narenas;
-static unsigned narenas_2pow;
-#ifndef NO_TLS
-/* With MALLOC_BALANCE, the narenas_2pow declared above is used instead. */
-# ifndef MALLOC_BALANCE
-static unsigned next_arena;
-# endif
-#endif
-#ifdef MOZ_MEMORY
-static malloc_spinlock_t arenas_lock; /* Protects arenas initialization. */
-#else
-static pthread_mutex_t arenas_lock; /* Protects arenas initialization. */
-#endif
-
-#ifndef NO_TLS
-/*
- * Map of pthread_self() --> arenas[???], used for selecting an arena to use
- * for allocations.
- */
-#ifndef MOZ_MEMORY_WINDOWS
-static __thread arena_t *arenas_map;
-#endif
-#endif
-
-#ifdef MALLOC_STATS
-/* Chunk statistics. */
-static chunk_stats_t stats_chunks;
-#endif
-
-/*******************************/
-/*
- * Runtime configuration options.
- */
-const char *_malloc_options;
-
-#ifndef MALLOC_PRODUCTION
-static bool opt_abort = true;
-#ifdef MALLOC_FILL
-static bool opt_junk = true;
-#endif
-#else
-static bool opt_abort = false;
-#ifdef MALLOC_FILL
-static bool opt_junk = false;
-#endif
-#endif
-static size_t opt_dirty_max = DIRTY_MAX_DEFAULT;
-#ifdef MALLOC_BALANCE
-static uint64_t opt_balance_threshold = BALANCE_THRESHOLD_DEFAULT;
-#endif
-static bool opt_print_stats = false;
-static size_t opt_quantum_2pow = QUANTUM_2POW_MIN;
-static size_t opt_small_max_2pow = SMALL_MAX_2POW_DEFAULT;
-static size_t opt_chunk_2pow = CHUNK_2POW_DEFAULT;
-static int opt_reserve_min_lshift = 0;
-static int opt_reserve_range_lshift = 0;
-#ifdef MALLOC_PAGEFILE
-static bool opt_pagefile = false;
-#endif
-#ifdef MALLOC_UTRACE
-static bool opt_utrace = false;
-#endif
-#ifdef MALLOC_SYSV
-static bool opt_sysv = false;
-#endif
-#ifdef MALLOC_XMALLOC
-static bool opt_xmalloc = false;
-#endif
-#ifdef MALLOC_FILL
-static bool opt_zero = false;
-#endif
-static int opt_narenas_lshift = 0;
-
-#ifdef MALLOC_UTRACE
-typedef struct {
- void *p;
- size_t s;
- void *r;
-} malloc_utrace_t;
-
-#define UTRACE(a, b, c) do { \
-	if (opt_utrace) { \
-		malloc_utrace_t ut; \
-		ut.p = (a); \
-		ut.s = (b); \
-		ut.r = (c); \
-		utrace(&ut, sizeof(ut)); \
-	} \
-} while (0)
-#else
-#define UTRACE(a, b, c) do { } while (0)
-#endif
-
-/******************************************************************************/
-/*
- * Begin function prototypes for non-inline static functions.
- */
-
-static char *umax2s(uintmax_t x, char *s);
-static bool malloc_mutex_init(malloc_mutex_t *mutex);
-static bool malloc_spin_init(malloc_spinlock_t *lock);
-static void wrtmessage(const char *p1, const char *p2, const char *p3,
- const char *p4);
-#ifdef MALLOC_STATS
-#ifdef MOZ_MEMORY_DARWIN
-/* Avoid namespace collision with OS X's malloc APIs. */
-#define malloc_printf moz_malloc_printf
-#endif
-static void malloc_printf(const char *format, ...);
-#endif
-static bool base_pages_alloc_mmap(size_t minsize);
-static bool base_pages_alloc(size_t minsize);
-static void *base_alloc(size_t size);
-static void *base_calloc(size_t number, size_t size);
-static extent_node_t *base_node_alloc(void);
-static void base_node_dealloc(extent_node_t *node);
-static reserve_reg_t *base_reserve_reg_alloc(void);
-static void base_reserve_reg_dealloc(reserve_reg_t *reg);
-#ifdef MALLOC_STATS
-static void stats_print(arena_t *arena);
-#endif
-static void *pages_map(void *addr, size_t size, int pfd);
-static void pages_unmap(void *addr, size_t size);
-static void *chunk_alloc_mmap(size_t size, bool pagefile);
-#ifdef MALLOC_PAGEFILE
-static int pagefile_init(size_t size);
-static void pagefile_close(int pfd);
-#endif
-static void *chunk_recycle_reserve(size_t size, bool zero);
-static void *chunk_alloc(size_t size, bool zero, bool pagefile);
-static extent_node_t *chunk_dealloc_reserve(void *chunk, size_t size);
-static void chunk_dealloc_mmap(void *chunk, size_t size);
-static void chunk_dealloc(void *chunk, size_t size);
-#ifndef NO_TLS
-static arena_t *choose_arena_hard(void);
-#endif
-static void arena_run_split(arena_t *arena, arena_run_t *run, size_t size,
- bool large, bool zero);
-static void arena_chunk_init(arena_t *arena, arena_chunk_t *chunk);
-static void arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk);
-static arena_run_t *arena_run_alloc(arena_t *arena, arena_bin_t *bin,
- size_t size, bool large, bool zero);
-static void arena_purge(arena_t *arena);
-static void arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty);
-static void arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk,
- arena_run_t *run, size_t oldsize, size_t newsize);
-static void arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk,
- arena_run_t *run, size_t oldsize, size_t newsize, bool dirty);
-static arena_run_t *arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin);
-static void *arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin);
-static size_t arena_bin_run_size_calc(arena_bin_t *bin, size_t min_run_size);
-#ifdef MALLOC_BALANCE
-static void arena_lock_balance_hard(arena_t *arena);
-#endif
-static void *arena_malloc_large(arena_t *arena, size_t size, bool zero);
-static void *arena_palloc(arena_t *arena, size_t alignment, size_t size,
- size_t alloc_size);
-static size_t arena_salloc(const void *ptr);
-static void arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk,
- void *ptr);
-static void arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk,
- void *ptr, size_t size, size_t oldsize);
-static bool arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk,
- void *ptr, size_t size, size_t oldsize);
-static bool arena_ralloc_large(void *ptr, size_t size, size_t oldsize);
-static void *arena_ralloc(void *ptr, size_t size, size_t oldsize);
-static bool arena_new(arena_t *arena);
-static arena_t *arenas_extend(unsigned ind);
-static void *huge_malloc(size_t size, bool zero);
-static void *huge_palloc(size_t alignment, size_t size);
-static void *huge_ralloc(void *ptr, size_t size, size_t oldsize);
-static void huge_dalloc(void *ptr);
-static void malloc_print_stats(void);
-#ifndef MOZ_MEMORY_WINDOWS
-static
-#endif
-bool malloc_init_hard(void);
-static void reserve_shrink(void);
-static uint64_t reserve_notify(reserve_cnd_t cnd, size_t size, uint64_t seq);
-static uint64_t reserve_crit(size_t size, const char *fname, uint64_t seq);
-static void reserve_fail(size_t size, const char *fname);
-
-void _malloc_prefork(void);
-void _malloc_postfork(void);
-
-/*
- * End function prototypes.
- */
-/******************************************************************************/
-
-/*
- * umax2s() provides minimal integer printing functionality, which is
- * especially useful for situations where allocation in vsnprintf() calls would
- * potentially cause deadlock.
- */
-#define UMAX2S_BUFSIZE 21
-static char *
-umax2s(uintmax_t x, char *s)
-{
- unsigned i;
-
- i = UMAX2S_BUFSIZE - 1;
- s[i] = '\0';
- do {
- i--;
- s[i] = "0123456789"[x % 10];
- x /= 10;
- } while (x > 0);
-
- return (&s[i]);
-}
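-
-/*
- * For example, umax2s() is used with a caller-provided buffer, along
- * the lines of this (illustrative) fragment; note that the returned
- * pointer points into buf, not at its start:
- */
-#if 0	/* Illustrative only. */
-	char buf[UMAX2S_BUFSIZE];
-
-	/* Prints "allocated 1234 bytes" without calling malloc(). */
-	_malloc_message("allocated ", umax2s(1234, buf), " bytes", "\n");
-#endif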
-
-static void
-wrtmessage(const char *p1, const char *p2, const char *p3, const char *p4)
-{
-#ifdef MOZ_MEMORY_WINCE
- wchar_t buf[1024];
-#define WRT_PRINT(s) do { \
-	MultiByteToWideChar(CP_ACP, 0, s, -1, buf, 1024); \
-	OutputDebugStringW(buf); \
-} while (0)
-
- WRT_PRINT(p1);
- WRT_PRINT(p2);
- WRT_PRINT(p3);
- WRT_PRINT(p4);
-#else
-#if defined(MOZ_MEMORY) && !defined(MOZ_MEMORY_WINDOWS)
-#define _write write
-#endif
- _write(STDERR_FILENO, p1, (unsigned int) strlen(p1));
- _write(STDERR_FILENO, p2, (unsigned int) strlen(p2));
- _write(STDERR_FILENO, p3, (unsigned int) strlen(p3));
- _write(STDERR_FILENO, p4, (unsigned int) strlen(p4));
-#endif
-
-}
-
-#define _malloc_message malloc_message
-
-void (*_malloc_message)(const char *p1, const char *p2, const char *p3,
- const char *p4) = wrtmessage;
-
-#ifdef MALLOC_DEBUG
-# define assert(e) do { \
- if (!(e)) { \
- char line_buf[UMAX2S_BUFSIZE]; \
- _malloc_message(__FILE__, ":", umax2s(__LINE__, \
- line_buf), ": Failed assertion: "); \
- _malloc_message("\"", #e, "\"\n", ""); \
- abort(); \
- } \
-} while (0)
-#else
-#define assert(e)
-#endif
-
-/******************************************************************************/
-/*
- * Begin mutex. We can't use normal pthread mutexes in all places, because
- * they require malloc()ed memory, which causes bootstrapping issues in some
- * cases.
- */
-
-static bool
-malloc_mutex_init(malloc_mutex_t *mutex)
-{
-#if defined(MOZ_MEMORY_WINCE)
- InitializeCriticalSection(mutex);
-#elif defined(MOZ_MEMORY_WINDOWS)
- // XXXMB
- //if (__isthreaded)
- // if (! __crtInitCritSecAndSpinCount(mutex, _CRT_SPINCOUNT))
- // return (true);
- if (!InitializeCriticalSectionAndSpinCount(mutex, 4000))
- return true;
-#elif defined(MOZ_MEMORY_DARWIN)
- mutex->lock = OS_SPINLOCK_INIT;
-#elif defined(MOZ_MEMORY_LINUX)
- pthread_mutexattr_t attr;
- if (pthread_mutexattr_init(&attr) != 0)
- return (true);
- pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ADAPTIVE_NP);
- if (pthread_mutex_init(mutex, &attr) != 0) {
- pthread_mutexattr_destroy(&attr);
- return (true);
- }
- pthread_mutexattr_destroy(&attr);
-#elif defined(MOZ_MEMORY)
- if (pthread_mutex_init(mutex, NULL) != 0)
- return (true);
-#else
- static const spinlock_t lock = _SPINLOCK_INITIALIZER;
-
- mutex->lock = lock;
-#endif
- return (false);
-}
-
-static inline void
-malloc_mutex_lock(malloc_mutex_t *mutex)
-{
-
-#if defined(MOZ_MEMORY_WINDOWS)
- EnterCriticalSection(mutex);
-#elif defined(MOZ_MEMORY_DARWIN)
- OSSpinLockLock(&mutex->lock);
-#elif defined(MOZ_MEMORY)
- pthread_mutex_lock(mutex);
-#else
- if (__isthreaded)
- _SPINLOCK(&mutex->lock);
-#endif
-}
-
-static inline void
-malloc_mutex_unlock(malloc_mutex_t *mutex)
-{
-
-#if defined(MOZ_MEMORY_WINDOWS)
- LeaveCriticalSection(mutex);
-#elif defined(MOZ_MEMORY_DARWIN)
- OSSpinLockUnlock(&mutex->lock);
-#elif defined(MOZ_MEMORY)
- pthread_mutex_unlock(mutex);
-#else
- if (__isthreaded)
- _SPINUNLOCK(&mutex->lock);
-#endif
-}
-
-static bool
-malloc_spin_init(malloc_spinlock_t *lock)
-{
-#if defined(MOZ_MEMORY_WINCE)
- InitializeCriticalSection(lock);
-#elif defined(MOZ_MEMORY_WINDOWS)
- // XXXMB
- //if (__isthreaded)
- // if (! __crtInitCritSecAndSpinCount(lock, _CRT_SPINCOUNT))
- // return (true);
-#elif defined(MOZ_MEMORY_DARWIN)
- lock->lock = OS_SPINLOCK_INIT;
-#elif defined(MOZ_MEMORY_LINUX)
- pthread_mutexattr_t attr;
- if (pthread_mutexattr_init(&attr) != 0)
- return (true);
- pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ADAPTIVE_NP);
- if (pthread_mutex_init(lock, &attr) != 0) {
- pthread_mutexattr_destroy(&attr);
- return (true);
- }
- pthread_mutexattr_destroy(&attr);
-#elif defined(MOZ_MEMORY)
- if (pthread_mutex_init(lock, NULL) != 0)
- return (true);
-#else
- lock->lock = _SPINLOCK_INITIALIZER;
-#endif
- return (false);
-}
-
-static inline void
-malloc_spin_lock(malloc_spinlock_t *lock)
-{
-
-#if defined(MOZ_MEMORY_WINDOWS)
- EnterCriticalSection(lock);
-#elif defined(MOZ_MEMORY_DARWIN)
- OSSpinLockLock(&lock->lock);
-#elif defined(MOZ_MEMORY)
- pthread_mutex_lock(lock);
-#else
- if (__isthreaded)
- _SPINLOCK(&lock->lock);
-#endif
-}
-
-static inline void
-malloc_spin_unlock(malloc_spinlock_t *lock)
-{
-#if defined(MOZ_MEMORY_WINDOWS)
- LeaveCriticalSection(lock);
-#elif defined(MOZ_MEMORY_DARWIN)
- OSSpinLockUnlock(&lock->lock);
-#elif defined(MOZ_MEMORY)
- pthread_mutex_unlock(lock);
-#else
- if (__isthreaded)
- _SPINUNLOCK(&lock->lock);
-#endif
-}
-
-/*
- * End mutex.
- */
-/******************************************************************************/
-/*
- * Begin spin lock. Spin locks here are actually adaptive mutexes that block
- * after a period of spinning, because unbounded spinning would allow for
- * priority inversion.
- */
-
-#if defined(MOZ_MEMORY) && !defined(MOZ_MEMORY_DARWIN)
-# define malloc_spin_init malloc_mutex_init
-# define malloc_spin_lock malloc_mutex_lock
-# define malloc_spin_unlock malloc_mutex_unlock
-#endif
-
-#ifndef MOZ_MEMORY
-/*
- * We use an unpublished interface to initialize pthread mutexes with an
- * allocation callback, in order to avoid infinite recursion.
- */
-int _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
- void *(calloc_cb)(size_t, size_t));
-
-__weak_reference(_pthread_mutex_init_calloc_cb_stub,
- _pthread_mutex_init_calloc_cb);
-
-int
-_pthread_mutex_init_calloc_cb_stub(pthread_mutex_t *mutex,
- void *(calloc_cb)(size_t, size_t))
-{
-
- return (0);
-}
-
-static bool
-malloc_spin_init(pthread_mutex_t *lock)
-{
-
- if (_pthread_mutex_init_calloc_cb(lock, base_calloc) != 0)
- return (true);
-
- return (false);
-}
-
-static inline unsigned
-malloc_spin_lock(pthread_mutex_t *lock)
-{
- unsigned ret = 0;
-
- if (__isthreaded) {
- if (_pthread_mutex_trylock(lock) != 0) {
- unsigned i;
- volatile unsigned j;
-
- /* Exponentially back off. */
- for (i = 1; i <= SPIN_LIMIT_2POW; i++) {
- for (j = 0; j < (1U << i); j++)
- ret++;
-
- CPU_SPINWAIT;
- if (_pthread_mutex_trylock(lock) == 0)
- return (ret);
- }
-
- /*
- * Spinning failed. Block until the lock becomes
- * available, in order to avoid indefinite priority
- * inversion.
- */
- _pthread_mutex_lock(lock);
- assert((ret << BLOCK_COST_2POW) != 0);
- return (ret << BLOCK_COST_2POW);
- }
- }
-
- return (ret);
-}
-
-static inline void
-malloc_spin_unlock(pthread_mutex_t *lock)
-{
-
- if (__isthreaded)
- _pthread_mutex_unlock(lock);
-}
-#endif
-
-/*
- * End spin lock.
- */
-/******************************************************************************/
-/*
- * Begin Utility functions/macros.
- */
-
-/* Return the chunk address for allocation address a. */
-#define CHUNK_ADDR2BASE(a) \
- ((void *)((uintptr_t)(a) & ~chunksize_mask))
-
-/* Return the chunk offset of address a. */
-#define CHUNK_ADDR2OFFSET(a) \
- ((size_t)((uintptr_t)(a) & chunksize_mask))
-
-/* Return the smallest chunk multiple that is >= s. */
-#define CHUNK_CEILING(s) \
- (((s) + chunksize_mask) & ~chunksize_mask)
-
-/* Return the smallest cacheline multiple that is >= s. */
-#define CACHELINE_CEILING(s) \
- (((s) + (CACHELINE - 1)) & ~(CACHELINE - 1))
-
-/* Return the smallest quantum multiple that is >= a. */
-#define QUANTUM_CEILING(a) \
- (((a) + quantum_mask) & ~quantum_mask)
-
-/* Return the smallest pagesize multiple that is >= s. */
-#define PAGE_CEILING(s) \
- (((s) + pagesize_mask) & ~pagesize_mask)
-
-/* Compute the smallest power of 2 that is >= x. */
-static inline size_t
-pow2_ceil(size_t x)
-{
-
- x--;
- x |= x >> 1;
- x |= x >> 2;
- x |= x >> 4;
- x |= x >> 8;
- x |= x >> 16;
-#if (SIZEOF_PTR == 8)
- x |= x >> 32;
-#endif
- x++;
- return (x);
-}
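-
-/*
- * Worked example (illustrative): pow2_ceil(37).  After x--, x == 36 ==
- * 0b100100; the shifts smear the highest set bit into every lower
- * position, giving 0b111111 == 63; x++ then yields 64.  Powers of 2
- * map to themselves: pow2_ceil(64) == 64.
- */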
-
-#ifdef MALLOC_BALANCE
-/*
- * Use a simple linear congruential pseudo-random number generator:
- *
- * prn(x) = (a*x + c) % m
- *
- * where the following constants ensure maximal period:
- *
- * a == Odd number (relatively prime to 2^n), and (a-1) is a multiple of 4.
- * c == Odd number (relatively prime to 2^n).
- * m == 2^32
- *
- * See Knuth's TAOCP 3rd Ed., Vol. 2, pg. 17 for details on these constraints.
- *
- * This choice of m has the disadvantage that the quality of the bits is
- * proportional to bit position. For example, the lowest bit has a cycle of 2,
- * the next has a cycle of 4, etc. For this reason, we prefer to use the upper
- * bits.
- */
-# define PRN_DEFINE(suffix, var, a, c) \
-static inline void \
-sprn_##suffix(uint32_t seed) \
-{ \
- var = seed; \
-} \
- \
-static inline uint32_t \
-prn_##suffix(uint32_t lg_range) \
-{ \
- uint32_t ret, x; \
- \
- assert(lg_range > 0); \
- assert(lg_range <= 32); \
- \
- x = (var * (a)) + (c); \
- var = x; \
- ret = x >> (32 - lg_range); \
- \
- return (ret); \
-}
-# define SPRN(suffix, seed) sprn_##suffix(seed)
-# define PRN(suffix, lg_range) prn_##suffix(lg_range)
-#endif
-
-#ifdef MALLOC_BALANCE
-/* Define the PRNG used for arena assignment. */
-static __thread uint32_t balance_x;
-PRN_DEFINE(balance, balance_x, 1297, 1301)
-#endif
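-
-/*
- * An illustrative (hypothetical) use of the balance PRNG, following
- * the seed/draw pattern the macros above expect:
- */
-#if 0	/* Illustrative only. */
-	unsigned ind;
-
-	SPRN(balance, 42);	/* Seed this thread's PRNG state. */
-	/* Draw the high-quality upper bits: 0 <= ind < 2^narenas_2pow. */
-	ind = PRN(balance, narenas_2pow);
-#endif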
-
-#ifdef MALLOC_UTRACE
-static int
-utrace(const void *addr, size_t len)
-{
- malloc_utrace_t *ut = (malloc_utrace_t *)addr;
-
- assert(len == sizeof(malloc_utrace_t));
-
- if (ut->p == NULL && ut->s == 0 && ut->r == NULL)
- malloc_printf("%d x USER malloc_init()\n", getpid());
- else if (ut->p == NULL && ut->r != NULL) {
- malloc_printf("%d x USER %p = malloc(%zu)\n", getpid(), ut->r,
- ut->s);
- } else if (ut->p != NULL && ut->r != NULL) {
- malloc_printf("%d x USER %p = realloc(%p, %zu)\n", getpid(),
- ut->r, ut->p, ut->s);
- } else
- malloc_printf("%d x USER free(%p)\n", getpid(), ut->p);
-
- return (0);
-}
-#endif
-
-static inline const char *
-_getprogname(void)
-{
-
- return ("<jemalloc>");
-}
-
-#ifdef MALLOC_STATS
-/*
- * Print to stderr in such a way as to (hopefully) avoid memory allocation.
- */
-static void
-malloc_printf(const char *format, ...)
-{
-#ifndef MOZ_MEMORY_WINCE
- char buf[4096];
- va_list ap;
-
- va_start(ap, format);
- vsnprintf(buf, sizeof(buf), format, ap);
- va_end(ap);
- _malloc_message(buf, "", "", "");
-#endif
-}
-#endif
-
-/******************************************************************************/
-
-#ifdef MALLOC_DECOMMIT
-static inline void
-pages_decommit(void *addr, size_t size)
-{
-
-#ifdef MOZ_MEMORY_WINDOWS
- VirtualFree(addr, size, MEM_DECOMMIT);
-#else
- if (mmap(addr, size, PROT_NONE, MAP_FIXED | MAP_PRIVATE | MAP_ANON, -1,
- 0) == MAP_FAILED)
- abort();
-#endif
-}
-
-static inline void
-pages_commit(void *addr, size_t size)
-{
-
-# ifdef MOZ_MEMORY_WINDOWS
- VirtualAlloc(addr, size, MEM_COMMIT, PAGE_READWRITE);
-# else
- if (mmap(addr, size, PROT_READ | PROT_WRITE, MAP_FIXED | MAP_PRIVATE |
- MAP_ANON, -1, 0) == MAP_FAILED)
- abort();
-# endif
-}
-#endif
-
-static bool
-base_pages_alloc_mmap(size_t minsize)
-{
- bool ret;
- size_t csize;
-#ifdef MALLOC_DECOMMIT
- size_t pminsize;
-#endif
- int pfd;
-
- assert(minsize != 0);
- csize = CHUNK_CEILING(minsize);
-#ifdef MALLOC_PAGEFILE
- if (opt_pagefile) {
- pfd = pagefile_init(csize);
- if (pfd == -1)
- return (true);
- } else
-#endif
- pfd = -1;
- base_pages = pages_map(NULL, csize, pfd);
- if (base_pages == NULL) {
- ret = true;
- goto RETURN;
- }
- base_next_addr = base_pages;
- base_past_addr = (void *)((uintptr_t)base_pages + csize);
-#ifdef MALLOC_DECOMMIT
- /*
- * Leave enough pages for minsize committed, since otherwise they would
- * have to be immediately recommitted.
- */
- pminsize = PAGE_CEILING(minsize);
- base_next_decommitted = (void *)((uintptr_t)base_pages + pminsize);
- if (pminsize < csize)
- pages_decommit(base_next_decommitted, csize - pminsize);
-#endif
-#ifdef MALLOC_STATS
- base_mapped += csize;
-#endif
-
- ret = false;
-RETURN:
-#ifdef MALLOC_PAGEFILE
- if (pfd != -1)
- pagefile_close(pfd);
-#endif
-	return (ret);
-}
-
-static bool
-base_pages_alloc(size_t minsize)
-{
-
- if (base_pages_alloc_mmap(minsize) == false)
- return (false);
-
- return (true);
-}
-
-static void *
-base_alloc(size_t size)
-{
- void *ret;
- size_t csize;
-
- /* Round size up to nearest multiple of the cacheline size. */
- csize = CACHELINE_CEILING(size);
-
- malloc_mutex_lock(&base_mtx);
- /* Make sure there's enough space for the allocation. */
- if ((uintptr_t)base_next_addr + csize > (uintptr_t)base_past_addr) {
- if (base_pages_alloc(csize)) {
- malloc_mutex_unlock(&base_mtx);
- return (NULL);
- }
- }
- /* Allocate. */
- ret = base_next_addr;
- base_next_addr = (void *)((uintptr_t)base_next_addr + csize);
-#ifdef MALLOC_DECOMMIT
- /* Make sure enough pages are committed for the new allocation. */
- if ((uintptr_t)base_next_addr > (uintptr_t)base_next_decommitted) {
- void *pbase_next_addr =
- (void *)(PAGE_CEILING((uintptr_t)base_next_addr));
-
- pages_commit(base_next_decommitted, (uintptr_t)pbase_next_addr -
- (uintptr_t)base_next_decommitted);
- base_next_decommitted = pbase_next_addr;
- }
-#endif
- malloc_mutex_unlock(&base_mtx);
- VALGRIND_MALLOCLIKE_BLOCK(ret, size, 0, false);
-
- return (ret);
-}
-
-static void *
-base_calloc(size_t number, size_t size)
-{
- void *ret;
-
-	ret = base_alloc(number * size);
-	if (ret == NULL)
-		return (NULL);
-#ifdef MALLOC_VALGRIND
-	VALGRIND_FREELIKE_BLOCK(ret, 0);
-	VALGRIND_MALLOCLIKE_BLOCK(ret, size, 0, true);
-#endif
-	memset(ret, 0, number * size);
-
- return (ret);
-}
-
-static extent_node_t *
-base_node_alloc(void)
-{
- extent_node_t *ret;
-
- malloc_mutex_lock(&base_mtx);
- if (base_nodes != NULL) {
- ret = base_nodes;
- base_nodes = *(extent_node_t **)ret;
- VALGRIND_FREELIKE_BLOCK(ret, 0);
- VALGRIND_MALLOCLIKE_BLOCK(ret, sizeof(extent_node_t), 0, false);
- malloc_mutex_unlock(&base_mtx);
- } else {
- malloc_mutex_unlock(&base_mtx);
- ret = (extent_node_t *)base_alloc(sizeof(extent_node_t));
- }
-
- return (ret);
-}
-
-static void
-base_node_dealloc(extent_node_t *node)
-{
-
- malloc_mutex_lock(&base_mtx);
- VALGRIND_FREELIKE_BLOCK(node, 0);
- VALGRIND_MALLOCLIKE_BLOCK(node, sizeof(extent_node_t *), 0, false);
- *(extent_node_t **)node = base_nodes;
- base_nodes = node;
- malloc_mutex_unlock(&base_mtx);
-}
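-
-/*
- * base_node_alloc()/base_node_dealloc() above implement an intrusive
- * free list: a freed node's first word is reused as the link to the
- * next free node.  A generic sketch of the same technique
- * (illustrative; not allocator code):
- */
-#if 0	/* Illustrative only. */
-static void *example_free_head;
-
-static void
-example_freelist_push(void *obj)
-{
-
-	*(void **)obj = example_free_head;	/* First word is the link. */
-	example_free_head = obj;
-}
-
-static void *
-example_freelist_pop(void)
-{
-	void *obj = example_free_head;
-
-	if (obj != NULL)
-		example_free_head = *(void **)obj;
-	return (obj);
-}
-#endif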
-
-static reserve_reg_t *
-base_reserve_reg_alloc(void)
-{
- reserve_reg_t *ret;
-
- malloc_mutex_lock(&base_mtx);
- if (base_reserve_regs != NULL) {
- ret = base_reserve_regs;
- base_reserve_regs = *(reserve_reg_t **)ret;
- VALGRIND_FREELIKE_BLOCK(ret, 0);
- VALGRIND_MALLOCLIKE_BLOCK(ret, sizeof(reserve_reg_t), 0, false);
- malloc_mutex_unlock(&base_mtx);
- } else {
- malloc_mutex_unlock(&base_mtx);
- ret = (reserve_reg_t *)base_alloc(sizeof(reserve_reg_t));
- }
-
- return (ret);
-}
-
-static void
-base_reserve_reg_dealloc(reserve_reg_t *reg)
-{
-
- malloc_mutex_lock(&base_mtx);
- VALGRIND_FREELIKE_BLOCK(reg, 0);
- VALGRIND_MALLOCLIKE_BLOCK(reg, sizeof(reserve_reg_t *), 0, false);
- *(reserve_reg_t **)reg = base_reserve_regs;
- base_reserve_regs = reg;
- malloc_mutex_unlock(&base_mtx);
-}
-
-/******************************************************************************/
-
-#ifdef MALLOC_STATS
-static void
-stats_print(arena_t *arena)
-{
- unsigned i, gap_start;
-
-#ifdef MOZ_MEMORY_WINDOWS
- malloc_printf("dirty: %Iu page%s dirty, %I64u sweep%s,"
- " %I64u madvise%s, %I64u page%s purged\n",
- arena->ndirty, arena->ndirty == 1 ? "" : "s",
- arena->stats.npurge, arena->stats.npurge == 1 ? "" : "s",
- arena->stats.nmadvise, arena->stats.nmadvise == 1 ? "" : "s",
- arena->stats.purged, arena->stats.purged == 1 ? "" : "s");
-# ifdef MALLOC_DECOMMIT
- malloc_printf("decommit: %I64u decommit%s, %I64u commit%s,"
- " %I64u page%s decommitted\n",
- arena->stats.ndecommit, (arena->stats.ndecommit == 1) ? "" : "s",
- arena->stats.ncommit, (arena->stats.ncommit == 1) ? "" : "s",
- arena->stats.decommitted,
- (arena->stats.decommitted == 1) ? "" : "s");
-# endif
-
- malloc_printf(" allocated nmalloc ndalloc\n");
- malloc_printf("small: %12Iu %12I64u %12I64u\n",
- arena->stats.allocated_small, arena->stats.nmalloc_small,
- arena->stats.ndalloc_small);
- malloc_printf("large: %12Iu %12I64u %12I64u\n",
- arena->stats.allocated_large, arena->stats.nmalloc_large,
- arena->stats.ndalloc_large);
- malloc_printf("total: %12Iu %12I64u %12I64u\n",
- arena->stats.allocated_small + arena->stats.allocated_large,
- arena->stats.nmalloc_small + arena->stats.nmalloc_large,
- arena->stats.ndalloc_small + arena->stats.ndalloc_large);
- malloc_printf("mapped: %12Iu\n", arena->stats.mapped);
-#else
- malloc_printf("dirty: %zu page%s dirty, %llu sweep%s,"
- " %llu madvise%s, %llu page%s purged\n",
- arena->ndirty, arena->ndirty == 1 ? "" : "s",
- arena->stats.npurge, arena->stats.npurge == 1 ? "" : "s",
- arena->stats.nmadvise, arena->stats.nmadvise == 1 ? "" : "s",
- arena->stats.purged, arena->stats.purged == 1 ? "" : "s");
-# ifdef MALLOC_DECOMMIT
- malloc_printf("decommit: %llu decommit%s, %llu commit%s,"
- " %llu page%s decommitted\n",
- arena->stats.ndecommit, (arena->stats.ndecommit == 1) ? "" : "s",
- arena->stats.ncommit, (arena->stats.ncommit == 1) ? "" : "s",
- arena->stats.decommitted,
- (arena->stats.decommitted == 1) ? "" : "s");
-# endif
-
- malloc_printf(" allocated nmalloc ndalloc\n");
- malloc_printf("small: %12zu %12llu %12llu\n",
- arena->stats.allocated_small, arena->stats.nmalloc_small,
- arena->stats.ndalloc_small);
- malloc_printf("large: %12zu %12llu %12llu\n",
- arena->stats.allocated_large, arena->stats.nmalloc_large,
- arena->stats.ndalloc_large);
- malloc_printf("total: %12zu %12llu %12llu\n",
- arena->stats.allocated_small + arena->stats.allocated_large,
- arena->stats.nmalloc_small + arena->stats.nmalloc_large,
- arena->stats.ndalloc_small + arena->stats.ndalloc_large);
- malloc_printf("mapped: %12zu\n", arena->stats.mapped);
-#endif
- malloc_printf("bins: bin size regs pgs requests newruns"
- " reruns maxruns curruns\n");
- for (i = 0, gap_start = UINT_MAX; i < ntbins + nqbins + nsbins; i++) {
- if (arena->bins[i].stats.nrequests == 0) {
- if (gap_start == UINT_MAX)
- gap_start = i;
- } else {
- if (gap_start != UINT_MAX) {
- if (i > gap_start + 1) {
- /* Gap of more than one size class. */
- malloc_printf("[%u..%u]\n",
- gap_start, i - 1);
- } else {
- /* Gap of one size class. */
- malloc_printf("[%u]\n", gap_start);
- }
- gap_start = UINT_MAX;
- }
- malloc_printf(
-#if defined(MOZ_MEMORY_WINDOWS)
- "%13u %1s %4u %4u %3u %9I64u %9I64u"
- " %9I64u %7u %7u\n",
-#else
- "%13u %1s %4u %4u %3u %9llu %9llu"
- " %9llu %7lu %7lu\n",
-#endif
- i,
- i < ntbins ? "T" : i < ntbins + nqbins ? "Q" : "S",
-		    (unsigned)arena->bins[i].reg_size,
-		    arena->bins[i].nregs,
-		    (unsigned)(arena->bins[i].run_size >> pagesize_2pow),
- arena->bins[i].stats.nrequests,
- arena->bins[i].stats.nruns,
- arena->bins[i].stats.reruns,
- arena->bins[i].stats.highruns,
- arena->bins[i].stats.curruns);
- }
- }
- if (gap_start != UINT_MAX) {
- if (i > gap_start + 1) {
- /* Gap of more than one size class. */
- malloc_printf("[%u..%u]\n", gap_start, i - 1);
- } else {
- /* Gap of one size class. */
- malloc_printf("[%u]\n", gap_start);
- }
- }
-}
-#endif
-
-/*
- * End Utility functions/macros.
- */
-/******************************************************************************/
-/*
- * Begin extent tree code.
- */
-
-static inline int
-extent_szad_comp(extent_node_t *a, extent_node_t *b)
-{
- int ret;
- size_t a_size = a->size;
- size_t b_size = b->size;
-
- ret = (a_size > b_size) - (a_size < b_size);
- if (ret == 0) {
- uintptr_t a_addr = (uintptr_t)a->addr;
- uintptr_t b_addr = (uintptr_t)b->addr;
-
- ret = (a_addr > b_addr) - (a_addr < b_addr);
- }
-
- return (ret);
-}
-
-/* Wrap red-black tree macros in functions. */
-rb_wrap(static, extent_tree_szad_, extent_tree_t, extent_node_t,
- link_szad, extent_szad_comp)
-
-static inline int
-extent_ad_comp(extent_node_t *a, extent_node_t *b)
-{
- uintptr_t a_addr = (uintptr_t)a->addr;
- uintptr_t b_addr = (uintptr_t)b->addr;
-
- return ((a_addr > b_addr) - (a_addr < b_addr));
-}
-
-/* Wrap red-black tree macros in functions. */
-rb_wrap(static, extent_tree_ad_, extent_tree_t, extent_node_t, link_ad,
- extent_ad_comp)
-
-/*
- * End extent tree code.
- */
-/******************************************************************************/
-/*
- * Begin chunk management functions.
- */
-
-#ifdef MOZ_MEMORY_WINDOWS
-#ifdef MOZ_MEMORY_WINCE
-#define ALIGN_ADDR2OFFSET(al, ad) \
-	((uintptr_t)(ad) & ((al) - 1))
-static void *
-pages_map_align(size_t size, int pfd, size_t alignment)
-{
-
- void *ret;
- int offset;
- if (size % alignment)
- size += (alignment - (size % alignment));
- assert(size >= alignment);
- ret = pages_map(NULL, size, pfd);
- offset = ALIGN_ADDR2OFFSET(alignment, ret);
- if (offset) {
-		/* try to over-allocate by the amount we're offset */
- void *tmp;
- pages_unmap(ret, size);
- tmp = VirtualAlloc(NULL, size + alignment - offset,
- MEM_RESERVE, PAGE_NOACCESS);
- if (offset == ALIGN_ADDR2OFFSET(alignment, tmp))
- ret = VirtualAlloc((void*)((intptr_t)tmp + alignment
- - offset), size, MEM_COMMIT,
- PAGE_READWRITE);
- else
- VirtualFree(tmp, 0, MEM_RELEASE);
- offset = ALIGN_ADDR2OFFSET(alignment, ret);
-
- if (offset) {
- /* over allocate to ensure we have an aligned region */
- ret = VirtualAlloc(NULL, size + alignment, MEM_RESERVE,
- PAGE_NOACCESS);
- offset = ALIGN_ADDR2OFFSET(alignment, ret);
- ret = VirtualAlloc((void*)((intptr_t)ret +
- alignment - offset),
- size, MEM_COMMIT, PAGE_READWRITE);
- }
- }
- return (ret);
-}
-#endif
-
-static void *
-pages_map(void *addr, size_t size, int pfd)
-{
- void *ret = NULL;
-#if defined(MOZ_MEMORY_WINCE) && !defined(MOZ_MEMORY_WINCE6)
- void *va_ret;
- assert(addr == NULL);
- va_ret = VirtualAlloc(addr, size, MEM_RESERVE, PAGE_NOACCESS);
- if (va_ret)
- ret = VirtualAlloc(va_ret, size, MEM_COMMIT, PAGE_READWRITE);
- assert(va_ret == ret);
-#else
- ret = VirtualAlloc(addr, size, MEM_COMMIT | MEM_RESERVE,
- PAGE_READWRITE);
-#endif
- return (ret);
-}
-
-static void
-pages_unmap(void *addr, size_t size)
-{
- if (VirtualFree(addr, 0, MEM_RELEASE) == 0) {
-#if defined(MOZ_MEMORY_WINCE) && !defined(MOZ_MEMORY_WINCE6)
- if (GetLastError() == ERROR_INVALID_PARAMETER) {
- MEMORY_BASIC_INFORMATION info;
- VirtualQuery(addr, &info, sizeof(info));
- if (VirtualFree(info.AllocationBase, 0, MEM_RELEASE))
- return;
- }
-#endif
- _malloc_message(_getprogname(),
- ": (malloc) Error in VirtualFree()\n", "", "");
- if (opt_abort)
- abort();
- }
-}
-#elif (defined(MOZ_MEMORY_DARWIN))
-static void *
-pages_map(void *addr, size_t size, int pfd)
-{
- void *ret;
- kern_return_t err;
- int flags;
-
- if (addr != NULL) {
- ret = addr;
- flags = 0;
- } else
- flags = VM_FLAGS_ANYWHERE;
-
- err = vm_allocate((vm_map_t)mach_task_self(), (vm_address_t *)&ret,
- (vm_size_t)size, flags);
- if (err != KERN_SUCCESS)
- ret = NULL;
-
- assert(ret == NULL || (addr == NULL && ret != addr)
- || (addr != NULL && ret == addr));
- return (ret);
-}
-
-static void
-pages_unmap(void *addr, size_t size)
-{
- kern_return_t err;
-
- err = vm_deallocate((vm_map_t)mach_task_self(), (vm_address_t)addr,
- (vm_size_t)size);
- if (err != KERN_SUCCESS) {
- malloc_message(_getprogname(),
- ": (malloc) Error in vm_deallocate(): ",
- mach_error_string(err), "\n");
- if (opt_abort)
- abort();
- }
-}
-
-#define VM_COPY_MIN (pagesize << 5)
-static inline void
-pages_copy(void *dest, const void *src, size_t n)
-{
-
- assert((void *)((uintptr_t)dest & ~pagesize_mask) == dest);
- assert(n >= VM_COPY_MIN);
- assert((void *)((uintptr_t)src & ~pagesize_mask) == src);
-
- vm_copy(mach_task_self(), (vm_address_t)src, (vm_size_t)n,
- (vm_address_t)dest);
-}
-#else /* MOZ_MEMORY_DARWIN */
-#ifdef JEMALLOC_USES_MAP_ALIGN
-static void *
-pages_map_align(size_t size, int pfd, size_t alignment)
-{
- void *ret;
-
- /*
- * We don't use MAP_FIXED here, because it can cause the *replacement*
- * of existing mappings, and we only want to create new mappings.
- */
-#ifdef MALLOC_PAGEFILE
- if (pfd != -1) {
- ret = mmap((void *)alignment, size, PROT_READ | PROT_WRITE, MAP_PRIVATE |
- MAP_NOSYNC | MAP_ALIGN, pfd, 0);
- } else
-#endif
- {
- ret = mmap((void *)alignment, size, PROT_READ | PROT_WRITE, MAP_PRIVATE |
- MAP_NOSYNC | MAP_ALIGN | MAP_ANON, -1, 0);
- }
- assert(ret != NULL);
-
- if (ret == MAP_FAILED)
- ret = NULL;
- return (ret);
-}
-#endif
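-
-/*
- * With MAP_ALIGN (a Solaris mmap(2) flag), the address argument is
- * interpreted as the required alignment rather than as a placement
- * hint, which is why pages_map_align() passes (void *)alignment above.
- */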
-
-static void *
-pages_map(void *addr, size_t size, int pfd)
-{
- void *ret;
-
- /*
- * We don't use MAP_FIXED here, because it can cause the *replacement*
- * of existing mappings, and we only want to create new mappings.
- */
-#ifdef MALLOC_PAGEFILE
- if (pfd != -1) {
- ret = mmap(addr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE |
- MAP_NOSYNC, pfd, 0);
- } else
-#endif
- {
- ret = mmap(addr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE |
- MAP_ANON, -1, 0);
- }
- assert(ret != NULL);
-
- if (ret == MAP_FAILED)
- ret = NULL;
- else if (addr != NULL && ret != addr) {
- /*
- * We succeeded in mapping memory, but not in the right place.
- */
- if (munmap(ret, size) == -1) {
- char buf[STRERROR_BUF];
-
- strerror_r(errno, buf, sizeof(buf));
- _malloc_message(_getprogname(),
- ": (malloc) Error in munmap(): ", buf, "\n");
- if (opt_abort)
- abort();
- }
- ret = NULL;
- }
-
- assert(ret == NULL || (addr == NULL && ret != addr)
- || (addr != NULL && ret == addr));
- return (ret);
-}
-
-static void
-pages_unmap(void *addr, size_t size)
-{
-
- if (munmap(addr, size) == -1) {
- char buf[STRERROR_BUF];
-
- strerror_r(errno, buf, sizeof(buf));
- _malloc_message(_getprogname(),
- ": (malloc) Error in munmap(): ", buf, "\n");
- if (opt_abort)
- abort();
- }
-}
-#endif
-
-#ifdef MALLOC_VALIDATE
-static inline malloc_rtree_t *
-malloc_rtree_new(unsigned bits)
-{
- malloc_rtree_t *ret;
- unsigned bits_per_level, height, i;
-
- bits_per_level = ffs(pow2_ceil((MALLOC_RTREE_NODESIZE /
- sizeof(void *)))) - 1;
- height = bits / bits_per_level;
- if (height * bits_per_level != bits)
- height++;
- assert(height * bits_per_level >= bits);
-
- ret = (malloc_rtree_t*)base_calloc(1, sizeof(malloc_rtree_t) + (sizeof(unsigned) *
- (height - 1)));
- if (ret == NULL)
- return (NULL);
-
- malloc_spin_init(&ret->lock);
- ret->height = height;
- if (bits_per_level * height > bits)
- ret->level2bits[0] = bits % bits_per_level;
- else
- ret->level2bits[0] = bits_per_level;
- for (i = 1; i < height; i++)
- ret->level2bits[i] = bits_per_level;
-
- ret->root = (void**)base_calloc(1, sizeof(void *) << ret->level2bits[0]);
- if (ret->root == NULL) {
- /*
- * We leak the rtree here, since there's no generic base
- * deallocation.
- */
- return (NULL);
- }
-
- return (ret);
-}
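-
-/*
- * Example geometry for the tree built above, assuming 32-bit pointers,
- * 1MB chunks (so bits == 32 - 20 == 12 significant key bits), and
- * nodes holding 256 pointers (bits_per_level == 8):
- *
- *   height == 2, level2bits == {4, 8}
- *
- * i.e. the root consumes the top 4 key bits and the leaf level the
- * next 8.
- */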
-
-/* The least significant bits of the key are ignored. */
-static inline void *
-malloc_rtree_get(malloc_rtree_t *rtree, uintptr_t key)
-{
- void *ret;
- uintptr_t subkey;
- unsigned i, lshift, height, bits;
- void **node, **child;
-
- malloc_spin_lock(&rtree->lock);
- for (i = lshift = 0, height = rtree->height, node = rtree->root;
- i < height - 1;
- i++, lshift += bits, node = child) {
- bits = rtree->level2bits[i];
- subkey = (key << lshift) >> ((SIZEOF_PTR << 3) - bits);
- child = (void**)node[subkey];
- if (child == NULL) {
- malloc_spin_unlock(&rtree->lock);
- return (NULL);
- }
- }
-
- /* node is a leaf, so it contains values rather than node pointers. */
- bits = rtree->level2bits[i];
- subkey = (key << lshift) >> ((SIZEOF_PTR << 3) - bits);
- ret = node[subkey];
- malloc_spin_unlock(&rtree->lock);
-
- return (ret);
-}
-
-static inline bool
-malloc_rtree_set(malloc_rtree_t *rtree, uintptr_t key, void *val)
-{
- uintptr_t subkey;
- unsigned i, lshift, height, bits;
- void **node, **child;
-
- malloc_spin_lock(&rtree->lock);
- for (i = lshift = 0, height = rtree->height, node = rtree->root;
- i < height - 1;
- i++, lshift += bits, node = child) {
- bits = rtree->level2bits[i];
- subkey = (key << lshift) >> ((SIZEOF_PTR << 3) - bits);
- child = (void**)node[subkey];
- if (child == NULL) {
- child = (void**)base_calloc(1, sizeof(void *) <<
- rtree->level2bits[i+1]);
- if (child == NULL) {
- malloc_spin_unlock(&rtree->lock);
- return (true);
- }
- node[subkey] = child;
- }
- }
-
- /* node is a leaf, so it contains values rather than node pointers. */
- bits = rtree->level2bits[i];
- subkey = (key << lshift) >> ((SIZEOF_PTR << 3) - bits);
- node[subkey] = val;
- malloc_spin_unlock(&rtree->lock);
-
- return (false);
-}
-#endif
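-
-/*
- * Worked example of the subkey extraction in malloc_rtree_get() and
- * malloc_rtree_set(), with the same assumed geometry as above
- * (32-bit pointers, level2bits == {4, 8}):
- *
- *   key == 0xdeadb000
- *   level 0: (key << 0) >> (32 - 4) == 0xd
- *   level 1: (key << 4) >> (32 - 8) == 0xea
- *
- * Each level consumes the most significant unconsumed key bits; the
- * low (intra-chunk) bits are never examined.
- */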
-
-static void *
-chunk_alloc_mmap(size_t size, bool pagefile)
-{
- void *ret;
-#ifndef JEMALLOC_USES_MAP_ALIGN
- size_t offset;
-#endif
- int pfd;
-
-#ifdef MALLOC_PAGEFILE
- if (opt_pagefile && pagefile) {
- pfd = pagefile_init(size);
- if (pfd == -1)
- return (NULL);
- } else
-#endif
- pfd = -1;
-
- /*
- * Windows requires that there be a 1:1 mapping between VM
- * allocation/deallocation operations. Therefore, take care here to
- * acquire the final result via one mapping operation. This means
- * unmapping any preliminary result that is not correctly aligned.
- *
- * The MALLOC_PAGEFILE code also benefits from this mapping algorithm,
- * since it reduces the number of page files.
- */
-
-#ifdef JEMALLOC_USES_MAP_ALIGN
- ret = pages_map_align(size, pfd, chunksize);
-#else
- ret = pages_map(NULL, size, pfd);
- if (ret == NULL)
- goto RETURN;
-
- offset = CHUNK_ADDR2OFFSET(ret);
- if (offset != 0) {
- /* Deallocate, then try to allocate at (ret + size - offset). */
- pages_unmap(ret, size);
- ret = pages_map((void *)((uintptr_t)ret + size - offset), size,
- pfd);
- while (ret == NULL) {
- /*
- * Over-allocate in order to map a memory region that
- * is definitely large enough.
- */
- ret = pages_map(NULL, size + chunksize, -1);
- if (ret == NULL)
- goto RETURN;
- /*
- * Deallocate, then allocate the correct size, within
- * the over-sized mapping.
- */
- offset = CHUNK_ADDR2OFFSET(ret);
- pages_unmap(ret, size + chunksize);
- if (offset == 0)
- ret = pages_map(ret, size, pfd);
- else {
- ret = pages_map((void *)((uintptr_t)ret +
- chunksize - offset), size, pfd);
- }
- /*
- * Failure here indicates a race with another thread, so
- * try again.
- */
- }
- }
-RETURN:
-#endif
-#ifdef MALLOC_PAGEFILE
- if (pfd != -1)
- pagefile_close(pfd);
-#endif
-#ifdef MALLOC_STATS
- if (ret != NULL)
- stats_chunks.nchunks += (size / chunksize);
-#endif
- return (ret);
-}
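-
-/*
- * Worked example of the alignment recovery above, assuming 1MB chunks
- * (chunksize == 0x100000) and size == chunksize:
- *
- *   pages_map(NULL, ...) returns ret == 0x20040000, so
- *   offset == CHUNK_ADDR2OFFSET(ret) == 0x40000, and the retry targets
- *   ret + size - offset == 0x20100000, the next chunk boundary.
- *
- * If the retry fails, the loop over-allocates (size + chunksize) bytes,
- * which must contain a chunk-aligned region of the requested size;
- * remapping just that region can then only fail due to a race with
- * another thread, in which case the loop tries again.
- */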
-
-#ifdef MALLOC_PAGEFILE
-static int
-pagefile_init(size_t size)
-{
- int ret;
- size_t i;
- char pagefile_path[PATH_MAX];
- char zbuf[MALLOC_PAGEFILE_WRITE_SIZE];
-
- /*
- * Create a temporary file, then immediately unlink it so that it will
- * not persist.
- */
- strcpy(pagefile_path, pagefile_templ);
- ret = mkstemp(pagefile_path);
- if (ret == -1)
- return (ret);
- if (unlink(pagefile_path)) {
- char buf[STRERROR_BUF];
-
- strerror_r(errno, buf, sizeof(buf));
- _malloc_message(_getprogname(), ": (malloc) Error in unlink(\"",
- pagefile_path, "\"):");
- _malloc_message(buf, "\n", "", "");
- if (opt_abort)
- abort();
- }
-
- /*
- * Write sequential zeroes to the file in order to assure that disk
- * space is committed, with minimal fragmentation. It would be
- * sufficient to write one zero per disk block, but that potentially
- * results in more system calls, for no real gain.
- */
- memset(zbuf, 0, sizeof(zbuf));
- for (i = 0; i < size; i += sizeof(zbuf)) {
- if (write(ret, zbuf, sizeof(zbuf)) != sizeof(zbuf)) {
- if (errno != ENOSPC) {
- char buf[STRERROR_BUF];
-
- strerror_r(errno, buf, sizeof(buf));
- _malloc_message(_getprogname(),
- ": (malloc) Error in write(): ", buf, "\n");
- if (opt_abort)
- abort();
- }
- pagefile_close(ret);
- return (-1);
- }
- }
-
- return (ret);
-}
-
-static void
-pagefile_close(int pfd)
-{
-
- if (close(pfd)) {
- char buf[STRERROR_BUF];
-
- strerror_r(errno, buf, sizeof(buf));
- _malloc_message(_getprogname(),
- ": (malloc) Error in close(): ", buf, "\n");
- if (opt_abort)
- abort();
- }
-}
-#endif
-
-static void *
-chunk_recycle_reserve(size_t size, bool zero)
-{
- extent_node_t *node, key;
-
-#ifdef MALLOC_DECOMMIT
- if (size != chunksize)
- return (NULL);
-#endif
-
- key.addr = NULL;
- key.size = size;
- malloc_mutex_lock(&reserve_mtx);
- node = extent_tree_szad_nsearch(&reserve_chunks_szad, &key);
- if (node != NULL) {
- void *ret = node->addr;
-
- /* Remove node from the tree. */
- extent_tree_szad_remove(&reserve_chunks_szad, node);
-#ifndef MALLOC_DECOMMIT
- if (node->size == size) {
-#else
- assert(node->size == size);
-#endif
- extent_tree_ad_remove(&reserve_chunks_ad, node);
- base_node_dealloc(node);
-#ifndef MALLOC_DECOMMIT
- } else {
- /*
- * Insert the remainder of node's address range as a
- * smaller chunk. Its position within reserve_chunks_ad
- * does not change.
- */
- assert(node->size > size);
- node->addr = (void *)((uintptr_t)node->addr + size);
- node->size -= size;
- extent_tree_szad_insert(&reserve_chunks_szad, node);
- }
-#endif
- reserve_cur -= size;
- /*
- * Try to replenish the reserve if this allocation depleted it.
- */
-#ifndef MALLOC_DECOMMIT
- if (reserve_cur < reserve_min) {
- size_t diff = reserve_min - reserve_cur;
-#else
- while (reserve_cur < reserve_min) {
-# define diff chunksize
-#endif
- void *chunk;
-
- malloc_mutex_unlock(&reserve_mtx);
- chunk = chunk_alloc_mmap(diff, true);
- malloc_mutex_lock(&reserve_mtx);
- if (chunk == NULL) {
- uint64_t seq = 0;
-
- do {
- seq = reserve_notify(RESERVE_CND_LOW,
- size, seq);
- if (seq == 0)
- goto MALLOC_OUT;
- } while (reserve_cur < reserve_min);
- } else {
- extent_node_t *node;
-
- node = chunk_dealloc_reserve(chunk, diff);
- if (node == NULL) {
- uint64_t seq = 0;
-
- pages_unmap(chunk, diff);
- do {
- seq = reserve_notify(
- RESERVE_CND_LOW, size, seq);
- if (seq == 0)
- goto MALLOC_OUT;
- } while (reserve_cur < reserve_min);
- }
- }
- }
-MALLOC_OUT:
- malloc_mutex_unlock(&reserve_mtx);
-
-#ifdef MALLOC_DECOMMIT
- pages_commit(ret, size);
-# undef diff
-#else
- if (zero)
- memset(ret, 0, size);
-#endif
- return (ret);
- }
- malloc_mutex_unlock(&reserve_mtx);
-
- return (NULL);
-}
-
-static void *
-chunk_alloc(size_t size, bool zero, bool pagefile)
-{
- void *ret;
-
- assert(size != 0);
- assert((size & chunksize_mask) == 0);
-
- ret = chunk_recycle_reserve(size, zero);
- if (ret != NULL)
- goto RETURN;
-
- ret = chunk_alloc_mmap(size, pagefile);
- if (ret != NULL) {
- goto RETURN;
- }
-
- /* All strategies for allocation failed. */
- ret = NULL;
-RETURN:
-#ifdef MALLOC_STATS
- if (ret != NULL)
- stats_chunks.curchunks += (size / chunksize);
- if (stats_chunks.curchunks > stats_chunks.highchunks)
- stats_chunks.highchunks = stats_chunks.curchunks;
-#endif
-
-#ifdef MALLOC_VALIDATE
- if (ret != NULL) {
- if (malloc_rtree_set(chunk_rtree, (uintptr_t)ret, ret)) {
- chunk_dealloc(ret, size);
- return (NULL);
- }
- }
-#endif
-
- assert(CHUNK_ADDR2BASE(ret) == ret);
- return (ret);
-}
-
-static extent_node_t *
-chunk_dealloc_reserve(void *chunk, size_t size)
-{
- extent_node_t *node;
-
-#ifdef MALLOC_DECOMMIT
- if (size != chunksize)
- return (NULL);
-#else
- extent_node_t *prev, key;
-
- key.addr = (void *)((uintptr_t)chunk + size);
- node = extent_tree_ad_nsearch(&reserve_chunks_ad, &key);
- /* Try to coalesce forward. */
- if (node != NULL && node->addr == key.addr) {
- /*
- * Coalesce chunk with the following address range. This does
- * not change the position within reserve_chunks_ad, so only
- * remove/insert from/into reserve_chunks_szad.
- */
- extent_tree_szad_remove(&reserve_chunks_szad, node);
- node->addr = chunk;
- node->size += size;
- extent_tree_szad_insert(&reserve_chunks_szad, node);
- } else {
-#endif
- /* Coalescing forward failed, so insert a new node. */
- node = base_node_alloc();
- if (node == NULL)
- return (NULL);
- node->addr = chunk;
- node->size = size;
- extent_tree_ad_insert(&reserve_chunks_ad, node);
- extent_tree_szad_insert(&reserve_chunks_szad, node);
-#ifndef MALLOC_DECOMMIT
- }
-
- /* Try to coalesce backward. */
- prev = extent_tree_ad_prev(&reserve_chunks_ad, node);
- if (prev != NULL && (void *)((uintptr_t)prev->addr + prev->size) ==
- chunk) {
- /*
- * Coalesce chunk with the previous address range. This does
- * not change the position within reserve_chunks_ad, so only
- * remove/insert node from/into reserve_chunks_szad.
- */
- extent_tree_szad_remove(&reserve_chunks_szad, prev);
- extent_tree_ad_remove(&reserve_chunks_ad, prev);
-
- extent_tree_szad_remove(&reserve_chunks_szad, node);
- node->addr = prev->addr;
- node->size += prev->size;
- extent_tree_szad_insert(&reserve_chunks_szad, node);
-
- base_node_dealloc(prev);
- }
-#endif
-
-#ifdef MALLOC_DECOMMIT
- pages_decommit(chunk, size);
-#else
- madvise(chunk, size, MADV_FREE);
-#endif
-
- reserve_cur += size;
- if (reserve_cur > reserve_max)
- reserve_shrink();
-
- return (node);
-}
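-
-/*
- * Coalescing example for the non-DECOMMIT path above, with 1MB chunks:
- * freeing [0x20100000, 0x20200000) while the reserve holds
- * [0x20200000, 0x20400000) extends that node forward to cover
- * [0x20100000, 0x20400000); if the reserve also holds
- * [0x20000000, 0x20100000), the backward pass then merges the two
- * nodes into a single [0x20000000, 0x20400000) extent.
- */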
-
-static void
-chunk_dealloc_mmap(void *chunk, size_t size)
-{
-
- pages_unmap(chunk, size);
-}
-
-static void
-chunk_dealloc(void *chunk, size_t size)
-{
- extent_node_t *node;
-
- assert(chunk != NULL);
- assert(CHUNK_ADDR2BASE(chunk) == chunk);
- assert(size != 0);
- assert((size & chunksize_mask) == 0);
-
-#ifdef MALLOC_STATS
- stats_chunks.curchunks -= (size / chunksize);
-#endif
-#ifdef MALLOC_VALIDATE
- malloc_rtree_set(chunk_rtree, (uintptr_t)chunk, NULL);
-#endif
-
- /* Try to merge chunk into the reserve. */
- malloc_mutex_lock(&reserve_mtx);
- node = chunk_dealloc_reserve(chunk, size);
- malloc_mutex_unlock(&reserve_mtx);
- if (node == NULL)
- chunk_dealloc_mmap(chunk, size);
-}
-
-/*
- * End chunk management functions.
- */
-/******************************************************************************/
-/*
- * Begin arena.
- */
-
-/*
- * Choose an arena based on a per-thread value (fast-path code, calls slow-path
- * code if necessary).
- */
-static inline arena_t *
-choose_arena(void)
-{
- arena_t *ret;
-
- /*
- * We can only use TLS if this is a PIC library, since for the static
- * library version, libc's malloc is used by TLS allocation, which
- * introduces a bootstrapping issue.
- */
-#ifndef NO_TLS
- if (__isthreaded == false) {
- /* Avoid the overhead of TLS for single-threaded operation. */
- return (arenas[0]);
- }
-
-# ifdef MOZ_MEMORY_WINDOWS
- ret = (arena_t*)TlsGetValue(tlsIndex);
-# else
- ret = arenas_map;
-# endif
-
- if (ret == NULL) {
- ret = choose_arena_hard();
- assert(ret != NULL);
- }
-#else
- if (__isthreaded && narenas > 1) {
- unsigned long ind;
-
- /*
- * Hash _pthread_self() to one of the arenas. There is a prime
- * number of arenas, so this has a reasonable chance of
- * working. Even so, the hashing can be easily thwarted by
- * inconvenient _pthread_self() values. Without specific
- * knowledge of how _pthread_self() calculates values, we can't
- * easily do much better than this.
- */
- ind = (unsigned long) _pthread_self() % narenas;
-
- /*
- * Optimistically assume that arenas[ind] has been initialized.
- * At worst, we find out that some other thread has already
- * done so, after acquiring the lock in preparation. Note that
- * this lazy locking also has the effect of lazily forcing
- * cache coherency; without the lock acquisition, there's no
- * guarantee that modification of arenas[ind] by another thread
- * would be seen on this CPU for an arbitrary amount of time.
- *
- * In general, this approach to modifying a synchronized value
- * isn't a good idea, but in this case we only ever modify the
- * value once, so things work out well.
- */
- ret = arenas[ind];
- if (ret == NULL) {
- /*
- * Avoid races with another thread that may have already
- * initialized arenas[ind].
- */
- malloc_spin_lock(&arenas_lock);
- if (arenas[ind] == NULL)
- ret = arenas_extend((unsigned)ind);
- else
- ret = arenas[ind];
- malloc_spin_unlock(&arenas_lock);
- }
- } else
- ret = arenas[0];
-#endif
-
- assert(ret != NULL);
- return (ret);
-}
-
-#ifndef NO_TLS
-/*
- * Choose an arena based on a per-thread value (slow-path code only, called
- * only by choose_arena()).
- */
-static arena_t *
-choose_arena_hard(void)
-{
- arena_t *ret;
-
- assert(__isthreaded);
-
-#ifdef MALLOC_BALANCE
- /* Seed the PRNG used for arena load balancing. */
- SPRN(balance, (uint32_t)(uintptr_t)(_pthread_self()));
-#endif
-
- if (narenas > 1) {
-#ifdef MALLOC_BALANCE
- unsigned ind;
-
- ind = PRN(balance, narenas_2pow);
- if ((ret = arenas[ind]) == NULL) {
- malloc_spin_lock(&arenas_lock);
- if ((ret = arenas[ind]) == NULL)
- ret = arenas_extend(ind);
- malloc_spin_unlock(&arenas_lock);
- }
-#else
- malloc_spin_lock(&arenas_lock);
- if ((ret = arenas[next_arena]) == NULL)
- ret = arenas_extend(next_arena);
- next_arena = (next_arena + 1) % narenas;
- malloc_spin_unlock(&arenas_lock);
-#endif
- } else
- ret = arenas[0];
-
-#ifdef MOZ_MEMORY_WINDOWS
- TlsSetValue(tlsIndex, ret);
-#else
- arenas_map = ret;
-#endif
-
- return (ret);
-}
-#endif
-
-static inline int
-arena_chunk_comp(arena_chunk_t *a, arena_chunk_t *b)
-{
- uintptr_t a_chunk = (uintptr_t)a;
- uintptr_t b_chunk = (uintptr_t)b;
-
- assert(a != NULL);
- assert(b != NULL);
-
- return ((a_chunk > b_chunk) - (a_chunk < b_chunk));
-}
-
-/* Wrap red-black tree macros in functions. */
-rb_wrap(static, arena_chunk_tree_dirty_, arena_chunk_tree_t,
- arena_chunk_t, link_dirty, arena_chunk_comp)
-
-static inline int
-arena_run_comp(arena_chunk_map_t *a, arena_chunk_map_t *b)
-{
- uintptr_t a_mapelm = (uintptr_t)a;
- uintptr_t b_mapelm = (uintptr_t)b;
-
- assert(a != NULL);
- assert(b != NULL);
-
- return ((a_mapelm > b_mapelm) - (a_mapelm < b_mapelm));
-}
-
-/* Wrap red-black tree macros in functions. */
-rb_wrap(static, arena_run_tree_, arena_run_tree_t, arena_chunk_map_t, link,
- arena_run_comp)
-
-static inline int
-arena_avail_comp(arena_chunk_map_t *a, arena_chunk_map_t *b)
-{
- int ret;
- size_t a_size = a->bits & ~pagesize_mask;
- size_t b_size = b->bits & ~pagesize_mask;
-
- ret = (a_size > b_size) - (a_size < b_size);
- if (ret == 0) {
- uintptr_t a_mapelm, b_mapelm;
-
- if ((a->bits & CHUNK_MAP_KEY) == 0)
- a_mapelm = (uintptr_t)a;
- else {
- /*
- * Treat keys as though they are lower than anything
- * else.
- */
- a_mapelm = 0;
- }
- b_mapelm = (uintptr_t)b;
-
- ret = (a_mapelm > b_mapelm) - (a_mapelm < b_mapelm);
- }
-
- return (ret);
-}
-
-/* Wrap red-black tree macros in functions. */
-rb_wrap(static, arena_avail_tree_, arena_avail_tree_t, arena_chunk_map_t, link,
- arena_avail_comp)
-
-static inline void *
-arena_run_reg_alloc(arena_run_t *run, arena_bin_t *bin)
-{
- void *ret;
- unsigned i, mask, bit, regind;
-
- assert(run->magic == ARENA_RUN_MAGIC);
- assert(run->regs_minelm < bin->regs_mask_nelms);
-
- /*
- * Move the first check outside the loop, so that run->regs_minelm can
- * be updated unconditionally, without the possibility of updating it
- * multiple times.
- */
- i = run->regs_minelm;
- mask = run->regs_mask[i];
- if (mask != 0) {
- /* Usable allocation found. */
- bit = ffs((int)mask) - 1;
-
- regind = ((i << (SIZEOF_INT_2POW + 3)) + bit);
- assert(regind < bin->nregs);
- ret = (void *)(((uintptr_t)run) + bin->reg0_offset
- + (bin->reg_size * regind));
-
- /* Clear bit. */
- mask ^= (1U << bit);
- run->regs_mask[i] = mask;
-
- return (ret);
- }
-
- for (i++; i < bin->regs_mask_nelms; i++) {
- mask = run->regs_mask[i];
- if (mask != 0) {
- /* Usable allocation found. */
- bit = ffs((int)mask) - 1;
-
- regind = ((i << (SIZEOF_INT_2POW + 3)) + bit);
- assert(regind < bin->nregs);
- ret = (void *)(((uintptr_t)run) + bin->reg0_offset
- + (bin->reg_size * regind));
-
- /* Clear bit. */
- mask ^= (1U << bit);
- run->regs_mask[i] = mask;
-
- /*
- * Make a note that nothing before this element
- * contains a free region.
- */
- run->regs_minelm = i; /* Low payoff: + (mask == 0); */
-
- return (ret);
- }
- }
- /* Not reached. */
- assert(0);
- return (NULL);
-}
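-
-/*
- * Sketch of the bitmap scan above, assuming 32-bit mask words
- * (SIZEOF_INT_2POW == 2): if run->regs_mask[i] == 0x00000028, then
- *
- *   bit    == ffs(0x28) - 1 == 3
- *   regind == (i << 5) + 3
- *
- * and clearing the bit leaves 0x00000020, so the next allocation from
- * this mask word returns region (i << 5) + 5.
- */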
-
-static inline void
-arena_run_reg_dalloc(arena_run_t *run, arena_bin_t *bin, void *ptr, size_t size)
-{
- /*
- * To divide by a number D that is not a power of two we multiply
- * by (2^21 / D) and then right shift by 21 positions.
- *
- * X / D
- *
- * becomes
- *
- * (X * size_invs[(D >> QUANTUM_2POW_MIN) - 3]) >> SIZE_INV_SHIFT
- */
-#define SIZE_INV_SHIFT 21
-#define SIZE_INV(s) (((1U << SIZE_INV_SHIFT) / (s << QUANTUM_2POW_MIN)) + 1)
- static const unsigned size_invs[] = {
- SIZE_INV(3),
- SIZE_INV(4), SIZE_INV(5), SIZE_INV(6), SIZE_INV(7),
- SIZE_INV(8), SIZE_INV(9), SIZE_INV(10), SIZE_INV(11),
- SIZE_INV(12),SIZE_INV(13), SIZE_INV(14), SIZE_INV(15),
- SIZE_INV(16),SIZE_INV(17), SIZE_INV(18), SIZE_INV(19),
- SIZE_INV(20),SIZE_INV(21), SIZE_INV(22), SIZE_INV(23),
- SIZE_INV(24),SIZE_INV(25), SIZE_INV(26), SIZE_INV(27),
- SIZE_INV(28),SIZE_INV(29), SIZE_INV(30), SIZE_INV(31)
-#if (QUANTUM_2POW_MIN < 4)
- ,
- SIZE_INV(32), SIZE_INV(33), SIZE_INV(34), SIZE_INV(35),
- SIZE_INV(36), SIZE_INV(37), SIZE_INV(38), SIZE_INV(39),
- SIZE_INV(40), SIZE_INV(41), SIZE_INV(42), SIZE_INV(43),
- SIZE_INV(44), SIZE_INV(45), SIZE_INV(46), SIZE_INV(47),
- SIZE_INV(48), SIZE_INV(49), SIZE_INV(50), SIZE_INV(51),
- SIZE_INV(52), SIZE_INV(53), SIZE_INV(54), SIZE_INV(55),
- SIZE_INV(56), SIZE_INV(57), SIZE_INV(58), SIZE_INV(59),
- SIZE_INV(60), SIZE_INV(61), SIZE_INV(62), SIZE_INV(63)
-#endif
- };
- unsigned diff, regind, elm, bit;
-
- assert(run->magic == ARENA_RUN_MAGIC);
- assert(((sizeof(size_invs)) / sizeof(unsigned)) + 3
- >= (SMALL_MAX_DEFAULT >> QUANTUM_2POW_MIN));
-
- /*
- * Avoid doing division with a variable divisor if possible. Using
- * actual division here can reduce allocator throughput by over 20%!
- */
- diff = (unsigned)((uintptr_t)ptr - (uintptr_t)run - bin->reg0_offset);
- if ((size & (size - 1)) == 0) {
- /*
- * log2_table allows fast division of a power of two in the
- * [1..128] range.
- *
- * (x / divisor) becomes (x >> log2_table[divisor - 1]).
- */
- static const unsigned char log2_table[] = {
- 0, 1, 0, 2, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 4,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7
- };
-
- if (size <= 128)
- regind = (diff >> log2_table[size - 1]);
- else if (size <= 32768)
- regind = diff >> (8 + log2_table[(size >> 8) - 1]);
- else {
- /*
- * The run size is too large for us to use the lookup
- * table. Use real division.
- */
- regind = diff / size;
- }
- } else if (size <= ((sizeof(size_invs) / sizeof(unsigned))
- << QUANTUM_2POW_MIN) + 2) {
- regind = size_invs[(size >> QUANTUM_2POW_MIN) - 3] * diff;
- regind >>= SIZE_INV_SHIFT;
- } else {
- /*
- * size_invs isn't large enough to handle this size class, so
- * calculate regind using actual division. This only happens
- * if the user increases small_max via the 'S' runtime
- * configuration option.
- */
- regind = diff / size;
- }
- assert(diff == regind * size);
- assert(regind < bin->nregs);
-
- elm = regind >> (SIZEOF_INT_2POW + 3);
- if (elm < run->regs_minelm)
- run->regs_minelm = elm;
- bit = regind - (elm << (SIZEOF_INT_2POW + 3));
- assert((run->regs_mask[elm] & (1U << bit)) == 0);
- run->regs_mask[elm] |= (1U << bit);
-#undef SIZE_INV
-#undef SIZE_INV_SHIFT
-}
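-
-/*
- * Worked example of the reciprocal multiplication above, assuming
- * QUANTUM_2POW_MIN == 4 and a bin with reg_size == 48:
- *
- *   size_invs[(48 >> 4) - 3] == SIZE_INV(3)
- *                            == ((1U << 21) / 48) + 1 == 43691
- *   diff == 96: (96 * 43691) >> 21 == 4194336 >> 21 == 2 == 96 / 48
- *
- * The +1 rounds the reciprocal up so that the truncating right shift
- * never underestimates the quotient for the region sizes covered by
- * size_invs.
- */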
-
-static void
-arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool large,
- bool zero)
-{
- arena_chunk_t *chunk;
- size_t old_ndirty, run_ind, total_pages, need_pages, rem_pages, i;
-
- chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
- old_ndirty = chunk->ndirty;
- run_ind = (unsigned)(((uintptr_t)run - (uintptr_t)chunk)
- >> pagesize_2pow);
- total_pages = (chunk->map[run_ind].bits & ~pagesize_mask) >>
- pagesize_2pow;
- need_pages = (size >> pagesize_2pow);
- assert(need_pages > 0);
- assert(need_pages <= total_pages);
- rem_pages = total_pages - need_pages;
-
- arena_avail_tree_remove(&arena->runs_avail, &chunk->map[run_ind]);
-
- /* Keep track of trailing unused pages for later use. */
- if (rem_pages > 0) {
- chunk->map[run_ind+need_pages].bits = (rem_pages <<
- pagesize_2pow) | (chunk->map[run_ind+need_pages].bits &
- pagesize_mask);
- chunk->map[run_ind+total_pages-1].bits = (rem_pages <<
- pagesize_2pow) | (chunk->map[run_ind+total_pages-1].bits &
- pagesize_mask);
- arena_avail_tree_insert(&arena->runs_avail,
- &chunk->map[run_ind+need_pages]);
- }
-
- for (i = 0; i < need_pages; i++) {
-#ifdef MALLOC_DECOMMIT
- /*
- * Commit decommitted pages if necessary. If a decommitted
- * page is encountered, commit all needed adjacent decommitted
- * pages in one operation, in order to reduce system call
- * overhead.
- */
- if (chunk->map[run_ind + i].bits & CHUNK_MAP_DECOMMITTED) {
- size_t j;
-
- /*
- * Advance i+j to just past the index of the last page
- * to commit. Clear CHUNK_MAP_DECOMMITTED along the
- * way.
- */
- for (j = 0; i + j < need_pages && (chunk->map[run_ind +
- i + j].bits & CHUNK_MAP_DECOMMITTED); j++) {
- chunk->map[run_ind + i + j].bits ^=
- CHUNK_MAP_DECOMMITTED;
- }
-
- pages_commit((void *)((uintptr_t)chunk + ((run_ind + i)
- << pagesize_2pow)), (j << pagesize_2pow));
-# ifdef MALLOC_STATS
- arena->stats.ncommit++;
-# endif
- } else /* No need to zero since commit zeros. */
-#endif
-
- /* Zero if necessary. */
- if (zero) {
- if ((chunk->map[run_ind + i].bits & CHUNK_MAP_ZEROED)
- == 0) {
- VALGRIND_MALLOCLIKE_BLOCK((void *)((uintptr_t)
- chunk + ((run_ind + i) << pagesize_2pow)),
- pagesize, 0, false);
- memset((void *)((uintptr_t)chunk + ((run_ind
- + i) << pagesize_2pow)), 0, pagesize);
- VALGRIND_FREELIKE_BLOCK((void *)((uintptr_t)
- chunk + ((run_ind + i) << pagesize_2pow)),
- 0);
- /* CHUNK_MAP_ZEROED is cleared below. */
- }
- }
-
- /* Update dirty page accounting. */
- if (chunk->map[run_ind + i].bits & CHUNK_MAP_DIRTY) {
- chunk->ndirty--;
- arena->ndirty--;
- /* CHUNK_MAP_DIRTY is cleared below. */
- }
-
- /* Initialize the chunk map. */
- if (large) {
- chunk->map[run_ind + i].bits = CHUNK_MAP_LARGE
- | CHUNK_MAP_ALLOCATED;
- } else {
- chunk->map[run_ind + i].bits = (size_t)run
- | CHUNK_MAP_ALLOCATED;
- }
- }
-
- /*
- * Set the run size only in the first element for large runs. This is
- * primarily a debugging aid, since the lack of size info for trailing
- * pages only matters if the application tries to operate on an
- * interior pointer.
- */
- if (large)
- chunk->map[run_ind].bits |= size;
-
- if (chunk->ndirty == 0 && old_ndirty > 0)
- arena_chunk_tree_dirty_remove(&arena->chunks_dirty, chunk);
-}
-
-static void
-arena_chunk_init(arena_t *arena, arena_chunk_t *chunk)
-{
- arena_run_t *run;
- size_t i;
-
- VALGRIND_MALLOCLIKE_BLOCK(chunk, (arena_chunk_header_npages <<
- pagesize_2pow), 0, false);
-#ifdef MALLOC_STATS
- arena->stats.mapped += chunksize;
-#endif
-
- chunk->arena = arena;
-
- /*
- * Claim that no pages are in use, since the header is merely overhead.
- */
- chunk->ndirty = 0;
-
- /* Initialize the map to contain one maximal free untouched run. */
- run = (arena_run_t *)((uintptr_t)chunk + (arena_chunk_header_npages <<
- pagesize_2pow));
- for (i = 0; i < arena_chunk_header_npages; i++)
- chunk->map[i].bits = 0;
- chunk->map[i].bits = arena_maxclass
-#ifdef MALLOC_DECOMMIT
- | CHUNK_MAP_DECOMMITTED
-#endif
- | CHUNK_MAP_ZEROED;
- for (i++; i < chunk_npages-1; i++) {
- chunk->map[i].bits =
-#ifdef MALLOC_DECOMMIT
- CHUNK_MAP_DECOMMITTED |
-#endif
- CHUNK_MAP_ZEROED;
- }
- chunk->map[chunk_npages-1].bits = arena_maxclass
-#ifdef MALLOC_DECOMMIT
- | CHUNK_MAP_DECOMMITTED
-#endif
- | CHUNK_MAP_ZEROED;
-
-#ifdef MALLOC_DECOMMIT
- /*
- * Start out decommitted, in order to force a closer correspondence
- * between dirty pages and committed untouched pages.
- */
- pages_decommit(run, arena_maxclass);
-# ifdef MALLOC_STATS
- arena->stats.ndecommit++;
- arena->stats.decommitted += (chunk_npages - arena_chunk_header_npages);
-# endif
-#endif
-
- /* Insert the run into the runs_avail tree. */
- arena_avail_tree_insert(&arena->runs_avail,
- &chunk->map[arena_chunk_header_npages]);
-}
-
-static void
-arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk)
-{
-
- if (arena->spare != NULL) {
- if (arena->spare->ndirty > 0) {
- arena_chunk_tree_dirty_remove(
- &chunk->arena->chunks_dirty, arena->spare);
- arena->ndirty -= arena->spare->ndirty;
- }
- VALGRIND_FREELIKE_BLOCK(arena->spare, 0);
- chunk_dealloc((void *)arena->spare, chunksize);
-#ifdef MALLOC_STATS
- arena->stats.mapped -= chunksize;
-#endif
- }
-
- /*
- * Remove run from runs_avail, regardless of whether this chunk
- * will be cached, so that the arena does not use it. Dirty page
- * flushing only uses the chunks_dirty tree, so leaving this chunk in
- * the chunks_* trees is sufficient for that purpose.
- */
- arena_avail_tree_remove(&arena->runs_avail,
- &chunk->map[arena_chunk_header_npages]);
-
- arena->spare = chunk;
-}
-
-static arena_run_t *
-arena_run_alloc(arena_t *arena, arena_bin_t *bin, size_t size, bool large,
- bool zero)
-{
- arena_chunk_t *chunk;
- arena_run_t *run;
- arena_chunk_map_t *mapelm, key;
-
- assert(size <= arena_maxclass);
- assert((size & pagesize_mask) == 0);
-
- chunk = NULL;
- while (true) {
- /* Search the arena's chunks for the lowest best fit. */
- key.bits = size | CHUNK_MAP_KEY;
- mapelm = arena_avail_tree_nsearch(&arena->runs_avail, &key);
- if (mapelm != NULL) {
- arena_chunk_t *run_chunk = (arena_chunk_t*)CHUNK_ADDR2BASE(mapelm);
- size_t pageind = ((uintptr_t)mapelm -
- (uintptr_t)run_chunk->map) /
- sizeof(arena_chunk_map_t);
-
- if (chunk != NULL)
- chunk_dealloc(chunk, chunksize);
- run = (arena_run_t *)((uintptr_t)run_chunk + (pageind
- << pagesize_2pow));
- arena_run_split(arena, run, size, large, zero);
- return (run);
- }
-
- if (arena->spare != NULL) {
- /* Use the spare. */
- chunk = arena->spare;
- arena->spare = NULL;
- run = (arena_run_t *)((uintptr_t)chunk +
- (arena_chunk_header_npages << pagesize_2pow));
- /* Insert the run into the runs_avail tree. */
- arena_avail_tree_insert(&arena->runs_avail,
- &chunk->map[arena_chunk_header_npages]);
- arena_run_split(arena, run, size, large, zero);
- return (run);
- }
-
- /*
- * No usable runs. Create a new chunk from which to allocate
- * the run.
- */
- if (chunk == NULL) {
- uint64_t chunk_seq;
-
- /*
- * Record the chunk allocation sequence number in order
- * to detect races.
- */
- arena->chunk_seq++;
- chunk_seq = arena->chunk_seq;
-
- /*
- * Drop the arena lock while allocating a chunk, since
- * reserve notifications may cause recursive
- * allocation. Dropping the lock here opens an
- * allocation race, but we recover.
- */
- malloc_mutex_unlock(&arena->lock);
- chunk = (arena_chunk_t *)chunk_alloc(chunksize, true,
- true);
- malloc_mutex_lock(&arena->lock);
-
- /*
- * Check whether a race allowed a usable run to appear.
- */
- if (bin != NULL && (run = bin->runcur) != NULL &&
- run->nfree > 0) {
- if (chunk != NULL)
- chunk_dealloc(chunk, chunksize);
- return (run);
- }
-
- /*
- * If this thread raced with another such that multiple
- * chunks were allocated, make sure that there is still
- * inadequate space before using this chunk.
- */
- if (chunk_seq != arena->chunk_seq)
- continue;
-
- /*
- * Check for an error *after* checking for a race,
- * since a race could also cause a transient OOM
- * condition.
- */
- if (chunk == NULL)
- return (NULL);
- }
-
- arena_chunk_init(arena, chunk);
- run = (arena_run_t *)((uintptr_t)chunk +
- (arena_chunk_header_npages << pagesize_2pow));
- /* Update page map. */
- arena_run_split(arena, run, size, large, zero);
- return (run);
- }
-}
-
-static void
-arena_purge(arena_t *arena)
-{
- arena_chunk_t *chunk;
- size_t i, npages;
-#ifdef MALLOC_DEBUG
- size_t ndirty = 0;
- rb_foreach_begin(arena_chunk_t, link_dirty, &arena->chunks_dirty,
- chunk) {
- ndirty += chunk->ndirty;
- } rb_foreach_end(arena_chunk_t, link_dirty, &arena->chunks_dirty, chunk)
- assert(ndirty == arena->ndirty);
-#endif
- assert(arena->ndirty > opt_dirty_max);
-
-#ifdef MALLOC_STATS
- arena->stats.npurge++;
-#endif
-
- /*
- * Iterate downward through chunks until enough dirty memory has been
- * purged. Terminate as soon as possible in order to minimize the
- * number of system calls, even if a chunk has only been partially
- * purged.
- */
- while (arena->ndirty > (opt_dirty_max >> 1)) {
- chunk = arena_chunk_tree_dirty_last(&arena->chunks_dirty);
- assert(chunk != NULL);
-
- for (i = chunk_npages - 1; chunk->ndirty > 0; i--) {
- assert(i >= arena_chunk_header_npages);
-
- if (chunk->map[i].bits & CHUNK_MAP_DIRTY) {
-#ifdef MALLOC_DECOMMIT
- assert((chunk->map[i].bits &
- CHUNK_MAP_DECOMMITTED) == 0);
-#endif
- chunk->map[i].bits ^=
-#ifdef MALLOC_DECOMMIT
- CHUNK_MAP_DECOMMITTED |
-#endif
- CHUNK_MAP_DIRTY;
- /* Find adjacent dirty run(s). */
- for (npages = 1; i > arena_chunk_header_npages
- && (chunk->map[i - 1].bits &
- CHUNK_MAP_DIRTY); npages++) {
- i--;
-#ifdef MALLOC_DECOMMIT
- assert((chunk->map[i].bits &
- CHUNK_MAP_DECOMMITTED) == 0);
-#endif
- chunk->map[i].bits ^=
-#ifdef MALLOC_DECOMMIT
- CHUNK_MAP_DECOMMITTED |
-#endif
- CHUNK_MAP_DIRTY;
- }
- chunk->ndirty -= npages;
- arena->ndirty -= npages;
-
-#ifdef MALLOC_DECOMMIT
- pages_decommit((void *)((uintptr_t)
- chunk + (i << pagesize_2pow)),
- (npages << pagesize_2pow));
-# ifdef MALLOC_STATS
- arena->stats.ndecommit++;
- arena->stats.decommitted += npages;
-# endif
-#else
- madvise((void *)((uintptr_t)chunk + (i <<
- pagesize_2pow)), (npages << pagesize_2pow),
- MADV_FREE);
-#endif
-#ifdef MALLOC_STATS
- arena->stats.nmadvise++;
- arena->stats.purged += npages;
-#endif
- if (arena->ndirty <= (opt_dirty_max >> 1))
- break;
- }
- }
-
- if (chunk->ndirty == 0) {
- arena_chunk_tree_dirty_remove(&arena->chunks_dirty,
- chunk);
- }
- }
-}
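-
-/*
- * Note on the madvise(MADV_FREE) path above: the purged pages remain
- * mapped, but the kernel may reclaim them lazily under memory pressure,
- * after which their contents are undefined.  Accordingly, the loop
- * leaves CHUNK_MAP_ZEROED unset, and arena_run_split() re-zeroes such
- * pages if a zeroed allocation later reuses them.
- */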
-
-static void
-arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty)
-{
- arena_chunk_t *chunk;
- size_t size, run_ind, run_pages;
-
- chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
- run_ind = (size_t)(((uintptr_t)run - (uintptr_t)chunk)
- >> pagesize_2pow);
- assert(run_ind >= arena_chunk_header_npages);
- assert(run_ind < chunk_npages);
- if ((chunk->map[run_ind].bits & CHUNK_MAP_LARGE) != 0)
- size = chunk->map[run_ind].bits & ~pagesize_mask;
- else
- size = run->bin->run_size;
- run_pages = (size >> pagesize_2pow);
-
- /* Mark pages as unallocated in the chunk map. */
- if (dirty) {
- size_t i;
-
- for (i = 0; i < run_pages; i++) {
- assert((chunk->map[run_ind + i].bits & CHUNK_MAP_DIRTY)
- == 0);
- chunk->map[run_ind + i].bits = CHUNK_MAP_DIRTY;
- }
-
- if (chunk->ndirty == 0) {
- arena_chunk_tree_dirty_insert(&arena->chunks_dirty,
- chunk);
- }
- chunk->ndirty += run_pages;
- arena->ndirty += run_pages;
- } else {
- size_t i;
-
- for (i = 0; i < run_pages; i++) {
- chunk->map[run_ind + i].bits &= ~(CHUNK_MAP_LARGE |
- CHUNK_MAP_ALLOCATED);
- }
- }
- chunk->map[run_ind].bits = size | (chunk->map[run_ind].bits &
- pagesize_mask);
- chunk->map[run_ind+run_pages-1].bits = size |
- (chunk->map[run_ind+run_pages-1].bits & pagesize_mask);
-
- /* Try to coalesce forward. */
- if (run_ind + run_pages < chunk_npages &&
- (chunk->map[run_ind+run_pages].bits & CHUNK_MAP_ALLOCATED) == 0) {
- size_t nrun_size = chunk->map[run_ind+run_pages].bits &
- ~pagesize_mask;
-
- /*
- * Remove successor from runs_avail; the coalesced run is
- * inserted later.
- */
- arena_avail_tree_remove(&arena->runs_avail,
- &chunk->map[run_ind+run_pages]);
-
- size += nrun_size;
- run_pages = size >> pagesize_2pow;
-
- assert((chunk->map[run_ind+run_pages-1].bits & ~pagesize_mask)
- == nrun_size);
- chunk->map[run_ind].bits = size | (chunk->map[run_ind].bits &
- pagesize_mask);
- chunk->map[run_ind+run_pages-1].bits = size |
- (chunk->map[run_ind+run_pages-1].bits & pagesize_mask);
- }
-
- /* Try to coalesce backward. */
- if (run_ind > arena_chunk_header_npages && (chunk->map[run_ind-1].bits &
- CHUNK_MAP_ALLOCATED) == 0) {
- size_t prun_size = chunk->map[run_ind-1].bits & ~pagesize_mask;
-
- run_ind -= prun_size >> pagesize_2pow;
-
- /*
- * Remove predecessor from runs_avail; the coalesced run is
- * inserted later.
- */
- arena_avail_tree_remove(&arena->runs_avail,
- &chunk->map[run_ind]);
-
- size += prun_size;
- run_pages = size >> pagesize_2pow;
-
- assert((chunk->map[run_ind].bits & ~pagesize_mask) ==
- prun_size);
- chunk->map[run_ind].bits = size | (chunk->map[run_ind].bits &
- pagesize_mask);
- chunk->map[run_ind+run_pages-1].bits = size |
- (chunk->map[run_ind+run_pages-1].bits & pagesize_mask);
- }
-
- /* Insert into runs_avail, now that coalescing is complete. */
- arena_avail_tree_insert(&arena->runs_avail, &chunk->map[run_ind]);
-
- /* Deallocate chunk if it is now completely unused. */
- if ((chunk->map[arena_chunk_header_npages].bits & (~pagesize_mask |
- CHUNK_MAP_ALLOCATED)) == arena_maxclass)
- arena_chunk_dealloc(arena, chunk);
-
- /* Enforce opt_dirty_max. */
- if (arena->ndirty > opt_dirty_max)
- arena_purge(arena);
-}
-
-static void
-arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
- size_t oldsize, size_t newsize)
-{
- size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> pagesize_2pow;
- size_t head_npages = (oldsize - newsize) >> pagesize_2pow;
-
- assert(oldsize > newsize);
-
- /*
- * Update the chunk map so that arena_run_dalloc() can treat the
- * leading run as separately allocated.
- */
- chunk->map[pageind].bits = (oldsize - newsize) | CHUNK_MAP_LARGE |
- CHUNK_MAP_ALLOCATED;
- chunk->map[pageind+head_npages].bits = newsize | CHUNK_MAP_LARGE |
- CHUNK_MAP_ALLOCATED;
-
- arena_run_dalloc(arena, run, false);
-}
-
-static void
-arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
- size_t oldsize, size_t newsize, bool dirty)
-{
- size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> pagesize_2pow;
- size_t npages = newsize >> pagesize_2pow;
-
- assert(oldsize > newsize);
-
- /*
- * Update the chunk map so that arena_run_dalloc() can treat the
- * trailing run as separately allocated.
- */
- chunk->map[pageind].bits = newsize | CHUNK_MAP_LARGE |
- CHUNK_MAP_ALLOCATED;
- chunk->map[pageind+npages].bits = (oldsize - newsize) | CHUNK_MAP_LARGE
- | CHUNK_MAP_ALLOCATED;
-
- arena_run_dalloc(arena, (arena_run_t *)((uintptr_t)run + newsize),
- dirty);
-}
-
-static arena_run_t *
-arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin)
-{
- arena_chunk_map_t *mapelm;
- arena_run_t *run;
- unsigned i, remainder;
-
- /* Look for a usable run. */
- mapelm = arena_run_tree_first(&bin->runs);
- if (mapelm != NULL) {
- /* run is guaranteed to have available space. */
- arena_run_tree_remove(&bin->runs, mapelm);
- run = (arena_run_t *)(mapelm->bits & ~pagesize_mask);
-#ifdef MALLOC_STATS
- bin->stats.reruns++;
-#endif
- return (run);
- }
- /* No existing runs have any space available. */
-
- /* Allocate a new run. */
- run = arena_run_alloc(arena, bin, bin->run_size, false, false);
- if (run == NULL)
- return (NULL);
- /*
- * Don't initialize if a race in arena_run_alloc() allowed an existing
- * run to become usable.
- */
- if (run == bin->runcur)
- return (run);
-
- VALGRIND_MALLOCLIKE_BLOCK(run, sizeof(arena_run_t) + (sizeof(unsigned) *
- (bin->regs_mask_nelms - 1)), 0, false);
-
- /* Initialize run internals. */
- run->bin = bin;
-
- for (i = 0; i < bin->regs_mask_nelms - 1; i++)
- run->regs_mask[i] = UINT_MAX;
- remainder = bin->nregs & ((1U << (SIZEOF_INT_2POW + 3)) - 1);
- if (remainder == 0)
- run->regs_mask[i] = UINT_MAX;
- else {
- /* The last element has spare bits that need to be unset. */
- run->regs_mask[i] = (UINT_MAX >> ((1U << (SIZEOF_INT_2POW + 3))
- - remainder));
- }
-
- run->regs_minelm = 0;
-
- run->nfree = bin->nregs;
-#ifdef MALLOC_DEBUG
- run->magic = ARENA_RUN_MAGIC;
-#endif
-
-#ifdef MALLOC_STATS
- bin->stats.nruns++;
- bin->stats.curruns++;
- if (bin->stats.curruns > bin->stats.highruns)
- bin->stats.highruns = bin->stats.curruns;
-#endif
- return (run);
-}
-
-/* bin->runcur must have space available before this function is called. */
-static inline void *
-arena_bin_malloc_easy(arena_t *arena, arena_bin_t *bin, arena_run_t *run)
-{
- void *ret;
-
- assert(run->magic == ARENA_RUN_MAGIC);
- assert(run->nfree > 0);
-
- ret = arena_run_reg_alloc(run, bin);
- assert(ret != NULL);
- run->nfree--;
-
- return (ret);
-}
-
-/* Re-fill bin->runcur, then call arena_bin_malloc_easy(). */
-static void *
-arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin)
-{
-
- bin->runcur = arena_bin_nonfull_run_get(arena, bin);
- if (bin->runcur == NULL)
- return (NULL);
- assert(bin->runcur->magic == ARENA_RUN_MAGIC);
- assert(bin->runcur->nfree > 0);
-
- return (arena_bin_malloc_easy(arena, bin, bin->runcur));
-}
-
-/*
- * Calculate bin->run_size such that it meets the following constraints:
- *
- * *) bin->run_size >= min_run_size
- * *) bin->run_size <= arena_maxclass
- * *) bin->run_size <= RUN_MAX_SMALL
- * *) run header overhead <= RUN_MAX_OVRHD (or header overhead relaxed).
- *
- * bin->nregs, bin->regs_mask_nelms, and bin->reg0_offset are
- * also calculated here, since these settings are all interdependent.
- */
-static size_t
-arena_bin_run_size_calc(arena_bin_t *bin, size_t min_run_size)
-{
- size_t try_run_size, good_run_size;
- unsigned good_nregs, good_mask_nelms, good_reg0_offset;
- unsigned try_nregs, try_mask_nelms, try_reg0_offset;
-
- assert(min_run_size >= pagesize);
- assert(min_run_size <= arena_maxclass);
- assert(min_run_size <= RUN_MAX_SMALL);
-
- /*
- * Calculate known-valid settings before entering the run_size
- * expansion loop, so that the first part of the loop always copies
- * valid settings.
- *
- * The do..while loop iteratively reduces the number of regions until
- * the run header and the regions no longer overlap. A closed formula
- * would be quite messy, since there is an interdependency between the
- * header's mask length and the number of regions.
- */
- try_run_size = min_run_size;
- try_nregs = ((try_run_size - sizeof(arena_run_t)) / bin->reg_size)
- + 1; /* Counter-act try_nregs-- in loop. */
- do {
- try_nregs--;
- try_mask_nelms = (try_nregs >> (SIZEOF_INT_2POW + 3)) +
- ((try_nregs & ((1U << (SIZEOF_INT_2POW + 3)) - 1)) ? 1 : 0);
- try_reg0_offset = try_run_size - (try_nregs * bin->reg_size);
- } while (sizeof(arena_run_t) + (sizeof(unsigned) * (try_mask_nelms - 1))
- > try_reg0_offset);
-
- /* run_size expansion loop. */
- do {
- /*
- * Copy valid settings before trying more aggressive settings.
- */
- good_run_size = try_run_size;
- good_nregs = try_nregs;
- good_mask_nelms = try_mask_nelms;
- good_reg0_offset = try_reg0_offset;
-
- /* Try more aggressive settings. */
- try_run_size += pagesize;
- try_nregs = ((try_run_size - sizeof(arena_run_t)) /
- bin->reg_size) + 1; /* Counter-act try_nregs-- in loop. */
- do {
- try_nregs--;
- try_mask_nelms = (try_nregs >> (SIZEOF_INT_2POW + 3)) +
- ((try_nregs & ((1U << (SIZEOF_INT_2POW + 3)) - 1)) ?
- 1 : 0);
- try_reg0_offset = try_run_size - (try_nregs *
- bin->reg_size);
- } while (sizeof(arena_run_t) + (sizeof(unsigned) *
- (try_mask_nelms - 1)) > try_reg0_offset);
- } while (try_run_size <= arena_maxclass && try_run_size <= RUN_MAX_SMALL
- && RUN_MAX_OVRHD * (bin->reg_size << 3) > RUN_MAX_OVRHD_RELAX
- && (try_reg0_offset << RUN_BFP) > RUN_MAX_OVRHD * try_run_size);
-
- assert(sizeof(arena_run_t) + (sizeof(unsigned) * (good_mask_nelms - 1))
- <= good_reg0_offset);
- assert((good_mask_nelms << (SIZEOF_INT_2POW + 3)) >= good_nregs);
-
- /* Copy final settings. */
- bin->run_size = good_run_size;
- bin->nregs = good_nregs;
- bin->regs_mask_nelms = good_mask_nelms;
- bin->reg0_offset = good_reg0_offset;
-
- return (good_run_size);
-}
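-
-/*
- * Worked example of the inner do..while above, assuming
- * pagesize == 4096, reg_size == 64, and sizeof(arena_run_t) == 40:
- *
- *   try_run_size == 4096 starts at try_nregs == 64; one decrement
- *   gives try_nregs == 63, try_mask_nelms == 2, and
- *   try_reg0_offset == 4096 - 63 * 64 == 64, at which point the
- *   44-byte header (40 bytes plus one extra mask word) no longer
- *   overlaps region 0.
- *
- * The outer loop then keeps growing try_run_size while the header
- * overhead remains above the RUN_MAX_OVRHD target.
- */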
-
-#ifdef MALLOC_BALANCE
-static inline void
-arena_lock_balance(arena_t *arena)
-{
- unsigned contention;
-
- contention = malloc_spin_lock(&arena->lock);
- if (narenas > 1) {
- /*
- * Calculate the exponentially averaged contention for this
- * arena. Due to integer math always rounding down, this value
- * decays somewhat faster than normal.
- */
- arena->contention = (((uint64_t)arena->contention
- * (uint64_t)((1U << BALANCE_ALPHA_INV_2POW)-1))
- + (uint64_t)contention) >> BALANCE_ALPHA_INV_2POW;
- if (arena->contention >= opt_balance_threshold)
- arena_lock_balance_hard(arena);
- }
-}
-
-static void
-arena_lock_balance_hard(arena_t *arena)
-{
- uint32_t ind;
-
- arena->contention = 0;
-#ifdef MALLOC_STATS
- arena->stats.nbalance++;
-#endif
- ind = PRN(balance, narenas_2pow);
- if (arenas[ind] != NULL) {
-#ifdef MOZ_MEMORY_WINDOWS
- TlsSetValue(tlsIndex, arenas[ind]);
-#else
- arenas_map = arenas[ind];
-#endif
- } else {
- malloc_spin_lock(&arenas_lock);
- if (arenas[ind] != NULL) {
-#ifdef MOZ_MEMORY_WINDOWS
- TlsSetValue(tlsIndex, arenas[ind]);
-#else
- arenas_map = arenas[ind];
-#endif
- } else {
-#ifdef MOZ_MEMORY_WINDOWS
- TlsSetValue(tlsIndex, arenas_extend(ind));
-#else
- arenas_map = arenas_extend(ind);
-#endif
- }
- malloc_spin_unlock(&arenas_lock);
- }
-}
-#endif
-
-static inline void *
-arena_malloc_small(arena_t *arena, size_t size, bool zero)
-{
- void *ret;
- arena_bin_t *bin;
- arena_run_t *run;
-
- if (size < small_min) {
- /* Tiny. */
- size = pow2_ceil(size);
- bin = &arena->bins[ffs((int)(size >> (TINY_MIN_2POW +
- 1)))];
-#if (!defined(NDEBUG) || defined(MALLOC_STATS))
- /*
- * Bin calculation is always correct, but we may need
- * to fix size for the purposes of assertions and/or
- * stats accuracy.
- */
- if (size < (1U << TINY_MIN_2POW))
- size = (1U << TINY_MIN_2POW);
-#endif
- } else if (size <= small_max) {
- /* Quantum-spaced. */
- size = QUANTUM_CEILING(size);
- bin = &arena->bins[ntbins + (size >> opt_quantum_2pow)
- - 1];
- } else {
- /* Sub-page. */
- size = pow2_ceil(size);
- bin = &arena->bins[ntbins + nqbins
- + (ffs((int)(size >> opt_small_max_2pow)) - 2)];
- }
- assert(size == bin->reg_size);
-
-#ifdef MALLOC_BALANCE
- arena_lock_balance(arena);
-#else
- malloc_spin_lock(&arena->lock);
-#endif
- if ((run = bin->runcur) != NULL && run->nfree > 0)
- ret = arena_bin_malloc_easy(arena, bin, run);
- else
- ret = arena_bin_malloc_hard(arena, bin);
-
- if (ret == NULL) {
- malloc_spin_unlock(&arena->lock);
- return (NULL);
- }
-
-#ifdef MALLOC_STATS
- bin->stats.nrequests++;
- arena->stats.nmalloc_small++;
- arena->stats.allocated_small += size;
-#endif
- malloc_spin_unlock(&arena->lock);
-
- VALGRIND_MALLOCLIKE_BLOCK(ret, size, 0, zero);
- if (zero == false) {
-#ifdef MALLOC_FILL
- if (opt_junk)
- memset(ret, 0xa5, size);
- else if (opt_zero)
- memset(ret, 0, size);
-#endif
- } else
- memset(ret, 0, size);
-
- return (ret);
-}
-
-static void *
-arena_malloc_large(arena_t *arena, size_t size, bool zero)
-{
- void *ret;
-
- /* Large allocation. */
- size = PAGE_CEILING(size);
-#ifdef MALLOC_BALANCE
- arena_lock_balance(arena);
-#else
- malloc_spin_lock(&arena->lock);
-#endif
- ret = (void *)arena_run_alloc(arena, NULL, size, true, zero);
- if (ret == NULL) {
- malloc_spin_unlock(&arena->lock);
- return (NULL);
- }
-#ifdef MALLOC_STATS
- arena->stats.nmalloc_large++;
- arena->stats.allocated_large += size;
-#endif
- malloc_spin_unlock(&arena->lock);
-
- VALGRIND_MALLOCLIKE_BLOCK(ret, size, 0, zero);
- if (zero == false) {
-#ifdef MALLOC_FILL
- if (opt_junk)
- memset(ret, 0xa5, size);
- else if (opt_zero)
- memset(ret, 0, size);
-#endif
- }
-
- return (ret);
-}
-
-static inline void *
-arena_malloc(arena_t *arena, size_t size, bool zero)
-{
-
- assert(arena != NULL);
- assert(arena->magic == ARENA_MAGIC);
- assert(size != 0);
- assert(QUANTUM_CEILING(size) <= arena_maxclass);
-
- if (size <= bin_maxclass) {
- return (arena_malloc_small(arena, size, zero));
- } else
- return (arena_malloc_large(arena, size, zero));
-}
-
-static inline void *
-imalloc(size_t size)
-{
-
- assert(size != 0);
-
- if (size <= arena_maxclass)
- return (arena_malloc(choose_arena(), size, false));
- else
- return (huge_malloc(size, false));
-}
-
-static inline void *
-icalloc(size_t size)
-{
-
- if (size <= arena_maxclass)
- return (arena_malloc(choose_arena(), size, true));
- else
- return (huge_malloc(size, true));
-}
-
-/* Only handles large allocations that require more than page alignment. */
-static void *
-arena_palloc(arena_t *arena, size_t alignment, size_t size, size_t alloc_size)
-{
- void *ret;
- size_t offset;
- arena_chunk_t *chunk;
-
- assert((size & pagesize_mask) == 0);
- assert((alignment & pagesize_mask) == 0);
-
-#ifdef MALLOC_BALANCE
- arena_lock_balance(arena);
-#else
- malloc_spin_lock(&arena->lock);
-#endif
- ret = (void *)arena_run_alloc(arena, NULL, alloc_size, true, false);
- if (ret == NULL) {
- malloc_spin_unlock(&arena->lock);
- return (NULL);
- }
-
- chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ret);
-
- offset = (uintptr_t)ret & (alignment - 1);
- assert((offset & pagesize_mask) == 0);
- assert(offset < alloc_size);
- if (offset == 0)
- arena_run_trim_tail(arena, chunk, (arena_run_t*)ret, alloc_size, size, false);
- else {
- size_t leadsize, trailsize;
-
- leadsize = alignment - offset;
- if (leadsize > 0) {
- arena_run_trim_head(arena, chunk, (arena_run_t*)ret, alloc_size,
- alloc_size - leadsize);
- ret = (void *)((uintptr_t)ret + leadsize);
- }
-
- trailsize = alloc_size - leadsize - size;
- if (trailsize != 0) {
- /* Trim trailing space. */
- assert(trailsize < alloc_size);
- arena_run_trim_tail(arena, chunk, (arena_run_t*)ret, size + trailsize,
- size, false);
- }
- }
-
-#ifdef MALLOC_STATS
- arena->stats.nmalloc_large++;
- arena->stats.allocated_large += size;
-#endif
- malloc_spin_unlock(&arena->lock);
-
- VALGRIND_MALLOCLIKE_BLOCK(ret, size, 0, false);
-#ifdef MALLOC_FILL
- if (opt_junk)
- memset(ret, 0xa5, size);
- else if (opt_zero)
- memset(ret, 0, size);
-#endif
- return (ret);
-}
-
-static inline void *
-ipalloc(size_t alignment, size_t size)
-{
- void *ret;
- size_t ceil_size;
-
- /*
- * Round size up to the nearest multiple of alignment.
- *
- * This done, we can take advantage of the fact that for each small
- * size class, every object is aligned at the smallest power of two
- * that is non-zero in the base two representation of the size. For
- * example:
- *
- * Size | Base 2 | Minimum alignment
- * -----+----------+------------------
- * 96 | 1100000 | 32
- * 144 | 10100000 | 32
- * 192 | 11000000 | 64
- *
- * Depending on runtime settings, it is possible that arena_malloc()
- * will further round up to a power of two, but that never causes
- * correctness issues.
- */
- ceil_size = (size + (alignment - 1)) & (-alignment);
- /*
- * (ceil_size < size) protects against the combination of maximal
- * alignment and size greater than maximal alignment.
- */
- if (ceil_size < size) {
- /* size_t overflow. */
- return (NULL);
- }
-
- if (ceil_size <= pagesize || (alignment <= pagesize
- && ceil_size <= arena_maxclass))
- ret = arena_malloc(choose_arena(), ceil_size, false);
- else {
- size_t run_size;
-
- /*
- * We can't achieve sub-page alignment, so round up alignment
- * permanently; it makes later calculations simpler.
- */
- alignment = PAGE_CEILING(alignment);
- ceil_size = PAGE_CEILING(size);
- /*
- * (ceil_size < size) protects against very large sizes within
- * pagesize of SIZE_T_MAX.
- *
- * (ceil_size + alignment < ceil_size) protects against the
- * combination of maximal alignment and ceil_size large enough
- * to cause overflow. This is similar to the first overflow
- * check above, but it needs to be repeated due to the new
- * ceil_size value, which may now be *equal* to maximal
- * alignment, whereas before we only detected overflow if the
- * original size was *greater* than maximal alignment.
- */
- if (ceil_size < size || ceil_size + alignment < ceil_size) {
- /* size_t overflow. */
- return (NULL);
- }
-
- /*
- * Calculate the size of the over-size run that arena_palloc()
- * would need to allocate in order to guarantee the alignment.
- */
- if (ceil_size >= alignment)
- run_size = ceil_size + alignment - pagesize;
- else {
- /*
- * It is possible that (alignment << 1) will cause
- * overflow, but it doesn't matter because we also
- * subtract pagesize, which in the case of overflow
- * leaves us with a very large run_size. That causes
- * the first conditional below to fail, which means
- * that the bogus run_size value never gets used for
- * anything important.
- */
- run_size = (alignment << 1) - pagesize;
- }
-
- if (run_size <= arena_maxclass) {
- ret = arena_palloc(choose_arena(), alignment, ceil_size,
- run_size);
- } else if (alignment <= chunksize)
- ret = huge_malloc(ceil_size, false);
- else
- ret = huge_palloc(alignment, ceil_size);
- }
-
- assert(((uintptr_t)ret & (alignment - 1)) == 0);
- return (ret);
-}
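-
-/*
- * Worked examples for the two paths above, assuming pagesize == 4096:
- *
- *   size == 100, alignment == 32:
- *     ceil_size == (100 + 31) & -32 == 128, and the 128-byte size
- *     class is itself at least 32-byte aligned, so arena_malloc()
- *     suffices.
- *
- *   size == 8192, alignment == 8192:
- *     run_size == 8192 + 8192 - 4096 == 12288; any page-aligned run of
- *     that length contains an 8192-aligned 8192-byte region, which
- *     arena_palloc() extracts by trimming the head and/or tail.
- */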
-
-/* Return the size of the allocation pointed to by ptr. */
-static size_t
-arena_salloc(const void *ptr)
-{
- size_t ret;
- arena_chunk_t *chunk;
- size_t pageind, mapbits;
-
- assert(ptr != NULL);
- assert(CHUNK_ADDR2BASE(ptr) != ptr);
-
- chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
- pageind = (((uintptr_t)ptr - (uintptr_t)chunk) >> pagesize_2pow);
- mapbits = chunk->map[pageind].bits;
- assert((mapbits & CHUNK_MAP_ALLOCATED) != 0);
- if ((mapbits & CHUNK_MAP_LARGE) == 0) {
- arena_run_t *run = (arena_run_t *)(mapbits & ~pagesize_mask);
- assert(run->magic == ARENA_RUN_MAGIC);
- ret = run->bin->reg_size;
- } else {
- ret = mapbits & ~pagesize_mask;
- assert(ret != 0);
- }
-
- return (ret);
-}
-
-#if (defined(MALLOC_VALIDATE) || defined(MOZ_MEMORY_DARWIN))
-/*
- * Validate ptr before assuming that it points to an allocation. Currently,
- * the following validation is performed:
- *
- * + Check that ptr is not NULL.
- *
- * + Check that ptr lies within a mapped chunk.
- */
-static inline size_t
-isalloc_validate(const void *ptr)
-{
- arena_chunk_t *chunk;
-
- chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
- if (chunk == NULL)
- return (0);
-
- if (malloc_rtree_get(chunk_rtree, (uintptr_t)chunk) == NULL)
- return (0);
-
- if (chunk != ptr) {
- assert(chunk->arena->magic == ARENA_MAGIC);
- return (arena_salloc(ptr));
- } else {
- size_t ret;
- extent_node_t *node;
- extent_node_t key;
-
- /* Chunk. */
- key.addr = (void *)chunk;
- malloc_mutex_lock(&huge_mtx);
- node = extent_tree_ad_search(&huge, &key);
- if (node != NULL)
- ret = node->size;
- else
- ret = 0;
- malloc_mutex_unlock(&huge_mtx);
- return (ret);
- }
-}
-#endif
-
-static inline size_t
-isalloc(const void *ptr)
-{
- size_t ret;
- arena_chunk_t *chunk;
-
- assert(ptr != NULL);
-
- chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
- if (chunk != ptr) {
- /* Region. */
- assert(chunk->arena->magic == ARENA_MAGIC);
-
- ret = arena_salloc(ptr);
- } else {
- extent_node_t *node, key;
-
- /* Chunk (huge allocation). */
-
- malloc_mutex_lock(&huge_mtx);
-
- /* Extract from tree of huge allocations. */
- key.addr = __DECONST(void *, ptr);
- node = extent_tree_ad_search(&huge, &key);
- assert(node != NULL);
-
- ret = node->size;
-
- malloc_mutex_unlock(&huge_mtx);
- }
-
- return (ret);
-}
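-
-/*
- * The (chunk != ptr) test above relies on huge allocations always
- * being chunk-aligned, while arena regions never coincide with a chunk
- * base (the chunk header occupies the first pages).  With 1MB chunks,
- * ptr == 0x20100000 must be huge, whereas ptr == 0x20104000 is a
- * region or run within the arena chunk at 0x20100000.
- */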
-
-static inline void
-arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr,
- arena_chunk_map_t *mapelm)
-{
- arena_run_t *run;
- arena_bin_t *bin;
- size_t size;
-
- run = (arena_run_t *)(mapelm->bits & ~pagesize_mask);
- assert(run->magic == ARENA_RUN_MAGIC);
- bin = run->bin;
- size = bin->reg_size;
-
-#ifdef MALLOC_FILL
- if (opt_junk)
- memset(ptr, 0x5a, size);
-#endif
-
- arena_run_reg_dalloc(run, bin, ptr, size);
- run->nfree++;
-
- if (run->nfree == bin->nregs) {
- /* Deallocate run. */
- if (run == bin->runcur)
- bin->runcur = NULL;
- else if (bin->nregs != 1) {
- size_t run_pageind = (((uintptr_t)run -
- (uintptr_t)chunk)) >> pagesize_2pow;
- arena_chunk_map_t *run_mapelm =
- &chunk->map[run_pageind];
- /*
- * This block's conditional is necessary because if the
- * run only contains one region, then it never gets
- * inserted into the non-full runs tree.
- */
- assert(arena_run_tree_search(&bin->runs, run_mapelm) ==
- run_mapelm);
- arena_run_tree_remove(&bin->runs, run_mapelm);
- }
-#ifdef MALLOC_DEBUG
- run->magic = 0;
-#endif
- VALGRIND_FREELIKE_BLOCK(run, 0);
- arena_run_dalloc(arena, run, true);
-#ifdef MALLOC_STATS
- bin->stats.curruns--;
-#endif
- } else if (run->nfree == 1 && run != bin->runcur) {
- /*
- * Make sure that bin->runcur always refers to the lowest
- * non-full run, if one exists.
- */
- if (bin->runcur == NULL)
- bin->runcur = run;
- else if ((uintptr_t)run < (uintptr_t)bin->runcur) {
- /* Switch runcur. */
- if (bin->runcur->nfree > 0) {
- arena_chunk_t *runcur_chunk =
- (arena_chunk_t*)CHUNK_ADDR2BASE(bin->runcur);
- size_t runcur_pageind =
- (((uintptr_t)bin->runcur -
- (uintptr_t)runcur_chunk)) >> pagesize_2pow;
- arena_chunk_map_t *runcur_mapelm =
- &runcur_chunk->map[runcur_pageind];
-
- /* Insert runcur. */
- assert(arena_run_tree_search(&bin->runs,
- runcur_mapelm) == NULL);
- arena_run_tree_insert(&bin->runs,
- runcur_mapelm);
- }
- bin->runcur = run;
- } else {
- size_t run_pageind = (((uintptr_t)run -
- (uintptr_t)chunk)) >> pagesize_2pow;
- arena_chunk_map_t *run_mapelm =
- &chunk->map[run_pageind];
-
- assert(arena_run_tree_search(&bin->runs, run_mapelm) ==
- NULL);
- arena_run_tree_insert(&bin->runs, run_mapelm);
- }
- }
-#ifdef MALLOC_STATS
- arena->stats.allocated_small -= size;
- arena->stats.ndalloc_small++;
-#endif
-}
-
-static void
-arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr)
-{
- /* Large allocation. */
- malloc_spin_lock(&arena->lock);
-
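-	/*
-	 * The preprocessor dance below compiles the size computation in
-	 * unconditionally when MALLOC_STATS is defined (the size feeds the
-	 * stats update); when only MALLOC_FILL is defined, the block runs
-	 * just when opt_junk requires the junk fill.
-	 */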
-#ifdef MALLOC_FILL
-#ifndef MALLOC_STATS
- if (opt_junk)
-#endif
-#endif
- {
- size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >>
- pagesize_2pow;
- size_t size = chunk->map[pageind].bits & ~pagesize_mask;
-
-#ifdef MALLOC_FILL
-#ifdef MALLOC_STATS
- if (opt_junk)
-#endif
- memset(ptr, 0x5a, size);
-#endif
-#ifdef MALLOC_STATS
- arena->stats.allocated_large -= size;
-#endif
- }
-#ifdef MALLOC_STATS
- arena->stats.ndalloc_large++;
-#endif
-
- arena_run_dalloc(arena, (arena_run_t *)ptr, true);
- malloc_spin_unlock(&arena->lock);
-}
-
-static inline void
-arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr)
-{
- size_t pageind;
- arena_chunk_map_t *mapelm;
-
- assert(arena != NULL);
- assert(arena->magic == ARENA_MAGIC);
- assert(chunk->arena == arena);
- assert(ptr != NULL);
- assert(CHUNK_ADDR2BASE(ptr) != ptr);
-
- pageind = (((uintptr_t)ptr - (uintptr_t)chunk) >> pagesize_2pow);
- mapelm = &chunk->map[pageind];
- assert((mapelm->bits & CHUNK_MAP_ALLOCATED) != 0);
- if ((mapelm->bits & CHUNK_MAP_LARGE) == 0) {
- /* Small allocation. */
- malloc_spin_lock(&arena->lock);
- arena_dalloc_small(arena, chunk, ptr, mapelm);
- malloc_spin_unlock(&arena->lock);
- } else
- arena_dalloc_large(arena, chunk, ptr);
- VALGRIND_FREELIKE_BLOCK(ptr, 0);
-}
-
-static inline void
-idalloc(void *ptr)
-{
- arena_chunk_t *chunk;
-
- assert(ptr != NULL);
-
- chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
- if (chunk != ptr)
- arena_dalloc(chunk->arena, chunk, ptr);
- else
- huge_dalloc(ptr);
-}
-
-static void
-arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, void *ptr,
- size_t size, size_t oldsize)
-{
-
- assert(size < oldsize);
-
- /*
- * Shrink the run, and make trailing pages available for other
- * allocations.
- */
-#ifdef MALLOC_BALANCE
- arena_lock_balance(arena);
-#else
- malloc_spin_lock(&arena->lock);
-#endif
- arena_run_trim_tail(arena, chunk, (arena_run_t *)ptr, oldsize, size,
- true);
-#ifdef MALLOC_STATS
- arena->stats.allocated_large -= oldsize - size;
-#endif
- malloc_spin_unlock(&arena->lock);
-}
-
-static bool
-arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
- size_t size, size_t oldsize)
-{
- size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> pagesize_2pow;
- size_t npages = oldsize >> pagesize_2pow;
-
- assert(oldsize == (chunk->map[pageind].bits & ~pagesize_mask));
-
- /* Try to extend the run. */
- assert(size > oldsize);
-#ifdef MALLOC_BALANCE
- arena_lock_balance(arena);
-#else
- malloc_spin_lock(&arena->lock);
-#endif
- if (pageind + npages < chunk_npages && (chunk->map[pageind+npages].bits
- & CHUNK_MAP_ALLOCATED) == 0 && (chunk->map[pageind+npages].bits &
- ~pagesize_mask) >= size - oldsize) {
- /*
- * The next run is available and sufficiently large. Split the
- * following run, then merge the first part with the existing
- * allocation.
- */
- arena_run_split(arena, (arena_run_t *)((uintptr_t)chunk +
- ((pageind+npages) << pagesize_2pow)), size - oldsize, true,
- false);
-
- chunk->map[pageind].bits = size | CHUNK_MAP_LARGE |
- CHUNK_MAP_ALLOCATED;
- chunk->map[pageind+npages].bits = CHUNK_MAP_LARGE |
- CHUNK_MAP_ALLOCATED;
-
-#ifdef MALLOC_STATS
- arena->stats.allocated_large += size - oldsize;
-#endif
- malloc_spin_unlock(&arena->lock);
- return (false);
- }
- malloc_spin_unlock(&arena->lock);
-
- return (true);
-}
-
-/*
- * Try to resize a large allocation, in order to avoid copying.  This always
- * fails when growing an object if the run that follows is already in use.
- */
-static bool
-arena_ralloc_large(void *ptr, size_t size, size_t oldsize)
-{
- size_t psize;
-
- psize = PAGE_CEILING(size);
- if (psize == oldsize) {
- /* Same size class. */
-#ifdef MALLOC_FILL
- if (opt_junk && size < oldsize) {
- memset((void *)((uintptr_t)ptr + size), 0x5a, oldsize -
- size);
- }
-#endif
- return (false);
- } else {
- arena_chunk_t *chunk;
- arena_t *arena;
-
- chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
- arena = chunk->arena;
- assert(arena->magic == ARENA_MAGIC);
-
- if (psize < oldsize) {
-#ifdef MALLOC_FILL
-			/* Fill before shrinking in order to avoid a race. */
- if (opt_junk) {
- memset((void *)((uintptr_t)ptr + size), 0x5a,
- oldsize - size);
- }
-#endif
- arena_ralloc_large_shrink(arena, chunk, ptr, psize,
- oldsize);
- return (false);
- } else {
- bool ret = arena_ralloc_large_grow(arena, chunk, ptr,
- psize, oldsize);
-#ifdef MALLOC_FILL
- if (ret == false && opt_zero) {
- memset((void *)((uintptr_t)ptr + oldsize), 0,
- size - oldsize);
- }
-#endif
- return (ret);
- }
- }
-}
-
-static void *
-arena_ralloc(void *ptr, size_t size, size_t oldsize)
-{
- void *ret;
- size_t copysize;
-
- /* Try to avoid moving the allocation. */
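-	/*
-	 * Worked example (quantum = 16 assumed for illustration): oldsize =
-	 * 20 and size = 30 both round up to the 32-byte quantum-spaced
-	 * class, so the request is satisfied via the IN_PLACE path below.
-	 */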
- if (size < small_min) {
- if (oldsize < small_min &&
- ffs((int)(pow2_ceil(size) >> (TINY_MIN_2POW + 1)))
- == ffs((int)(pow2_ceil(oldsize) >> (TINY_MIN_2POW + 1))))
- goto IN_PLACE; /* Same size class. */
- } else if (size <= small_max) {
- if (oldsize >= small_min && oldsize <= small_max &&
- (QUANTUM_CEILING(size) >> opt_quantum_2pow)
- == (QUANTUM_CEILING(oldsize) >> opt_quantum_2pow))
- goto IN_PLACE; /* Same size class. */
- } else if (size <= bin_maxclass) {
- if (oldsize > small_max && oldsize <= bin_maxclass &&
- pow2_ceil(size) == pow2_ceil(oldsize))
- goto IN_PLACE; /* Same size class. */
- } else if (oldsize > bin_maxclass && oldsize <= arena_maxclass) {
- assert(size > bin_maxclass);
- if (arena_ralloc_large(ptr, size, oldsize) == false)
- return (ptr);
- }
-
- /*
- * If we get here, then size and oldsize are different enough that we
- * need to move the object. In that case, fall back to allocating new
- * space and copying.
- */
- ret = arena_malloc(choose_arena(), size, false);
- if (ret == NULL)
- return (NULL);
-
-	/* Junk/zero filling was already done by arena_malloc(). */
- copysize = (size < oldsize) ? size : oldsize;
-#ifdef VM_COPY_MIN
- if (copysize >= VM_COPY_MIN)
- pages_copy(ret, ptr, copysize);
- else
-#endif
- memcpy(ret, ptr, copysize);
- idalloc(ptr);
- return (ret);
-IN_PLACE:
-#ifdef MALLOC_FILL
- if (opt_junk && size < oldsize)
- memset((void *)((uintptr_t)ptr + size), 0x5a, oldsize - size);
- else if (opt_zero && size > oldsize)
- memset((void *)((uintptr_t)ptr + oldsize), 0, size - oldsize);
-#endif
- return (ptr);
-}
-
-static inline void *
-iralloc(void *ptr, size_t size)
-{
- size_t oldsize;
-
- assert(ptr != NULL);
- assert(size != 0);
-
- oldsize = isalloc(ptr);
-
-#ifndef MALLOC_VALGRIND
- if (size <= arena_maxclass)
- return (arena_ralloc(ptr, size, oldsize));
- else
- return (huge_ralloc(ptr, size, oldsize));
-#else
- /*
- * Valgrind does not provide a public interface for modifying an
- * existing allocation, so use malloc/memcpy/free instead.
- */
- {
- void *ret = imalloc(size);
- if (ret != NULL) {
- if (oldsize < size)
- memcpy(ret, ptr, oldsize);
- else
- memcpy(ret, ptr, size);
- idalloc(ptr);
- }
- return (ret);
- }
-#endif
-}
-
-static bool
-arena_new(arena_t *arena)
-{
- unsigned i;
- arena_bin_t *bin;
- size_t pow2_size, prev_run_size;
-
- if (malloc_spin_init(&arena->lock))
- return (true);
-
-#ifdef MALLOC_STATS
- memset(&arena->stats, 0, sizeof(arena_stats_t));
-#endif
-
- arena->chunk_seq = 0;
-
- /* Initialize chunks. */
- arena_chunk_tree_dirty_new(&arena->chunks_dirty);
- arena->spare = NULL;
-
- arena->ndirty = 0;
-
- arena_avail_tree_new(&arena->runs_avail);
-
-#ifdef MALLOC_BALANCE
- arena->contention = 0;
-#endif
-
- /* Initialize bins. */
- prev_run_size = pagesize;
-
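-	/*
-	 * Illustrative layout, assuming TINY_MIN_2POW = 1, quantum = 16,
-	 * small_max = 512 and 4 KiB pages (values assumed here, not fixed
-	 * by this code): tiny bins of 2, 4 and 8 bytes; quantum-spaced bins
-	 * of 16, 32, ..., 512 bytes; sub-page bins of 1024 and 2048 bytes.
-	 */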
- /* (2^n)-spaced tiny bins. */
- for (i = 0; i < ntbins; i++) {
- bin = &arena->bins[i];
- bin->runcur = NULL;
- arena_run_tree_new(&bin->runs);
-
- bin->reg_size = (1U << (TINY_MIN_2POW + i));
-
- prev_run_size = arena_bin_run_size_calc(bin, prev_run_size);
-
-#ifdef MALLOC_STATS
- memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
-#endif
- }
-
- /* Quantum-spaced bins. */
- for (; i < ntbins + nqbins; i++) {
- bin = &arena->bins[i];
- bin->runcur = NULL;
- arena_run_tree_new(&bin->runs);
-
- bin->reg_size = quantum * (i - ntbins + 1);
-
- pow2_size = pow2_ceil(quantum * (i - ntbins + 1));
- prev_run_size = arena_bin_run_size_calc(bin, prev_run_size);
-
-#ifdef MALLOC_STATS
- memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
-#endif
- }
-
- /* (2^n)-spaced sub-page bins. */
- for (; i < ntbins + nqbins + nsbins; i++) {
- bin = &arena->bins[i];
- bin->runcur = NULL;
- arena_run_tree_new(&bin->runs);
-
- bin->reg_size = (small_max << (i - (ntbins + nqbins) + 1));
-
- prev_run_size = arena_bin_run_size_calc(bin, prev_run_size);
-
-#ifdef MALLOC_STATS
- memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
-#endif
- }
-
-#ifdef MALLOC_DEBUG
- arena->magic = ARENA_MAGIC;
-#endif
-
- return (false);
-}
-
-/* Create a new arena and insert it into the arenas array at index ind. */
-static arena_t *
-arenas_extend(unsigned ind)
-{
- arena_t *ret;
-
- /* Allocate enough space for trailing bins. */
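-	/*
-	 * arena_t is declared with a single-element bins array (the usual
-	 * C89 trailing-array idiom), hence the "- 1" when appending space
-	 * for the remaining bins.
-	 */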
- ret = (arena_t *)base_alloc(sizeof(arena_t)
- + (sizeof(arena_bin_t) * (ntbins + nqbins + nsbins - 1)));
- if (ret != NULL && arena_new(ret) == false) {
- arenas[ind] = ret;
- return (ret);
- }
- /* Only reached if there is an OOM error. */
-
- /*
- * OOM here is quite inconvenient to propagate, since dealing with it
- * would require a check for failure in the fast path. Instead, punt
- * by using arenas[0]. In practice, this is an extremely unlikely
- * failure.
- */
- _malloc_message(_getprogname(),
- ": (malloc) Error initializing arena\n", "", "");
- if (opt_abort)
- abort();
-
- return (arenas[0]);
-}
-
-/*
- * End arena.
- */
-/******************************************************************************/
-/*
- * Begin general internal functions.
- */
-
-static void *
-huge_malloc(size_t size, bool zero)
-{
- void *ret;
- size_t csize;
-#ifdef MALLOC_DECOMMIT
- size_t psize;
-#endif
- extent_node_t *node;
-
- /* Allocate one or more contiguous chunks for this request. */
-
- csize = CHUNK_CEILING(size);
- if (csize == 0) {
- /* size is large enough to cause size_t wrap-around. */
- return (NULL);
- }
-
- /* Allocate an extent node with which to track the chunk. */
- node = base_node_alloc();
- if (node == NULL)
- return (NULL);
-
- ret = chunk_alloc(csize, zero, true);
- if (ret == NULL) {
- base_node_dealloc(node);
- return (NULL);
- }
-
- /* Insert node into huge. */
- node->addr = ret;
-#ifdef MALLOC_DECOMMIT
- psize = PAGE_CEILING(size);
- node->size = psize;
-#else
- node->size = csize;
-#endif
-
- malloc_mutex_lock(&huge_mtx);
- extent_tree_ad_insert(&huge, node);
-#ifdef MALLOC_STATS
- huge_nmalloc++;
-# ifdef MALLOC_DECOMMIT
- huge_allocated += psize;
-# else
- huge_allocated += csize;
-# endif
-#endif
- malloc_mutex_unlock(&huge_mtx);
-
-#ifdef MALLOC_DECOMMIT
- if (csize - psize > 0)
- pages_decommit((void *)((uintptr_t)ret + psize), csize - psize);
-#endif
-
-#ifdef MALLOC_DECOMMIT
- VALGRIND_MALLOCLIKE_BLOCK(ret, psize, 0, zero);
-#else
- VALGRIND_MALLOCLIKE_BLOCK(ret, csize, 0, zero);
-#endif
-
-#ifdef MALLOC_FILL
- if (zero == false) {
- if (opt_junk)
-# ifdef MALLOC_DECOMMIT
- memset(ret, 0xa5, psize);
-# else
- memset(ret, 0xa5, csize);
-# endif
- else if (opt_zero)
-# ifdef MALLOC_DECOMMIT
- memset(ret, 0, psize);
-# else
- memset(ret, 0, csize);
-# endif
- }
-#endif
-
- return (ret);
-}
-
-/* Only handles large allocations that require more than chunk alignment. */
-static void *
-huge_palloc(size_t alignment, size_t size)
-{
- void *ret;
- size_t alloc_size, chunk_size, offset;
-#ifdef MALLOC_DECOMMIT
- size_t psize;
-#endif
- extent_node_t *node;
- int pfd;
-
- /*
- * This allocation requires alignment that is even larger than chunk
- * alignment. This means that huge_malloc() isn't good enough.
- *
- * Allocate almost twice as many chunks as are demanded by the size or
- * alignment, in order to ensure that the alignment can be achieved, then
- * unmap leading and trailing chunks.
- */
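-	/*
-	 * Example (sizes assumed for illustration): with 1 MiB chunks and a
-	 * 4 MiB alignment, a 1 MiB request over-allocates 2*4 - 1 = 7 MiB.
-	 * The first 4 MiB boundary lies at most 3 MiB past the start of the
-	 * chunk-aligned region, leaving at least 4 MiB once the leading and
-	 * trailing excess is unmapped.
-	 */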
- assert(alignment >= chunksize);
-
- chunk_size = CHUNK_CEILING(size);
-
- if (size >= alignment)
- alloc_size = chunk_size + alignment - chunksize;
- else
- alloc_size = (alignment << 1) - chunksize;
-
- /* Allocate an extent node with which to track the chunk. */
- node = base_node_alloc();
- if (node == NULL)
- return (NULL);
-
- /*
- * Windows requires that there be a 1:1 mapping between VM
- * allocation/deallocation operations. Therefore, take care here to
- * acquire the final result via one mapping operation.
- *
- * The MALLOC_PAGEFILE code also benefits from this mapping algorithm,
- * since it reduces the number of page files.
- */
-#ifdef MALLOC_PAGEFILE
- if (opt_pagefile) {
- pfd = pagefile_init(size);
- if (pfd == -1)
- return (NULL);
- } else
-#endif
- pfd = -1;
-#ifdef JEMALLOC_USES_MAP_ALIGN
- ret = pages_map_align(chunk_size, pfd, alignment);
-#else
- do {
- void *over;
-
- over = chunk_alloc(alloc_size, false, false);
- if (over == NULL) {
- base_node_dealloc(node);
- ret = NULL;
- goto RETURN;
- }
-
-		offset = (uintptr_t)over & (alignment - 1);
-		assert((offset & chunksize_mask) == 0);
-		assert(offset < alloc_size);
-		/* Round up to the next alignment boundary. */
-		if (offset == 0)
-			ret = over;
-		else
-			ret = (void *)((uintptr_t)over + alignment - offset);
- chunk_dealloc(over, alloc_size);
- ret = pages_map(ret, chunk_size, pfd);
- /*
- * Failure here indicates a race with another thread, so try
- * again.
- */
- } while (ret == NULL);
-#endif
- /* Insert node into huge. */
- node->addr = ret;
-#ifdef MALLOC_DECOMMIT
- psize = PAGE_CEILING(size);
- node->size = psize;
-#else
- node->size = chunk_size;
-#endif
-
- malloc_mutex_lock(&huge_mtx);
- extent_tree_ad_insert(&huge, node);
-#ifdef MALLOC_STATS
- huge_nmalloc++;
-# ifdef MALLOC_DECOMMIT
- huge_allocated += psize;
-# else
- huge_allocated += chunk_size;
-# endif
-#endif
- malloc_mutex_unlock(&huge_mtx);
-
-#ifdef MALLOC_DECOMMIT
- if (chunk_size - psize > 0) {
- pages_decommit((void *)((uintptr_t)ret + psize),
- chunk_size - psize);
- }
-#endif
-
-#ifdef MALLOC_DECOMMIT
- VALGRIND_MALLOCLIKE_BLOCK(ret, psize, 0, false);
-#else
- VALGRIND_MALLOCLIKE_BLOCK(ret, chunk_size, 0, false);
-#endif
-
-#ifdef MALLOC_FILL
- if (opt_junk)
-# ifdef MALLOC_DECOMMIT
- memset(ret, 0xa5, psize);
-# else
- memset(ret, 0xa5, chunk_size);
-# endif
- else if (opt_zero)
-# ifdef MALLOC_DECOMMIT
- memset(ret, 0, psize);
-# else
- memset(ret, 0, chunk_size);
-# endif
-#endif
-
-RETURN:
-#ifdef MALLOC_PAGEFILE
- if (pfd != -1)
- pagefile_close(pfd);
-#endif
- return (ret);
-}
-
-static void *
-huge_ralloc(void *ptr, size_t size, size_t oldsize)
-{
- void *ret;
- size_t copysize;
-
- /* Avoid moving the allocation if the size class would not change. */
-
- if (oldsize > arena_maxclass &&
- CHUNK_CEILING(size) == CHUNK_CEILING(oldsize)) {
-#ifdef MALLOC_DECOMMIT
- size_t psize = PAGE_CEILING(size);
-#endif
-#ifdef MALLOC_FILL
- if (opt_junk && size < oldsize) {
- memset((void *)((uintptr_t)ptr + size), 0x5a, oldsize
- - size);
- }
-#endif
-#ifdef MALLOC_DECOMMIT
- if (psize < oldsize) {
- extent_node_t *node, key;
-
- pages_decommit((void *)((uintptr_t)ptr + psize),
- oldsize - psize);
-
- /* Update recorded size. */
- malloc_mutex_lock(&huge_mtx);
- key.addr = __DECONST(void *, ptr);
- node = extent_tree_ad_search(&huge, &key);
- assert(node != NULL);
- assert(node->size == oldsize);
-# ifdef MALLOC_STATS
- huge_allocated -= oldsize - psize;
-# endif
- node->size = psize;
- malloc_mutex_unlock(&huge_mtx);
- } else if (psize > oldsize) {
- extent_node_t *node, key;
-
- pages_commit((void *)((uintptr_t)ptr + oldsize),
- psize - oldsize);
-
- /* Update recorded size. */
- malloc_mutex_lock(&huge_mtx);
- key.addr = __DECONST(void *, ptr);
- node = extent_tree_ad_search(&huge, &key);
- assert(node != NULL);
- assert(node->size == oldsize);
-# ifdef MALLOC_STATS
- huge_allocated += psize - oldsize;
-# endif
- node->size = psize;
- malloc_mutex_unlock(&huge_mtx);
- }
-#endif
-#ifdef MALLOC_FILL
- if (opt_zero && size > oldsize) {
- memset((void *)((uintptr_t)ptr + oldsize), 0, size
- - oldsize);
- }
-#endif
- return (ptr);
- }
-
- /*
- * If we get here, then size and oldsize are different enough that we
- * need to use a different size class. In that case, fall back to
- * allocating new space and copying.
- */
- ret = huge_malloc(size, false);
- if (ret == NULL)
- return (NULL);
-
- copysize = (size < oldsize) ? size : oldsize;
-#ifdef VM_COPY_MIN
- if (copysize >= VM_COPY_MIN)
- pages_copy(ret, ptr, copysize);
- else
-#endif
- memcpy(ret, ptr, copysize);
- idalloc(ptr);
- return (ret);
-}
-
-static void
-huge_dalloc(void *ptr)
-{
- extent_node_t *node, key;
-
- malloc_mutex_lock(&huge_mtx);
-
- /* Extract from tree of huge allocations. */
- key.addr = ptr;
- node = extent_tree_ad_search(&huge, &key);
- assert(node != NULL);
- assert(node->addr == ptr);
- extent_tree_ad_remove(&huge, node);
-
-#ifdef MALLOC_STATS
- huge_ndalloc++;
- huge_allocated -= node->size;
-#endif
-
- malloc_mutex_unlock(&huge_mtx);
-
- /* Unmap chunk. */
-#ifdef MALLOC_FILL
- if (opt_junk)
- memset(node->addr, 0x5a, node->size);
-#endif
-#ifdef MALLOC_DECOMMIT
- chunk_dealloc(node->addr, CHUNK_CEILING(node->size));
-#else
- chunk_dealloc(node->addr, node->size);
-#endif
- VALGRIND_FREELIKE_BLOCK(node->addr, 0);
-
- base_node_dealloc(node);
-}
-
-#ifdef MOZ_MEMORY_BSD
-static inline unsigned
-malloc_ncpus(void)
-{
- unsigned ret;
- int mib[2];
- size_t len;
-
- mib[0] = CTL_HW;
- mib[1] = HW_NCPU;
- len = sizeof(ret);
- if (sysctl(mib, 2, &ret, &len, (void *) 0, 0) == -1) {
- /* Error. */
- return (1);
- }
-
- return (ret);
-}
-#elif (defined(MOZ_MEMORY_LINUX))
-#include <fcntl.h>
-
-static inline unsigned
-malloc_ncpus(void)
-{
- unsigned ret;
- int fd, nread, column;
- char buf[1024];
- static const char matchstr[] = "processor\t:";
- int i;
-
- /*
- * sysconf(3) would be the preferred method for determining the number
- * of CPUs, but it uses malloc internally, which causes untenable
- * recursion during malloc initialization.
- */
- fd = open("/proc/cpuinfo", O_RDONLY);
- if (fd == -1)
- return (1); /* Error. */
- /*
- * Count the number of occurrences of matchstr at the beginnings of
- * lines. This treats hyperthreaded CPUs as multiple processors.
- */
- column = 0;
- ret = 0;
- while (true) {
- nread = read(fd, &buf, sizeof(buf));
- if (nread <= 0)
- break; /* EOF or error. */
-		for (i = 0; i < nread; i++) {
- char c = buf[i];
- if (c == '\n')
- column = 0;
- else if (column != -1) {
- if (c == matchstr[column]) {
- column++;
- if (column == sizeof(matchstr) - 1) {
- column = -1;
- ret++;
- }
- } else
- column = -1;
- }
- }
- }
-
- if (ret == 0)
- ret = 1; /* Something went wrong in the parser. */
- close(fd);
-
- return (ret);
-}
-#elif (defined(MOZ_MEMORY_DARWIN))
-#include <mach/mach_init.h>
-#include <mach/mach_host.h>
-
-static inline unsigned
-malloc_ncpus(void)
-{
- kern_return_t error;
- natural_t n;
- processor_info_array_t pinfo;
- mach_msg_type_number_t pinfocnt;
-
- error = host_processor_info(mach_host_self(), PROCESSOR_BASIC_INFO,
- &n, &pinfo, &pinfocnt);
- if (error != KERN_SUCCESS)
- return (1); /* Error. */
- else
- return (n);
-}
-#elif (defined(MOZ_MEMORY_SOLARIS))
-
-static inline unsigned
-malloc_ncpus(void)
-{
-	return ((unsigned)sysconf(_SC_NPROCESSORS_ONLN));
-}
-#else
-static inline unsigned
-malloc_ncpus(void)
-{
-
- /*
- * We lack a way to determine the number of CPUs on this platform, so
- * assume 1 CPU.
- */
- return (1);
-}
-#endif
-
-static void
-malloc_print_stats(void)
-{
-
- if (opt_print_stats) {
- char s[UMAX2S_BUFSIZE];
- _malloc_message("___ Begin malloc statistics ___\n", "", "",
- "");
- _malloc_message("Assertions ",
-#ifdef NDEBUG
- "disabled",
-#else
- "enabled",
-#endif
- "\n", "");
- _malloc_message("Boolean MALLOC_OPTIONS: ",
- opt_abort ? "A" : "a", "", "");
-#ifdef MALLOC_FILL
- _malloc_message(opt_junk ? "J" : "j", "", "", "");
-#endif
-#ifdef MALLOC_PAGEFILE
- _malloc_message(opt_pagefile ? "o" : "O", "", "", "");
-#endif
- _malloc_message("P", "", "", "");
-#ifdef MALLOC_UTRACE
- _malloc_message(opt_utrace ? "U" : "u", "", "", "");
-#endif
-#ifdef MALLOC_SYSV
- _malloc_message(opt_sysv ? "V" : "v", "", "", "");
-#endif
-#ifdef MALLOC_XMALLOC
- _malloc_message(opt_xmalloc ? "X" : "x", "", "", "");
-#endif
-#ifdef MALLOC_FILL
- _malloc_message(opt_zero ? "Z" : "z", "", "", "");
-#endif
- _malloc_message("\n", "", "", "");
-
- _malloc_message("CPUs: ", umax2s(ncpus, s), "\n", "");
- _malloc_message("Max arenas: ", umax2s(narenas, s), "\n", "");
-#ifdef MALLOC_BALANCE
- _malloc_message("Arena balance threshold: ",
- umax2s(opt_balance_threshold, s), "\n", "");
-#endif
- _malloc_message("Pointer size: ", umax2s(sizeof(void *), s),
- "\n", "");
- _malloc_message("Quantum size: ", umax2s(quantum, s), "\n", "");
- _malloc_message("Max small size: ", umax2s(small_max, s), "\n",
- "");
- _malloc_message("Max dirty pages per arena: ",
- umax2s(opt_dirty_max, s), "\n", "");
-
- _malloc_message("Chunk size: ", umax2s(chunksize, s), "", "");
- _malloc_message(" (2^", umax2s(opt_chunk_2pow, s), ")\n", "");
-
-#ifdef MALLOC_STATS
- {
- size_t allocated, mapped;
-#ifdef MALLOC_BALANCE
- uint64_t nbalance = 0;
-#endif
- unsigned i;
- arena_t *arena;
-
- /* Calculate and print allocated/mapped stats. */
-
- /* arenas. */
- for (i = 0, allocated = 0; i < narenas; i++) {
- if (arenas[i] != NULL) {
- malloc_spin_lock(&arenas[i]->lock);
- allocated +=
- arenas[i]->stats.allocated_small;
- allocated +=
- arenas[i]->stats.allocated_large;
-#ifdef MALLOC_BALANCE
- nbalance += arenas[i]->stats.nbalance;
-#endif
- malloc_spin_unlock(&arenas[i]->lock);
- }
- }
-
- /* huge/base. */
- malloc_mutex_lock(&huge_mtx);
- allocated += huge_allocated;
- mapped = stats_chunks.curchunks * chunksize;
- malloc_mutex_unlock(&huge_mtx);
-
- malloc_mutex_lock(&base_mtx);
- mapped += base_mapped;
- malloc_mutex_unlock(&base_mtx);
-
-#ifdef MOZ_MEMORY_WINDOWS
- malloc_printf("Allocated: %lu, mapped: %lu\n",
- allocated, mapped);
-#else
- malloc_printf("Allocated: %zu, mapped: %zu\n",
- allocated, mapped);
-#endif
-
- malloc_mutex_lock(&reserve_mtx);
- malloc_printf("Reserve: min "
- "cur max\n");
-#ifdef MOZ_MEMORY_WINDOWS
- malloc_printf(" %12lu %12lu %12lu\n",
- CHUNK_CEILING(reserve_min) >> opt_chunk_2pow,
- reserve_cur >> opt_chunk_2pow,
- reserve_max >> opt_chunk_2pow);
-#else
- malloc_printf(" %12zu %12zu %12zu\n",
- CHUNK_CEILING(reserve_min) >> opt_chunk_2pow,
- reserve_cur >> opt_chunk_2pow,
- reserve_max >> opt_chunk_2pow);
-#endif
- malloc_mutex_unlock(&reserve_mtx);
-
-#ifdef MALLOC_BALANCE
- malloc_printf("Arena balance reassignments: %llu\n",
- nbalance);
-#endif
-
- /* Print chunk stats. */
- {
- chunk_stats_t chunks_stats;
-
- malloc_mutex_lock(&huge_mtx);
- chunks_stats = stats_chunks;
- malloc_mutex_unlock(&huge_mtx);
-
- malloc_printf("chunks: nchunks "
- "highchunks curchunks\n");
- malloc_printf(" %13llu%13lu%13lu\n",
- chunks_stats.nchunks,
- chunks_stats.highchunks,
- chunks_stats.curchunks);
- }
-
-			/* Print huge allocation stats. */
- malloc_printf(
- "huge: nmalloc ndalloc allocated\n");
-#ifdef MOZ_MEMORY_WINDOWS
- malloc_printf(" %12llu %12llu %12lu\n",
- huge_nmalloc, huge_ndalloc, huge_allocated);
-#else
- malloc_printf(" %12llu %12llu %12zu\n",
- huge_nmalloc, huge_ndalloc, huge_allocated);
-#endif
- /* Print stats for each arena. */
- for (i = 0; i < narenas; i++) {
- arena = arenas[i];
- if (arena != NULL) {
- malloc_printf(
- "\narenas[%u]:\n", i);
- malloc_spin_lock(&arena->lock);
- stats_print(arena);
- malloc_spin_unlock(&arena->lock);
- }
- }
- }
-#endif /* #ifdef MALLOC_STATS */
- _malloc_message("--- End malloc statistics ---\n", "", "", "");
- }
-}
-
-/*
- * FreeBSD's pthreads implementation calls malloc(3), so the malloc
- * implementation has to take pains to avoid infinite recursion during
- * initialization.
- */
-#if (defined(MOZ_MEMORY_WINDOWS) || defined(MOZ_MEMORY_DARWIN)) && !defined(MOZ_MEMORY_WINCE)
-#define malloc_init() false
-#else
-static inline bool
-malloc_init(void)
-{
-
- if (malloc_initialized == false)
- return (malloc_init_hard());
-
- return (false);
-}
-#endif
-
-#if !defined(MOZ_MEMORY_WINDOWS) || defined(MOZ_MEMORY_WINCE)
-static
-#endif
-bool
-je_malloc_init_hard(void)
-{
- unsigned i;
- char buf[PATH_MAX + 1];
- const char *opts;
- long result;
-#ifndef MOZ_MEMORY_WINDOWS
- int linklen;
-#endif
-
-#ifndef MOZ_MEMORY_WINDOWS
- malloc_mutex_lock(&init_lock);
-#endif
-
- if (malloc_initialized) {
- /*
- * Another thread initialized the allocator before this one
- * acquired init_lock.
- */
-#ifndef MOZ_MEMORY_WINDOWS
- malloc_mutex_unlock(&init_lock);
-#endif
- return (false);
- }
-
-#ifdef MOZ_MEMORY_WINDOWS
- /* get a thread local storage index */
- tlsIndex = TlsAlloc();
-#endif
-
- /* Get page size and number of CPUs */
-#ifdef MOZ_MEMORY_WINDOWS
- {
- SYSTEM_INFO info;
-
- GetSystemInfo(&info);
- result = info.dwPageSize;
-
- pagesize = (unsigned) result;
-
- ncpus = info.dwNumberOfProcessors;
- }
-#else
- ncpus = malloc_ncpus();
-
- result = sysconf(_SC_PAGESIZE);
- assert(result != -1);
-
- pagesize = (unsigned) result;
-#endif
-
- /*
- * We assume that pagesize is a power of 2 when calculating
- * pagesize_mask and pagesize_2pow.
- */
- assert(((result - 1) & result) == 0);
- pagesize_mask = result - 1;
- pagesize_2pow = ffs((int)result) - 1;
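-	/* e.g. a 4096-byte page yields pagesize_mask 0xfff, pagesize_2pow 12. */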
-
-#ifdef MALLOC_PAGEFILE
- /*
- * Determine where to create page files. It is insufficient to
- * unconditionally use P_tmpdir (typically "/tmp"), since for some
- * operating systems /tmp is a separate filesystem that is rather small.
- * Therefore prefer, in order, the following locations:
- *
- * 1) MALLOC_TMPDIR
- * 2) TMPDIR
- * 3) P_tmpdir
- */
- {
- char *s;
- size_t slen;
- static const char suffix[] = "/jemalloc.XXXXXX";
-
- if ((s = getenv("MALLOC_TMPDIR")) == NULL && (s =
- getenv("TMPDIR")) == NULL)
- s = P_tmpdir;
- slen = strlen(s);
- if (slen + sizeof(suffix) > sizeof(pagefile_templ)) {
- _malloc_message(_getprogname(),
- ": (malloc) Page file path too long\n",
- "", "");
- abort();
- }
- memcpy(pagefile_templ, s, slen);
- memcpy(&pagefile_templ[slen], suffix, sizeof(suffix));
- }
-#endif
-
- for (i = 0; i < 3; i++) {
- unsigned j;
-
- /* Get runtime configuration. */
- switch (i) {
- case 0:
-#ifndef MOZ_MEMORY_WINDOWS
- if ((linklen = readlink("/etc/malloc.conf", buf,
- sizeof(buf) - 1)) != -1) {
- /*
- * Use the contents of the "/etc/malloc.conf"
- * symbolic link's name.
- */
- buf[linklen] = '\0';
- opts = buf;
- } else
-#endif
- {
- /* No configuration specified. */
- buf[0] = '\0';
- opts = buf;
- }
- break;
- case 1:
- if (issetugid() == 0 && (opts =
- getenv("MALLOC_OPTIONS")) != NULL) {
- /*
- * Do nothing; opts is already initialized to
- * the value of the MALLOC_OPTIONS environment
- * variable.
- */
- } else {
- /* No configuration specified. */
- buf[0] = '\0';
- opts = buf;
- }
- break;
- case 2:
- if (_malloc_options != NULL) {
- /*
- * Use options that were compiled into the
- * program.
- */
- opts = _malloc_options;
- } else {
- /* No configuration specified. */
- buf[0] = '\0';
- opts = buf;
- }
- break;
- default:
- /* NOTREACHED */
- buf[0] = '\0';
- opts = buf;
- assert(false);
- }
-
- for (j = 0; opts[j] != '\0'; j++) {
- unsigned k, nreps;
- bool nseen;
-
- /* Parse repetition count, if any. */
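-			/* e.g. MALLOC_OPTIONS="3N" applies option 'N' three times. */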
- for (nreps = 0, nseen = false;; j++, nseen = true) {
- switch (opts[j]) {
- case '0': case '1': case '2': case '3':
- case '4': case '5': case '6': case '7':
- case '8': case '9':
- nreps *= 10;
- nreps += opts[j] - '0';
- break;
- default:
- goto MALLOC_OUT;
- }
- }
-MALLOC_OUT:
- if (nseen == false)
- nreps = 1;
-
- for (k = 0; k < nreps; k++) {
- switch (opts[j]) {
- case 'a':
- opt_abort = false;
- break;
- case 'A':
- opt_abort = true;
- break;
- case 'b':
-#ifdef MALLOC_BALANCE
- opt_balance_threshold >>= 1;
-#endif
- break;
- case 'B':
-#ifdef MALLOC_BALANCE
- if (opt_balance_threshold == 0)
- opt_balance_threshold = 1;
- else if ((opt_balance_threshold << 1)
- > opt_balance_threshold)
- opt_balance_threshold <<= 1;
-#endif
- break;
- case 'f':
- opt_dirty_max >>= 1;
- break;
- case 'F':
- if (opt_dirty_max == 0)
- opt_dirty_max = 1;
- else if ((opt_dirty_max << 1) != 0)
- opt_dirty_max <<= 1;
- break;
- case 'g':
- opt_reserve_range_lshift--;
- break;
- case 'G':
- opt_reserve_range_lshift++;
- break;
-#ifdef MALLOC_FILL
- case 'j':
- opt_junk = false;
- break;
- case 'J':
- opt_junk = true;
- break;
-#endif
- case 'k':
- /*
- * Chunks always require at least one
- * header page, so chunks can never be
- * smaller than two pages.
- */
- if (opt_chunk_2pow > pagesize_2pow + 1)
- opt_chunk_2pow--;
- break;
- case 'K':
- if (opt_chunk_2pow + 1 <
- (sizeof(size_t) << 3))
- opt_chunk_2pow++;
- break;
- case 'n':
- opt_narenas_lshift--;
- break;
- case 'N':
- opt_narenas_lshift++;
- break;
-#ifdef MALLOC_PAGEFILE
- case 'o':
- /* Do not over-commit. */
- opt_pagefile = true;
- break;
- case 'O':
- /* Allow over-commit. */
- opt_pagefile = false;
- break;
-#endif
- case 'p':
- opt_print_stats = false;
- break;
- case 'P':
- opt_print_stats = true;
- break;
- case 'q':
- if (opt_quantum_2pow > QUANTUM_2POW_MIN)
- opt_quantum_2pow--;
- break;
- case 'Q':
- if (opt_quantum_2pow < pagesize_2pow -
- 1)
- opt_quantum_2pow++;
- break;
- case 'r':
- opt_reserve_min_lshift--;
- break;
- case 'R':
- opt_reserve_min_lshift++;
- break;
- case 's':
- if (opt_small_max_2pow >
- QUANTUM_2POW_MIN)
- opt_small_max_2pow--;
- break;
- case 'S':
- if (opt_small_max_2pow < pagesize_2pow
- - 1)
- opt_small_max_2pow++;
- break;
-#ifdef MALLOC_UTRACE
- case 'u':
- opt_utrace = false;
- break;
- case 'U':
- opt_utrace = true;
- break;
-#endif
-#ifdef MALLOC_SYSV
- case 'v':
- opt_sysv = false;
- break;
- case 'V':
- opt_sysv = true;
- break;
-#endif
-#ifdef MALLOC_XMALLOC
- case 'x':
- opt_xmalloc = false;
- break;
- case 'X':
- opt_xmalloc = true;
- break;
-#endif
-#ifdef MALLOC_FILL
- case 'z':
- opt_zero = false;
- break;
- case 'Z':
- opt_zero = true;
- break;
-#endif
- default: {
- char cbuf[2];
-
- cbuf[0] = opts[j];
- cbuf[1] = '\0';
- _malloc_message(_getprogname(),
- ": (malloc) Unsupported character "
- "in malloc options: '", cbuf,
- "'\n");
- }
- }
- }
- }
- }
-
- /* Take care to call atexit() only once. */
- if (opt_print_stats) {
-#ifndef MOZ_MEMORY_WINDOWS
- /* Print statistics at exit. */
- atexit(malloc_print_stats);
-#endif
- }
-
-#if (!defined(MOZ_MEMORY_WINDOWS) && !defined(MOZ_MEMORY_DARWIN))
- /* Prevent potential deadlock on malloc locks after fork. */
- pthread_atfork(_malloc_prefork, _malloc_postfork, _malloc_postfork);
-#endif
-
- /* Set variables according to the value of opt_small_max_2pow. */
- if (opt_small_max_2pow < opt_quantum_2pow)
- opt_small_max_2pow = opt_quantum_2pow;
- small_max = (1U << opt_small_max_2pow);
-
- /* Set bin-related variables. */
- bin_maxclass = (pagesize >> 1);
- assert(opt_quantum_2pow >= TINY_MIN_2POW);
- ntbins = opt_quantum_2pow - TINY_MIN_2POW;
- assert(ntbins <= opt_quantum_2pow);
- nqbins = (small_max >> opt_quantum_2pow);
- nsbins = pagesize_2pow - opt_small_max_2pow - 1;
-
- /* Set variables according to the value of opt_quantum_2pow. */
- quantum = (1U << opt_quantum_2pow);
- quantum_mask = quantum - 1;
- if (ntbins > 0)
- small_min = (quantum >> 1) + 1;
- else
- small_min = 1;
- assert(small_min <= quantum);
-
- /* Set variables according to the value of opt_chunk_2pow. */
- chunksize = (1LU << opt_chunk_2pow);
- chunksize_mask = chunksize - 1;
- chunk_npages = (chunksize >> pagesize_2pow);
- {
- size_t header_size;
-
- /*
- * Compute the header size such that it is large
- * enough to contain the page map and enough nodes for the
- * worst case: one node per non-header page plus one extra for
- * situations where we briefly have one more node allocated
- * than we will need.
- */
- header_size = sizeof(arena_chunk_t) +
- (sizeof(arena_chunk_map_t) * (chunk_npages - 1));
- arena_chunk_header_npages = (header_size >> pagesize_2pow) +
- ((header_size & pagesize_mask) != 0);
- }
- arena_maxclass = chunksize - (arena_chunk_header_npages <<
- pagesize_2pow);
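-	/*
-	 * For example (sizes assumed): with 1 MiB chunks and 4 KiB pages,
-	 * chunk_npages is 256, the chunk header and page map occupy one or
-	 * two header pages, and arena_maxclass ends up just under 1 MiB.
-	 */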
-
-#ifdef JEMALLOC_USES_MAP_ALIGN
- /*
- * When using MAP_ALIGN, the alignment parameter must be a power of two
- * multiple of the system pagesize, or mmap will fail.
- */
- assert((chunksize % pagesize) == 0);
- assert((1 << (ffs(chunksize / pagesize) - 1)) == (chunksize/pagesize));
-#endif
-
- UTRACE(0, 0, 0);
-
-#ifdef MALLOC_STATS
- memset(&stats_chunks, 0, sizeof(chunk_stats_t));
-#endif
-
- /* Various sanity checks that regard configuration. */
- assert(quantum >= sizeof(void *));
- assert(quantum <= pagesize);
- assert(chunksize >= pagesize);
- assert(quantum * 4 <= chunksize);
-
- /* Initialize chunks data. */
- malloc_mutex_init(&huge_mtx);
- extent_tree_ad_new(&huge);
-#ifdef MALLOC_STATS
- huge_nmalloc = 0;
- huge_ndalloc = 0;
- huge_allocated = 0;
-#endif
-
- /* Initialize base allocation data structures. */
-#ifdef MALLOC_STATS
- base_mapped = 0;
-#endif
- base_nodes = NULL;
- base_reserve_regs = NULL;
- malloc_mutex_init(&base_mtx);
-
-#ifdef MOZ_MEMORY_NARENAS_DEFAULT_ONE
- narenas = 1;
-#else
- if (ncpus > 1) {
- /*
- * For SMP systems, create four times as many arenas as there
- * are CPUs by default.
- */
- opt_narenas_lshift += 2;
- }
-
- /* Determine how many arenas to use. */
- narenas = ncpus;
-#endif
- if (opt_narenas_lshift > 0) {
- if ((narenas << opt_narenas_lshift) > narenas)
- narenas <<= opt_narenas_lshift;
- /*
- * Make sure not to exceed the limits of what base_alloc() can
- * handle.
- */
- if (narenas * sizeof(arena_t *) > chunksize)
- narenas = chunksize / sizeof(arena_t *);
- } else if (opt_narenas_lshift < 0) {
- if ((narenas >> -opt_narenas_lshift) < narenas)
- narenas >>= -opt_narenas_lshift;
- /* Make sure there is at least one arena. */
- if (narenas == 0)
- narenas = 1;
- }
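-	/* e.g. with 4 CPUs and default options: narenas = 4 << 2 = 16. */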
-#ifdef MALLOC_BALANCE
- assert(narenas != 0);
- for (narenas_2pow = 0;
- (narenas >> (narenas_2pow + 1)) != 0;
- narenas_2pow++);
-#endif
-
-#ifdef NO_TLS
- if (narenas > 1) {
- static const unsigned primes[] = {1, 3, 5, 7, 11, 13, 17, 19,
- 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83,
- 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149,
- 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211,
- 223, 227, 229, 233, 239, 241, 251, 257, 263};
- unsigned nprimes, parenas;
-
- /*
- * Pick a prime number of hash arenas that is more than narenas
- * so that direct hashing of pthread_self() pointers tends to
- * spread allocations evenly among the arenas.
- */
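-		/* e.g. narenas = 16 hashes into parenas = 17, the next prime. */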
- assert((narenas & 1) == 0); /* narenas must be even. */
- nprimes = (sizeof(primes) >> SIZEOF_INT_2POW);
- parenas = primes[nprimes - 1]; /* In case not enough primes. */
- for (i = 1; i < nprimes; i++) {
- if (primes[i] > narenas) {
- parenas = primes[i];
- break;
- }
- }
- narenas = parenas;
- }
-#endif
-
-#ifndef NO_TLS
-# ifndef MALLOC_BALANCE
- next_arena = 0;
-# endif
-#endif
-
- /* Allocate and initialize arenas. */
- arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas);
- if (arenas == NULL) {
-#ifndef MOZ_MEMORY_WINDOWS
- malloc_mutex_unlock(&init_lock);
-#endif
- return (true);
- }
- /*
- * Zero the array. In practice, this should always be pre-zeroed,
- * since it was just mmap()ed, but let's be sure.
- */
- memset(arenas, 0, sizeof(arena_t *) * narenas);
-
- /*
- * Initialize one arena here. The rest are lazily created in
- * choose_arena_hard().
- */
- arenas_extend(0);
- if (arenas[0] == NULL) {
-#ifndef MOZ_MEMORY_WINDOWS
- malloc_mutex_unlock(&init_lock);
-#endif
- return (true);
- }
-#ifndef NO_TLS
- /*
- * Assign the initial arena to the initial thread, in order to avoid
- * spurious creation of an extra arena if the application switches to
- * threaded mode.
- */
-#ifdef MOZ_MEMORY_WINDOWS
- TlsSetValue(tlsIndex, arenas[0]);
-#else
- arenas_map = arenas[0];
-#endif
-#endif
-
- /*
- * Seed here for the initial thread, since choose_arena_hard() is only
- * called for other threads. The seed value doesn't really matter.
- */
-#ifdef MALLOC_BALANCE
- SPRN(balance, 42);
-#endif
-
- malloc_spin_init(&arenas_lock);
-
-#ifdef MALLOC_VALIDATE
- chunk_rtree = malloc_rtree_new((SIZEOF_PTR << 3) - opt_chunk_2pow);
- if (chunk_rtree == NULL)
- return (true);
-#endif
-
- /*
- * Configure and initialize the memory reserve. This needs to happen
- * late during initialization, since chunks are allocated.
- */
- malloc_mutex_init(&reserve_mtx);
- reserve_min = 0;
- reserve_cur = 0;
- reserve_max = 0;
- if (RESERVE_RANGE_2POW_DEFAULT + opt_reserve_range_lshift >= 0) {
- reserve_max += chunksize << (RESERVE_RANGE_2POW_DEFAULT +
- opt_reserve_range_lshift);
- }
- ql_new(&reserve_regs);
- reserve_seq = 0;
- extent_tree_szad_new(&reserve_chunks_szad);
- extent_tree_ad_new(&reserve_chunks_ad);
- if (RESERVE_MIN_2POW_DEFAULT + opt_reserve_min_lshift >= 0) {
- reserve_min_set(chunksize << (RESERVE_MIN_2POW_DEFAULT +
- opt_reserve_min_lshift));
- }
-
- malloc_initialized = true;
-#ifndef MOZ_MEMORY_WINDOWS
- malloc_mutex_unlock(&init_lock);
-#endif
- return (false);
-}
-
-/* XXX Why not just expose malloc_print_stats()? */
-#ifdef MOZ_MEMORY_WINDOWS
-void
-malloc_shutdown()
-{
-
- malloc_print_stats();
-}
-#endif
-
-/*
- * End general internal functions.
- */
-/******************************************************************************/
-/*
- * Begin malloc(3)-compatible functions.
- */
-
-/*
- * Inline the standard malloc functions if they are being subsumed by Darwin's
- * zone infrastructure.
- */
-#ifdef MOZ_MEMORY_DARWIN
-# define ZONE_INLINE inline
-#else
-# define ZONE_INLINE
-#endif
-
-/*
- * Mangle standard interfaces on Darwin and Windows CE, in order to avoid
- * linking problems.
- */
-#ifdef MOZ_MEMORY_DARWIN
-#define DONT_OVERRIDE_LIBC
-#endif
-
-#if defined(DONT_OVERRIDE_LIBC)
-#define malloc(a) je_malloc(a)
-#define valloc(a) je_valloc(a)
-#define calloc(a, b) je_calloc(a, b)
-#define realloc(a, b) je_realloc(a, b)
-#define free(a) je_free(a)
-#define _msize(p) je_msize(p)
-#define _recalloc(p, n, s) je_recalloc(p, n, s)
-#endif
-
-ZONE_INLINE
-void *
-malloc(size_t size)
-{
- void *ret;
-
- if (malloc_init()) {
- ret = NULL;
- goto RETURN;
- }
-
- if (size == 0) {
-#ifdef MALLOC_SYSV
- if (opt_sysv == false)
-#endif
- size = 1;
-#ifdef MALLOC_SYSV
- else {
- ret = NULL;
- goto RETURN;
- }
-#endif
- }
-
- ret = imalloc(size);
-
-RETURN:
- if (ret == NULL) {
-#ifdef MALLOC_XMALLOC
- if (opt_xmalloc) {
- _malloc_message(_getprogname(),
- ": (malloc) Error in malloc(): out of memory\n", "",
- "");
- abort();
- }
-#endif
- errno = ENOMEM;
- }
-
- UTRACE(0, size, ret);
- return (ret);
-}
-
-#ifdef MOZ_MEMORY_SOLARIS
-# ifdef __SUNPRO_C
-void *
-memalign(size_t alignment, size_t size);
-#pragma no_inline(memalign)
-# elif (defined(__GNU_C__))
-__attribute__((noinline))
-# endif
-#else
-inline
-#endif
-void *
-memalign(size_t alignment, size_t size)
-{
- void *ret;
-
- assert(((alignment - 1) & alignment) == 0 && alignment >=
- sizeof(void *));
-
- if (malloc_init()) {
- ret = NULL;
- goto RETURN;
- }
-
- ret = ipalloc(alignment, size);
-
-RETURN:
-#ifdef MALLOC_XMALLOC
- if (opt_xmalloc && ret == NULL) {
- _malloc_message(_getprogname(),
- ": (malloc) Error in memalign(): out of memory\n", "", "");
- abort();
- }
-#endif
- UTRACE(0, size, ret);
- return (ret);
-}
-
-ZONE_INLINE
-int
-posix_memalign(void **memptr, size_t alignment, size_t size)
-{
- void *result;
-
- /* Make sure that alignment is a large enough power of 2. */
- if (((alignment - 1) & alignment) != 0 || alignment < sizeof(void *)) {
-#ifdef MALLOC_XMALLOC
- if (opt_xmalloc) {
- _malloc_message(_getprogname(),
- ": (malloc) Error in posix_memalign(): "
- "invalid alignment\n", "", "");
- abort();
- }
-#endif
- return (EINVAL);
- }
-
-#ifdef MOZ_MEMORY_DARWIN
- result = moz_memalign(alignment, size);
-#else
- result = memalign(alignment, size);
-#endif
- if (result == NULL)
- return (ENOMEM);
-
- *memptr = result;
- return (0);
-}
-
-ZONE_INLINE
-void *
-valloc(size_t size)
-{
-#ifdef MOZ_MEMORY_DARWIN
- return (moz_memalign(pagesize, size));
-#else
- return (memalign(pagesize, size));
-#endif
-}
-
-ZONE_INLINE
-void *
-calloc(size_t num, size_t size)
-{
- void *ret;
- size_t num_size;
-
- if (malloc_init()) {
- num_size = 0;
- ret = NULL;
- goto RETURN;
- }
-
- num_size = num * size;
- if (num_size == 0) {
-#ifdef MALLOC_SYSV
- if ((opt_sysv == false) && ((num == 0) || (size == 0)))
-#endif
- num_size = 1;
-#ifdef MALLOC_SYSV
- else {
- ret = NULL;
- goto RETURN;
- }
-#endif
- /*
- * Try to avoid division here. We know that it isn't possible to
- * overflow during multiplication if neither operand uses any of the
- * most significant half of the bits in a size_t.
- */
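-	/*
-	 * For example, assuming a 32-bit size_t: the mask below is
-	 * 0xffff0000, so if both num and size fit in 16 bits, their product
-	 * cannot overflow and the division test is skipped entirely.
-	 */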
- } else if (((num | size) & (SIZE_T_MAX << (sizeof(size_t) << 2)))
- && (num_size / size != num)) {
- /* size_t overflow. */
- ret = NULL;
- goto RETURN;
- }
-
- ret = icalloc(num_size);
-
-RETURN:
- if (ret == NULL) {
-#ifdef MALLOC_XMALLOC
- if (opt_xmalloc) {
- _malloc_message(_getprogname(),
- ": (malloc) Error in calloc(): out of memory\n", "",
- "");
- abort();
- }
-#endif
- errno = ENOMEM;
- }
-
- UTRACE(0, num_size, ret);
- return (ret);
-}
-
-ZONE_INLINE
-void *
-realloc(void *ptr, size_t size)
-{
- void *ret;
-
- if (size == 0) {
-#ifdef MALLOC_SYSV
- if (opt_sysv == false)
-#endif
- size = 1;
-#ifdef MALLOC_SYSV
- else {
- if (ptr != NULL)
- idalloc(ptr);
- ret = NULL;
- goto RETURN;
- }
-#endif
- }
-
- if (ptr != NULL) {
- assert(malloc_initialized);
-
- ret = iralloc(ptr, size);
-
- if (ret == NULL) {
-#ifdef MALLOC_XMALLOC
- if (opt_xmalloc) {
- _malloc_message(_getprogname(),
- ": (malloc) Error in realloc(): out of "
- "memory\n", "", "");
- abort();
- }
-#endif
- errno = ENOMEM;
- }
- } else {
- if (malloc_init())
- ret = NULL;
- else
- ret = imalloc(size);
-
- if (ret == NULL) {
-#ifdef MALLOC_XMALLOC
- if (opt_xmalloc) {
- _malloc_message(_getprogname(),
- ": (malloc) Error in realloc(): out of "
- "memory\n", "", "");
- abort();
- }
-#endif
- errno = ENOMEM;
- }
- }
-
-#ifdef MALLOC_SYSV
-RETURN:
-#endif
- UTRACE(ptr, size, ret);
- return (ret);
-}
-
-ZONE_INLINE
-void
-free(void *ptr)
-{
-
- UTRACE(ptr, 0, 0);
- if (ptr != NULL) {
- assert(malloc_initialized);
-
- idalloc(ptr);
- }
-}
-
-/*
- * End malloc(3)-compatible functions.
- */
-/******************************************************************************/
-/*
- * Begin non-standard functions.
- */
-
-size_t
-malloc_usable_size(const void *ptr)
-{
-
-#ifdef MALLOC_VALIDATE
- return (isalloc_validate(ptr));
-#else
- assert(ptr != NULL);
-
- return (isalloc(ptr));
-#endif
-}
-
-void
-jemalloc_stats(jemalloc_stats_t *stats)
-{
- size_t i;
-
- assert(stats != NULL);
-
- /*
- * Gather runtime settings.
- */
- stats->opt_abort = opt_abort;
- stats->opt_junk =
-#ifdef MALLOC_FILL
- opt_junk ? true :
-#endif
- false;
- stats->opt_utrace =
-#ifdef MALLOC_UTRACE
- opt_utrace ? true :
-#endif
- false;
- stats->opt_sysv =
-#ifdef MALLOC_SYSV
- opt_sysv ? true :
-#endif
- false;
- stats->opt_xmalloc =
-#ifdef MALLOC_XMALLOC
- opt_xmalloc ? true :
-#endif
- false;
- stats->opt_zero =
-#ifdef MALLOC_FILL
- opt_zero ? true :
-#endif
- false;
- stats->narenas = narenas;
- stats->balance_threshold =
-#ifdef MALLOC_BALANCE
- opt_balance_threshold
-#else
- SIZE_T_MAX
-#endif
- ;
- stats->quantum = quantum;
- stats->small_max = small_max;
- stats->large_max = arena_maxclass;
- stats->chunksize = chunksize;
- stats->dirty_max = opt_dirty_max;
-
- malloc_mutex_lock(&reserve_mtx);
- stats->reserve_min = reserve_min;
- stats->reserve_max = reserve_max;
- stats->reserve_cur = reserve_cur;
- malloc_mutex_unlock(&reserve_mtx);
-
- /*
- * Gather current memory usage statistics.
- */
- stats->mapped = 0;
- stats->committed = 0;
- stats->allocated = 0;
- stats->dirty = 0;
-
- /* Get huge mapped/allocated. */
- malloc_mutex_lock(&huge_mtx);
- stats->mapped += stats_chunks.curchunks * chunksize;
-#ifdef MALLOC_DECOMMIT
- stats->committed += huge_allocated;
-#endif
- stats->allocated += huge_allocated;
- malloc_mutex_unlock(&huge_mtx);
-
- /* Get base mapped. */
- malloc_mutex_lock(&base_mtx);
- stats->mapped += base_mapped;
-#ifdef MALLOC_DECOMMIT
- stats->committed += base_mapped;
-#endif
- malloc_mutex_unlock(&base_mtx);
-
- /* Iterate over arenas and their chunks. */
- for (i = 0; i < narenas; i++) {
- arena_t *arena = arenas[i];
- if (arena != NULL) {
- arena_chunk_t *chunk;
-
- malloc_spin_lock(&arena->lock);
- stats->allocated += arena->stats.allocated_small;
- stats->allocated += arena->stats.allocated_large;
-#ifdef MALLOC_DECOMMIT
- rb_foreach_begin(arena_chunk_t, link_dirty,
- &arena->chunks_dirty, chunk) {
- size_t j;
-
- for (j = 0; j < chunk_npages; j++) {
- if ((chunk->map[j].bits &
- CHUNK_MAP_DECOMMITTED) == 0)
- stats->committed += pagesize;
- }
- } rb_foreach_end(arena_chunk_t, link_dirty,
- &arena->chunks_dirty, chunk)
-#endif
- stats->dirty += (arena->ndirty << pagesize_2pow);
- malloc_spin_unlock(&arena->lock);
- }
- }
-
-#ifndef MALLOC_DECOMMIT
- stats->committed = stats->mapped;
-#endif
-}
-
-void *
-xmalloc(size_t size)
-{
- void *ret;
-
- if (malloc_init())
- reserve_fail(size, "xmalloc");
-
- if (size == 0) {
-#ifdef MALLOC_SYSV
- if (opt_sysv == false)
-#endif
- size = 1;
-#ifdef MALLOC_SYSV
- else {
- _malloc_message(_getprogname(),
- ": (malloc) Error in xmalloc(): ",
- "invalid size 0", "\n");
- abort();
- }
-#endif
- }
-
- ret = imalloc(size);
- if (ret == NULL) {
- uint64_t seq = 0;
-
- do {
- seq = reserve_crit(size, "xmalloc", seq);
- ret = imalloc(size);
- } while (ret == NULL);
- }
-
- UTRACE(0, size, ret);
- return (ret);
-}
-
-void *
-xcalloc(size_t num, size_t size)
-{
- void *ret;
- size_t num_size;
-
- num_size = num * size;
- if (malloc_init())
- reserve_fail(num_size, "xcalloc");
-
- if (num_size == 0) {
-#ifdef MALLOC_SYSV
- if ((opt_sysv == false) && ((num == 0) || (size == 0)))
-#endif
- num_size = 1;
-#ifdef MALLOC_SYSV
- else {
- _malloc_message(_getprogname(),
- ": (malloc) Error in xcalloc(): ",
- "invalid size 0", "\n");
- abort();
- }
-#endif
- /*
- * Try to avoid division here. We know that it isn't possible to
- * overflow during multiplication if neither operand uses any of the
- * most significant half of the bits in a size_t.
- */
- } else if (((num | size) & (SIZE_T_MAX << (sizeof(size_t) << 2)))
- && (num_size / size != num)) {
- /* size_t overflow. */
- _malloc_message(_getprogname(),
- ": (malloc) Error in xcalloc(): ",
- "size overflow", "\n");
- abort();
- }
-
- ret = icalloc(num_size);
- if (ret == NULL) {
- uint64_t seq = 0;
-
- do {
- seq = reserve_crit(num_size, "xcalloc", seq);
- ret = icalloc(num_size);
- } while (ret == NULL);
- }
-
- UTRACE(0, num_size, ret);
- return (ret);
-}
-
-void *
-xrealloc(void *ptr, size_t size)
-{
- void *ret;
-
- if (size == 0) {
-#ifdef MALLOC_SYSV
- if (opt_sysv == false)
-#endif
- size = 1;
-#ifdef MALLOC_SYSV
- else {
- if (ptr != NULL)
- idalloc(ptr);
- _malloc_message(_getprogname(),
- ": (malloc) Error in xrealloc(): ",
- "invalid size 0", "\n");
- abort();
- }
-#endif
- }
-
- if (ptr != NULL) {
- assert(malloc_initialized);
-
- ret = iralloc(ptr, size);
- if (ret == NULL) {
- uint64_t seq = 0;
-
- do {
- seq = reserve_crit(size, "xrealloc", seq);
- ret = iralloc(ptr, size);
- } while (ret == NULL);
- }
- } else {
- if (malloc_init())
- reserve_fail(size, "xrealloc");
-
- ret = imalloc(size);
- if (ret == NULL) {
- uint64_t seq = 0;
-
- do {
- seq = reserve_crit(size, "xrealloc", seq);
- ret = imalloc(size);
- } while (ret == NULL);
- }
- }
-
- UTRACE(ptr, size, ret);
- return (ret);
-}
-
-void *
-xmemalign(size_t alignment, size_t size)
-{
- void *ret;
-
- assert(((alignment - 1) & alignment) == 0 && alignment >=
- sizeof(void *));
-
- if (malloc_init())
- reserve_fail(size, "xmemalign");
-
- ret = ipalloc(alignment, size);
- if (ret == NULL) {
- uint64_t seq = 0;
-
- do {
- seq = reserve_crit(size, "xmemalign", seq);
- ret = ipalloc(alignment, size);
- } while (ret == NULL);
- }
-
- UTRACE(0, size, ret);
- return (ret);
-}
-
-static void
-reserve_shrink(void)
-{
- extent_node_t *node;
-
- assert(reserve_cur > reserve_max);
-#ifdef MALLOC_DEBUG
- {
- extent_node_t *node;
- size_t reserve_size;
-
- reserve_size = 0;
- rb_foreach_begin(extent_node_t, link_szad, &reserve_chunks_szad,
- node) {
- reserve_size += node->size;
- } rb_foreach_end(extent_node_t, link_szad, &reserve_chunks_szad,
- node)
- assert(reserve_size == reserve_cur);
-
- reserve_size = 0;
- rb_foreach_begin(extent_node_t, link_ad, &reserve_chunks_ad,
- node) {
- reserve_size += node->size;
- } rb_foreach_end(extent_node_t, link_ad, &reserve_chunks_ad,
- node)
- assert(reserve_size == reserve_cur);
- }
-#endif
-
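-	/*
-	 * Two variants follow: with MALLOC_DECOMMIT, only whole
-	 * [multi-]chunks are discarded; otherwise the last node visited may
-	 * instead be trimmed in place so that the reserve lands exactly on
-	 * reserve_max.
-	 */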
-	/* Discard chunks until the reserve is below the size limit. */
- rb_foreach_reverse_begin(extent_node_t, link_ad, &reserve_chunks_ad,
- node) {
-#ifndef MALLOC_DECOMMIT
- if (node->size <= reserve_cur - reserve_max) {
-#endif
- extent_node_t *tnode = extent_tree_ad_prev(
- &reserve_chunks_ad, node);
-
-#ifdef MALLOC_DECOMMIT
- assert(node->size <= reserve_cur - reserve_max);
-#endif
-
- /* Discard the entire [multi-]chunk. */
- extent_tree_szad_remove(&reserve_chunks_szad, node);
- extent_tree_ad_remove(&reserve_chunks_ad, node);
- reserve_cur -= node->size;
- pages_unmap(node->addr, node->size);
-#ifdef MALLOC_STATS
- stats_chunks.curchunks -= (node->size / chunksize);
-#endif
- base_node_dealloc(node);
- if (reserve_cur == reserve_max)
- break;
-
- rb_foreach_reverse_prev(extent_node_t, link_ad,
- extent_ad_comp, &reserve_chunks_ad, tnode);
-#ifndef MALLOC_DECOMMIT
- } else {
- /* Discard the end of the multi-chunk. */
- extent_tree_szad_remove(&reserve_chunks_szad, node);
- node->size -= reserve_cur - reserve_max;
- extent_tree_szad_insert(&reserve_chunks_szad, node);
- pages_unmap((void *)((uintptr_t)node->addr +
- node->size), reserve_cur - reserve_max);
-#ifdef MALLOC_STATS
- stats_chunks.curchunks -= ((reserve_cur - reserve_max) /
- chunksize);
-#endif
- reserve_cur = reserve_max;
- break;
- }
-#endif
- assert(reserve_cur > reserve_max);
- } rb_foreach_reverse_end(extent_node_t, link_ad, &reserve_chunks_ad,
- node)
-}
-
-/* Send a condition notification. */
-static uint64_t
-reserve_notify(reserve_cnd_t cnd, size_t size, uint64_t seq)
-{
- reserve_reg_t *reg;
-
- /* seq is used to keep track of distinct condition-causing events. */
- if (seq == 0) {
- /* Allocate new sequence number. */
- reserve_seq++;
- seq = reserve_seq;
- }
-
- /*
- * Advance to the next callback registration and send a notification,
- * unless one has already been sent for this condition-causing event.
- */
- reg = ql_first(&reserve_regs);
- if (reg == NULL)
- return (0);
- ql_first(&reserve_regs) = ql_next(&reserve_regs, reg, link);
- if (reg->seq == seq)
- return (0);
- reg->seq = seq;
- malloc_mutex_unlock(&reserve_mtx);
- reg->cb(reg->ctx, cnd, size);
- malloc_mutex_lock(&reserve_mtx);
-
- return (seq);
-}
-
-/* Allocation failure due to OOM. Try to free some memory via callbacks. */
-static uint64_t
-reserve_crit(size_t size, const char *fname, uint64_t seq)
-{
-
- /*
- * Send one condition notification. Iteration is handled by the
- * caller of this function.
- */
- malloc_mutex_lock(&reserve_mtx);
- seq = reserve_notify(RESERVE_CND_CRIT, size, seq);
- malloc_mutex_unlock(&reserve_mtx);
-
- /* If no notification could be sent, then no further recourse exists. */
- if (seq == 0)
- reserve_fail(size, fname);
-
- return (seq);
-}
-
-/* Permanent allocation failure due to OOM. */
-static void
-reserve_fail(size_t size, const char *fname)
-{
- uint64_t seq = 0;
-
- /* Send fail notifications. */
- malloc_mutex_lock(&reserve_mtx);
- do {
- seq = reserve_notify(RESERVE_CND_FAIL, size, seq);
- } while (seq != 0);
- malloc_mutex_unlock(&reserve_mtx);
-
- /* Terminate the application. */
- _malloc_message(_getprogname(),
- ": (malloc) Error in ", fname, "(): out of memory\n");
- abort();
-}
-
-bool
-reserve_cb_register(reserve_cb_t *cb, void *ctx)
-{
- reserve_reg_t *reg = base_reserve_reg_alloc();
- if (reg == NULL)
- return (true);
-
- ql_elm_new(reg, link);
- reg->cb = cb;
- reg->ctx = ctx;
- reg->seq = 0;
-
- malloc_mutex_lock(&reserve_mtx);
- ql_head_insert(&reserve_regs, reg, link);
- malloc_mutex_unlock(&reserve_mtx);
-
- return (false);
-}
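-
-/*
- * Hypothetical usage sketch (my_reserve_cb and release_cached_memory are
- * invented for illustration; only the callback signature is taken from
- * reserve_notify() above):
- *
- *	static void
- *	my_reserve_cb(void *ctx, reserve_cnd_t cnd, size_t size)
- *	{
- *		if (cnd == RESERVE_CND_CRIT)
- *			release_cached_memory(size);
- *	}
- *
- *	reserve_cb_register(my_reserve_cb, NULL);
- */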
-
-bool
-reserve_cb_unregister(reserve_cb_t *cb, void *ctx)
-{
- reserve_reg_t *reg = NULL;
-
- malloc_mutex_lock(&reserve_mtx);
- ql_foreach(reg, &reserve_regs, link) {
- if (reg->cb == cb && reg->ctx == ctx) {
- ql_remove(&reserve_regs, reg, link);
- break;
- }
- }
- malloc_mutex_unlock(&reserve_mtx);
-
-	if (reg != NULL) {
-		base_reserve_reg_dealloc(reg);
-		return (false);
-	}
-	return (true);
-}
-
-size_t
-reserve_cur_get(void)
-{
- size_t ret;
-
- malloc_mutex_lock(&reserve_mtx);
- ret = reserve_cur;
- malloc_mutex_unlock(&reserve_mtx);
-
- return (ret);
-}
-
-size_t
-reserve_min_get(void)
-{
- size_t ret;
-
- malloc_mutex_lock(&reserve_mtx);
- ret = reserve_min;
- malloc_mutex_unlock(&reserve_mtx);
-
- return (ret);
-}
-
-bool
-reserve_min_set(size_t min)
-{
-
- min = CHUNK_CEILING(min);
-
- malloc_mutex_lock(&reserve_mtx);
- /* Keep |reserve_max - reserve_min| the same. */
- if (min < reserve_min) {
- reserve_max -= reserve_min - min;
- reserve_min = min;
- } else {
- /* Protect against wrap-around. */
- if (reserve_max + min - reserve_min < reserve_max) {
- reserve_min = SIZE_T_MAX - (reserve_max - reserve_min)
- - chunksize + 1;
- reserve_max = SIZE_T_MAX - chunksize + 1;
- } else {
- reserve_max += min - reserve_min;
- reserve_min = min;
- }
- }
-
- /* Resize the reserve if necessary. */
- if (reserve_cur < reserve_min) {
- size_t size = reserve_min - reserve_cur;
-
- /* Force the reserve to grow by allocating/deallocating. */
- malloc_mutex_unlock(&reserve_mtx);
-#ifdef MALLOC_DECOMMIT
- {
- void **chunks;
- size_t i, n;
-
- n = size >> opt_chunk_2pow;
- chunks = (void**)imalloc(n * sizeof(void *));
- if (chunks == NULL)
- return (true);
- for (i = 0; i < n; i++) {
- chunks[i] = huge_malloc(chunksize, false);
- if (chunks[i] == NULL) {
- size_t j;
-
- for (j = 0; j < i; j++) {
- huge_dalloc(chunks[j]);
- }
- idalloc(chunks);
- return (true);
- }
- }
- for (i = 0; i < n; i++)
- huge_dalloc(chunks[i]);
- idalloc(chunks);
- }
-#else
- {
- void *x = huge_malloc(size, false);
- if (x == NULL) {
- return (true);
- }
- huge_dalloc(x);
- }
-#endif
- } else if (reserve_cur > reserve_max) {
- reserve_shrink();
- malloc_mutex_unlock(&reserve_mtx);
- } else
- malloc_mutex_unlock(&reserve_mtx);
-
- return (false);
-}
-
-#ifdef MOZ_MEMORY_WINDOWS
-void*
-_recalloc(void *ptr, size_t count, size_t size)
-{
- size_t oldsize = (ptr != NULL) ? isalloc(ptr) : 0;
- size_t newsize = count * size;
-
- /*
- * In order for all trailing bytes to be zeroed, the caller needs to
- * use calloc(), followed by recalloc(). However, the current calloc()
- * implementation only zeros the bytes requested, so if recalloc() is
- * to work 100% correctly, calloc() will need to change to zero
- * trailing bytes.
- */
-
- ptr = realloc(ptr, newsize);
- if (ptr != NULL && oldsize < newsize) {
- memset((void *)((uintptr_t)ptr + oldsize), 0, newsize -
- oldsize);
- }
-
- return ptr;
-}
-
-/*
- * This implementation of _expand() never actually expands or shrinks blocks:
- * it simply reports that the caller may keep using a logically shrunk block.
- */
-void*
-_expand(void *ptr, size_t newsize)
-{
- if (isalloc(ptr) >= newsize)
- return ptr;
-
- return NULL;
-}
-
-size_t
-_msize(const void *ptr)
-{
- return malloc_usable_size(ptr);
-}
-#endif
-
-/*
- * End non-standard functions.
- */
-/******************************************************************************/
-/*
- * Begin library-private functions, used by threading libraries for protection
- * of malloc during fork(). These functions are only called if the program is
- * running in threaded mode, so there is no need to check whether the program
- * is threaded here.
- */
-
-void
-_malloc_prefork(void)
-{
- unsigned i;
-
- /* Acquire all mutexes in a safe order. */
-
- malloc_spin_lock(&arenas_lock);
- for (i = 0; i < narenas; i++) {
- if (arenas[i] != NULL)
- malloc_spin_lock(&arenas[i]->lock);
- }
- malloc_spin_unlock(&arenas_lock);
-
- malloc_mutex_lock(&base_mtx);
-
- malloc_mutex_lock(&huge_mtx);
-}
-
-void
-_malloc_postfork(void)
-{
- unsigned i;
-
- /* Release all mutexes, now that fork() has completed. */
-
- malloc_mutex_unlock(&huge_mtx);
-
- malloc_mutex_unlock(&base_mtx);
-
- malloc_spin_lock(&arenas_lock);
- for (i = 0; i < narenas; i++) {
- if (arenas[i] != NULL)
- malloc_spin_unlock(&arenas[i]->lock);
- }
- malloc_spin_unlock(&arenas_lock);
-}
-
-/*
- * End library-private functions.
- */
-/******************************************************************************/
-
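For context, a hedged sketch of how a threading library could wire these
hooks into fork(); the actual registration site is platform glue outside
this excerpt:

    #include <pthread.h>

    void _malloc_prefork(void);
    void _malloc_postfork(void);

    static void
    install_fork_hooks(void)
    {
        /* Acquire every allocator lock before fork() and release them
         * in both the parent and the child, so neither process inherits
         * a lock held by a thread that exists only on the other side. */
        pthread_atfork(_malloc_prefork, _malloc_postfork, _malloc_postfork);
    }
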
-#ifdef HAVE_LIBDL
-# include <dlfcn.h>
-#endif
-
-#ifdef MOZ_MEMORY_DARWIN
-static malloc_zone_t zone;
-static struct malloc_introspection_t zone_introspect;
-
-static size_t
-zone_size(malloc_zone_t *zone, void *ptr)
-{
-
- /*
- * There appear to be places within Darwin (such as setenv(3)) that
- * cause calls to this function with pointers that *no* zone owns. If
- * we knew that all pointers were owned by *some* zone, we could split
- * our zone into two parts, and use one as the default allocator and
- * the other as the default deallocator/reallocator. Since that will
- * not work in practice, we must check all pointers to assure that they
- * reside within a mapped chunk before determining size.
- */
- return (isalloc_validate(ptr));
-}
-
-static void *
-zone_malloc(malloc_zone_t *zone, size_t size)
-{
-
- return (malloc(size));
-}
-
-static void *
-zone_calloc(malloc_zone_t *zone, size_t num, size_t size)
-{
-
- return (calloc(num, size));
-}
-
-static void *
-zone_valloc(malloc_zone_t *zone, size_t size)
-{
- void *ret = NULL; /* Assignment avoids useless compiler warning. */
-
- posix_memalign(&ret, pagesize, size);
-
- return (ret);
-}
-
-static void
-zone_free(malloc_zone_t *zone, void *ptr)
-{
-
- free(ptr);
-}
-
-static void *
-zone_realloc(malloc_zone_t *zone, void *ptr, size_t size)
-{
-
- return (realloc(ptr, size));
-}
-
-static void *
-zone_destroy(malloc_zone_t *zone)
-{
-
- /* This function should never be called. */
- assert(false);
- return (NULL);
-}
-
-static size_t
-zone_good_size(malloc_zone_t *zone, size_t size)
-{
- size_t ret;
- void *p;
-
- /*
- * Actually create an object of the appropriate size, then find out
- * how large it could have been without moving up to the next size
- * class.
- */
- p = malloc(size);
- if (p != NULL) {
- ret = isalloc(p);
- free(p);
- } else
- ret = size;
-
- return (ret);
-}
-
-static void
-zone_force_lock(malloc_zone_t *zone)
-{
-
- _malloc_prefork();
-}
-
-static void
-zone_force_unlock(malloc_zone_t *zone)
-{
-
- _malloc_postfork();
-}
-
-static malloc_zone_t *
-create_zone(void)
-{
-
- assert(malloc_initialized);
-
- zone.size = (void *)zone_size;
- zone.malloc = (void *)zone_malloc;
- zone.calloc = (void *)zone_calloc;
- zone.valloc = (void *)zone_valloc;
- zone.free = (void *)zone_free;
- zone.realloc = (void *)zone_realloc;
- zone.destroy = (void *)zone_destroy;
- zone.zone_name = "jemalloc_zone";
- zone.batch_malloc = NULL;
- zone.batch_free = NULL;
- zone.introspect = &zone_introspect;
-
- zone_introspect.enumerator = NULL;
- zone_introspect.good_size = (void *)zone_good_size;
- zone_introspect.check = NULL;
- zone_introspect.print = NULL;
- zone_introspect.log = NULL;
- zone_introspect.force_lock = (void *)zone_force_lock;
- zone_introspect.force_unlock = (void *)zone_force_unlock;
- zone_introspect.statistics = NULL;
-
- return (&zone);
-}
-
-__attribute__((constructor))
-void
-jemalloc_darwin_init(void)
-{
- extern unsigned malloc_num_zones;
- extern malloc_zone_t **malloc_zones;
-
- if (malloc_init_hard())
- abort();
-
- /*
- * The following code is *not* thread-safe, so it's critical that
- * initialization be manually triggered.
- */
-
- /* Register the custom zones. */
- malloc_zone_register(create_zone());
- assert(malloc_zones[malloc_num_zones - 1] == &zone);
-
- /*
- * Shift malloc_zones around so that zone is first, which makes it the
- * default zone.
- */
- assert(malloc_num_zones > 1);
- memmove(&malloc_zones[1], &malloc_zones[0],
- sizeof(malloc_zone_t *) * (malloc_num_zones - 1));
- malloc_zones[0] = &zone;
-}
-
-#elif defined(__GLIBC__) && !defined(__UCLIBC__)
-/*
- * glibc provides the RTLD_DEEPBIND flag for dlopen(3), which can cause a
- * library to inconsistently reference libc's malloc(3)-compatible functions
- * (bug 493541).
- *
- * These definitions interpose hooks in glibc. The functions are actually
- * passed an extra argument for the caller return address, which will be
- * ignored.
- */
-void (*__free_hook)(void *ptr) = free;
-void *(*__malloc_hook)(size_t size) = malloc;
-void *(*__realloc_hook)(void *ptr, size_t size) = realloc;
-void *(*__memalign_hook)(size_t alignment, size_t size) = memalign;
-
-#elif defined(RTLD_DEEPBIND)
-/*
- * XXX On systems that support RTLD_GROUP or DF_1_GROUP, do their
- * implementations permit similar inconsistencies? Should STV_SINGLETON
- * visibility be used for interposition where available?
- */
-# error "Interposing malloc is unsafe on this system without libc malloc hooks."
-#endif
-
diff --git a/third_party/tcmalloc/jemalloc/jemalloc.h b/third_party/tcmalloc/jemalloc/jemalloc.h
deleted file mode 100644
index 46dc768..0000000
--- a/third_party/tcmalloc/jemalloc/jemalloc.h
+++ /dev/null
@@ -1,222 +0,0 @@
-/* -*- Mode: C; tab-width: 8; c-basic-offset: 8 -*- */
-/* vim:set softtabstop=8 shiftwidth=8: */
-/*-
- * Copyright (C) 2006-2008 Jason Evans <jasone@FreeBSD.org>.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice(s), this list of conditions and the following disclaimer as
- * the first lines of this file unmodified other than the possible
- * addition of one or more copyright notices.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice(s), this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
- * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _JEMALLOC_H_
-#define _JEMALLOC_H_
-
-/* grab size_t */
-#ifdef _MSC_VER
-#include <crtdefs.h>
-#else
-#include <stddef.h>
-#endif
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-typedef unsigned char jemalloc_bool;
-
-extern const char *_malloc_options;
-
-/*
- * jemalloc_stats() is not a stable interface. When using jemalloc_stats_t, be
- * sure that the compiled results of jemalloc.c are in sync with this header
- * file.
- */
-typedef struct {
- /*
- * Run-time configuration settings.
- */
- jemalloc_bool opt_abort; /* abort(3) on error? */
- jemalloc_bool opt_junk; /* Fill allocated/free memory with 0xa5/0x5a? */
- jemalloc_bool opt_utrace; /* Trace all allocation events? */
- jemalloc_bool opt_sysv; /* SysV semantics? */
- jemalloc_bool opt_xmalloc; /* abort(3) on OOM? */
- jemalloc_bool opt_zero; /* Fill allocated memory with 0x0? */
- size_t narenas; /* Number of arenas. */
- size_t balance_threshold; /* Arena contention rebalance threshold. */
- size_t quantum; /* Allocation quantum. */
- size_t small_max; /* Max quantum-spaced allocation size. */
- size_t large_max; /* Max sub-chunksize allocation size. */
- size_t chunksize; /* Size of each virtual memory mapping. */
- size_t dirty_max; /* Max dirty pages per arena. */
- size_t reserve_min; /* reserve_low callback threshold. */
- size_t reserve_max; /* Maximum reserve size before unmapping. */
-
- /*
- * Current memory usage statistics.
- */
- size_t mapped; /* Bytes mapped (not necessarily committed). */
- size_t committed; /* Bytes committed (readable/writable). */
-	size_t	allocated;	/* Bytes allocated (in use by application). */
- size_t dirty; /* Bytes dirty (committed unused pages). */
- size_t reserve_cur; /* Current memory reserve. */
-} jemalloc_stats_t;
-
-#ifndef MOZ_MEMORY_DARWIN
-void *malloc(size_t size);
-void *valloc(size_t size);
-void *calloc(size_t num, size_t size);
-void *realloc(void *ptr, size_t size);
-void free(void *ptr);
-#endif
-
-int posix_memalign(void **memptr, size_t alignment, size_t size);
-void *memalign(size_t alignment, size_t size);
-size_t malloc_usable_size(const void *ptr);
-void jemalloc_stats(jemalloc_stats_t *stats);
-
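A minimal usage sketch (editorial, not part of the original header); as the
comment above warns, it assumes the struct layout matches the compiled
jemalloc.c:

    #include <stdio.h>

    static void
    print_heap_stats(void)
    {
        jemalloc_stats_t stats;

        jemalloc_stats(&stats);
        printf("mapped=%zu committed=%zu allocated=%zu dirty=%zu\n",
            stats.mapped, stats.committed, stats.allocated, stats.dirty);
    }
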
-/* The x*() functions never return NULL. */
-void *xmalloc(size_t size);
-void *xcalloc(size_t num, size_t size);
-void *xrealloc(void *ptr, size_t size);
-void *xmemalign(size_t alignment, size_t size);
-
-/*
- * The allocator maintains a memory reserve that is used to satisfy allocation
- * requests when no additional memory can be acquired from the operating
- * system. Under normal operating conditions, the reserve size is at least
- * reserve_min bytes. If the reserve is depleted or insufficient to satisfy an
- * allocation request, then condition notifications are sent to one or more of
- * the registered callback functions:
- *
- * RESERVE_CND_LOW: The reserve had to be used to satisfy an allocation
- * request, which dropped the reserve size below the
- * minimum. The callee should try to free memory in order
- * to restore the reserve.
- *
- * RESERVE_CND_CRIT: The reserve was not large enough to satisfy a pending
- * allocation request. Some callee must free adequate
- * memory in order to prevent application failure (unless
- * the condition spontaneously desists due to concurrent
- * deallocation).
- *
- * RESERVE_CND_FAIL: An allocation request could not be satisfied, despite all
- * attempts. The allocator is about to terminate the
- * application.
- *
- * The order in which the callback functions are called is only loosely
- * specified: in the absence of interposing callback
- * registrations/unregistrations, enabled callbacks will be called in an
- * arbitrary round-robin order.
- *
- * Condition notifications are sent to callbacks only while conditions exist.
- * For example, just before the allocator sends a RESERVE_CND_LOW condition
- * notification to a callback, the reserve is in fact depleted. However, due
- * to allocator concurrency, the reserve may have been restored by the time the
- * callback function executes. Furthermore, if the reserve is restored at some
- * point during the delivery of condition notifications to callbacks, no
- * further deliveries will occur, since the condition no longer exists.
- *
- * Callback functions can freely call back into the allocator (i.e. the
- * allocator releases all internal resources before calling each callback
- * function), though allocation is discouraged, since recursive callbacks are
- * likely to result, which places extra burden on the application to avoid
- * deadlock.
- *
- * Callback functions must be thread-safe, since it is possible that multiple
- * threads will call into the same callback function concurrently.
- */
-
-/* Memory reserve condition types. */
-typedef enum {
- RESERVE_CND_LOW,
- RESERVE_CND_CRIT,
- RESERVE_CND_FAIL
-} reserve_cnd_t;
-
-/*
- * Reserve condition notification callback function type definition.
- *
- * Inputs:
- * ctx: Opaque application data, as passed to reserve_cb_register().
- * cnd: Condition type being delivered.
- * size: Allocation request size for the allocation that caused the condition.
- */
-typedef void reserve_cb_t(void *ctx, reserve_cnd_t cnd, size_t size);
-
-/*
- * Register a callback function.
- *
- * Inputs:
- * cb: Callback function pointer.
- * ctx: Opaque application data, passed to cb().
- *
- * Output:
- * ret: True upon failure due to OOM; false upon success.
- */
-jemalloc_bool reserve_cb_register(reserve_cb_t *cb, void *ctx);
-
-/*
- * Unregister a callback function.
- *
- * Inputs:
- * cb: Callback function pointer.
- * ctx: Opaque application data, same as that passed to reserve_cb_register().
- *
- * Output:
- * ret: False upon success, true if the {cb,ctx} registration could not be
- * found.
- */
-jemalloc_bool reserve_cb_unregister(reserve_cb_t *cb, void *ctx);
-
-/*
- * Get the current reserve size.
- *
- * ret: Current reserve size.
- */
-size_t reserve_cur_get(void);
-
-/*
- * Get the minimum acceptable reserve size. If the reserve drops below this
- * value, the RESERVE_CND_LOW condition notification is sent to the callbacks.
- *
- * ret: Minimum acceptable reserve size.
- */
-size_t reserve_min_get(void);
-
-/*
- * Set the minimum acceptable reserve size.
- *
- * min: Reserve threshold. This value may be internally rounded up.
- * ret: False if the reserve was successfully resized; true otherwise. Note
- * that failure to resize the reserve also results in a RESERVE_CND_LOW
- * condition.
- */
-jemalloc_bool reserve_min_set(size_t min);
-
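To tie the reserve API together, a hedged registration sketch; my_cache_t
and my_cache_purge() are hypothetical application names, not part of this
interface:

    typedef struct my_cache_s my_cache_t;
    void my_cache_purge(my_cache_t *cache, size_t size);

    static void
    my_reserve_cb(void *ctx, reserve_cnd_t cnd, size_t size)
    {
        /* Must be thread-safe; may run on several threads at once. */
        if (cnd == RESERVE_CND_LOW || cnd == RESERVE_CND_CRIT) {
            /* Free at least size bytes so the reserve can refill. */
            my_cache_purge((my_cache_t *)ctx, size);
        }
    }

    static jemalloc_bool
    install_reserve_cb(my_cache_t *cache)
    {
        /* Returns true on OOM, false on success (see above). */
        return reserve_cb_register(my_reserve_cb, cache);
    }
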
-#ifdef __cplusplus
-} /* extern "C" */
-#endif
-
-#endif /* _JEMALLOC_H_ */
-
diff --git a/third_party/tcmalloc/jemalloc/ql.h b/third_party/tcmalloc/jemalloc/ql.h
deleted file mode 100644
index 593438c..0000000
--- a/third_party/tcmalloc/jemalloc/ql.h
+++ /dev/null
@@ -1,115 +0,0 @@
-/******************************************************************************
- *
- * Copyright (C) 2002 Jason Evans <jasone@canonware.com>.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice(s), this list of conditions and the following disclaimer
- * unmodified other than the allowable addition of one or more
- * copyright notices.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice(s), this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
- * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- ******************************************************************************/
-
-/*
- * List definitions.
- */
-#define ql_head(a_type) \
-struct { \
- a_type *qlh_first; \
-}
-
-#define ql_head_initializer(a_head) {NULL}
-
-#define ql_elm(a_type) qr(a_type)
-
-/* List functions. */
-#define ql_new(a_head) do { \
- (a_head)->qlh_first = NULL; \
-} while (0)
-
-#define ql_elm_new(a_elm, a_field) qr_new((a_elm), a_field)
-
-#define ql_first(a_head) ((a_head)->qlh_first)
-
-#define ql_last(a_head, a_field) \
- ((ql_first(a_head) != NULL) \
- ? qr_prev(ql_first(a_head), a_field) : NULL)
-
-#define ql_next(a_head, a_elm, a_field) \
- ((ql_last(a_head, a_field) != (a_elm)) \
- ? qr_next((a_elm), a_field) : NULL)
-
-#define ql_prev(a_head, a_elm, a_field) \
- ((ql_first(a_head) != (a_elm)) ? qr_prev((a_elm), a_field) \
- : NULL)
-
-#define ql_before_insert(a_head, a_qlelm, a_elm, a_field) do { \
- qr_before_insert((a_qlelm), (a_elm), a_field); \
- if (ql_first(a_head) == (a_qlelm)) { \
- ql_first(a_head) = (a_elm); \
- } \
-} while (0)
-
-#define ql_after_insert(a_qlelm, a_elm, a_field) \
- qr_after_insert((a_qlelm), (a_elm), a_field)
-
-#define ql_head_insert(a_head, a_elm, a_field) do { \
- if (ql_first(a_head) != NULL) { \
- qr_before_insert(ql_first(a_head), (a_elm), a_field); \
- } \
- ql_first(a_head) = (a_elm); \
-} while (0)
-
-#define ql_tail_insert(a_head, a_elm, a_field) do { \
- if (ql_first(a_head) != NULL) { \
- qr_before_insert(ql_first(a_head), (a_elm), a_field); \
- } \
- ql_first(a_head) = qr_next((a_elm), a_field); \
-} while (0)
-
-#define ql_remove(a_head, a_elm, a_field) do { \
- if (ql_first(a_head) == (a_elm)) { \
- ql_first(a_head) = qr_next(ql_first(a_head), a_field); \
- } \
- if (ql_first(a_head) != (a_elm)) { \
- qr_remove((a_elm), a_field); \
- } else { \
- ql_first(a_head) = NULL; \
- } \
-} while (0)
-
-#define ql_head_remove(a_head, a_type, a_field) do { \
- a_type *t = ql_first(a_head); \
- ql_remove((a_head), t, a_field); \
-} while (0)
-
-#define ql_tail_remove(a_head, a_type, a_field) do { \
- a_type *t = ql_last(a_head, a_field); \
- ql_remove((a_head), t, a_field); \
-} while (0)
-
-#define ql_foreach(a_var, a_head, a_field) \
- qr_foreach((a_var), ql_first(a_head), a_field)
-
-#define ql_reverse_foreach(a_var, a_head, a_field) \
- qr_reverse_foreach((a_var), ql_first(a_head), a_field)
-
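A short usage sketch for the list macros above (editorial, not part of the
original header); widget_t is hypothetical, and qr.h must be included
first, as jemalloc.c does:

    typedef struct widget_s widget_t;
    struct widget_s {
        int value;
        ql_elm(widget_t) link;  /* Embedded linkage. */
    };

    static ql_head(widget_t) widgets = ql_head_initializer(widgets);

    static void
    widget_append(widget_t *w)
    {
        ql_elm_new(w, link);    /* Self-link the new element. */
        ql_tail_insert(&widgets, w, link);
    }

    static int
    widget_sum(void)
    {
        widget_t *w;
        int sum = 0;

        ql_foreach(w, &widgets, link)
            sum += w->value;
        return sum;
    }
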
diff --git a/third_party/tcmalloc/jemalloc/qr.h b/third_party/tcmalloc/jemalloc/qr.h
deleted file mode 100644
index 5f2bc0a..0000000
--- a/third_party/tcmalloc/jemalloc/qr.h
+++ /dev/null
@@ -1,99 +0,0 @@
-/******************************************************************************
- *
- * Copyright (C) 2002 Jason Evans <jasone@canonware.com>.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice(s), this list of conditions and the following disclaimer
- * unmodified other than the allowable addition of one or more
- * copyright notices.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice(s), this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
- * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- ******************************************************************************/
-
-/* Ring definitions. */
-#define qr(a_type) \
-struct { \
- a_type *qre_next; \
- a_type *qre_prev; \
-}
-
-/* Ring functions. */
-#define qr_new(a_qr, a_field) do { \
- (a_qr)->a_field.qre_next = (a_qr); \
- (a_qr)->a_field.qre_prev = (a_qr); \
-} while (0)
-
-#define qr_next(a_qr, a_field) ((a_qr)->a_field.qre_next)
-
-#define qr_prev(a_qr, a_field) ((a_qr)->a_field.qre_prev)
-
-#define qr_before_insert(a_qrelm, a_qr, a_field) do { \
- (a_qr)->a_field.qre_prev = (a_qrelm)->a_field.qre_prev; \
- (a_qr)->a_field.qre_next = (a_qrelm); \
- (a_qr)->a_field.qre_prev->a_field.qre_next = (a_qr); \
- (a_qrelm)->a_field.qre_prev = (a_qr); \
-} while (0)
-
-#define qr_after_insert(a_qrelm, a_qr, a_field) do { \
-	(a_qr)->a_field.qre_next = (a_qrelm)->a_field.qre_next; \
-	(a_qr)->a_field.qre_prev = (a_qrelm); \
-	(a_qr)->a_field.qre_next->a_field.qre_prev = (a_qr); \
-	(a_qrelm)->a_field.qre_next = (a_qr); \
-} while (0)
-
-#define qr_meld(a_qr_a, a_qr_b, a_field) do { \
- void *t; \
- (a_qr_a)->a_field.qre_prev->a_field.qre_next = (a_qr_b); \
- (a_qr_b)->a_field.qre_prev->a_field.qre_next = (a_qr_a); \
- t = (a_qr_a)->a_field.qre_prev; \
- (a_qr_a)->a_field.qre_prev = (a_qr_b)->a_field.qre_prev; \
- (a_qr_b)->a_field.qre_prev = t; \
-} while (0)
-
-/* qr_meld() and qr_split() are functionally equivalent, so there's no need to
- * have two copies of the code. */
-#define qr_split(a_qr_a, a_qr_b, a_field) \
- qr_meld((a_qr_a), (a_qr_b), a_field)
-
-#define qr_remove(a_qr, a_field) do { \
- (a_qr)->a_field.qre_prev->a_field.qre_next \
- = (a_qr)->a_field.qre_next; \
- (a_qr)->a_field.qre_next->a_field.qre_prev \
- = (a_qr)->a_field.qre_prev; \
- (a_qr)->a_field.qre_next = (a_qr); \
- (a_qr)->a_field.qre_prev = (a_qr); \
-} while (0)
-
-#define qr_foreach(var, a_qr, a_field) \
- for ((var) = (a_qr); \
- (var) != NULL; \
- (var) = (((var)->a_field.qre_next != (a_qr)) \
- ? (var)->a_field.qre_next : NULL))
-
-#define qr_reverse_foreach(var, a_qr, a_field) \
- for ((var) = ((a_qr) != NULL) ? qr_prev(a_qr, a_field) : NULL; \
- (var) != NULL; \
- (var) = (((var) != (a_qr)) \
- ? (var)->a_field.qre_prev : NULL))
-
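A short sketch of the ring macros in isolation (editorial, not part of the
original header):

    typedef struct node_s node_t;
    struct node_s {
        int id;
        qr(node_t) link;
    };

    static void
    ring_demo(node_t *a, node_t *b, node_t *c)
    {
        node_t *n;

        qr_new(a, link);              /* a is a one-element ring. */
        qr_after_insert(a, b, link);  /* a -> b -> a */
        qr_after_insert(b, c, link);  /* a -> b -> c -> a */

        qr_foreach(n, a, link) {
            /* Visits a, b, then c exactly once each. */
        }
    }
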
diff --git a/third_party/tcmalloc/jemalloc/rb.h b/third_party/tcmalloc/jemalloc/rb.h
deleted file mode 100644
index 05fb4fe..0000000
--- a/third_party/tcmalloc/jemalloc/rb.h
+++ /dev/null
@@ -1,983 +0,0 @@
-/******************************************************************************
- *
- * Copyright (C) 2008 Jason Evans <jasone@FreeBSD.org>.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice(s), this list of conditions and the following disclaimer
- * unmodified other than the allowable addition of one or more
- * copyright notices.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice(s), this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
- * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- ******************************************************************************
- *
- * cpp macro implementation of left-leaning red-black trees.
- *
- * Usage:
- *
- * (Optional.)
- * #define SIZEOF_PTR ...
- * #define SIZEOF_PTR_2POW ...
- * #define RB_NO_C99_VARARRAYS
- *
- * (Optional, see assert(3).)
- * #define NDEBUG
- *
- * (Required.)
- * #include <assert.h>
- * #include <rb.h>
- * ...
- *
- * All operations are done non-recursively. Parent pointers are not used, and
- * color bits are stored in the least significant bit of right-child pointers,
- * thus making node linkage as compact as is possible for red-black trees.
- *
- * Some macros use a comparison function pointer, which is expected to have the
- * following prototype:
- *
- *   int (a_cmp *)(a_type *a_node, a_type *a_other);
- *                         ^^^^^^
- *                         or a_key
- *
- * Interpretation of comparison function return values:
- *
- * -1 : a_node < a_other
- * 0 : a_node == a_other
- * 1 : a_node > a_other
- *
- * In all cases, the a_node or a_key macro argument is the first argument to the
- * comparison function, which makes it possible to write comparison functions
- * that treat the first argument specially.
- *
- ******************************************************************************/
-
-#ifndef RB_H_
-#define RB_H_
-
-#if 0
-#include <sys/cdefs.h>
-__FBSDID("$FreeBSD: head/lib/libc/stdlib/rb.h 178995 2008-05-14 18:33:13Z jasone $");
-#endif
-
-/* Node structure. */
-#define rb_node(a_type) \
-struct { \
- a_type *rbn_left; \
- a_type *rbn_right_red; \
-}
-
-/* Root structure. */
-#define rb_tree(a_type) \
-struct { \
- a_type *rbt_root; \
- a_type rbt_nil; \
-}
-
-/* Left accessors. */
-#define rbp_left_get(a_type, a_field, a_node) \
- ((a_node)->a_field.rbn_left)
-#define rbp_left_set(a_type, a_field, a_node, a_left) do { \
- (a_node)->a_field.rbn_left = a_left; \
-} while (0)
-
-/* Right accessors. */
-#define rbp_right_get(a_type, a_field, a_node) \
- ((a_type *) (((intptr_t) (a_node)->a_field.rbn_right_red) \
- & ((ssize_t)-2)))
-#define rbp_right_set(a_type, a_field, a_node, a_right) do { \
- (a_node)->a_field.rbn_right_red = (a_type *) (((uintptr_t) a_right) \
- | (((uintptr_t) (a_node)->a_field.rbn_right_red) & ((size_t)1))); \
-} while (0)
-
-/* Color accessors. */
-#define rbp_red_get(a_type, a_field, a_node) \
- ((bool) (((uintptr_t) (a_node)->a_field.rbn_right_red) \
- & ((size_t)1)))
-#define rbp_color_set(a_type, a_field, a_node, a_red) do { \
- (a_node)->a_field.rbn_right_red = (a_type *) ((((intptr_t) \
- (a_node)->a_field.rbn_right_red) & ((ssize_t)-2)) \
- | ((ssize_t)a_red)); \
-} while (0)
-#define rbp_red_set(a_type, a_field, a_node) do { \
- (a_node)->a_field.rbn_right_red = (a_type *) (((uintptr_t) \
- (a_node)->a_field.rbn_right_red) | ((size_t)1)); \
-} while (0)
-#define rbp_black_set(a_type, a_field, a_node) do { \
- (a_node)->a_field.rbn_right_red = (a_type *) (((intptr_t) \
- (a_node)->a_field.rbn_right_red) & ((ssize_t)-2)); \
-} while (0)
-
-/* Node initializer. */
-#define rbp_node_new(a_type, a_field, a_tree, a_node) do { \
- rbp_left_set(a_type, a_field, (a_node), &(a_tree)->rbt_nil); \
- rbp_right_set(a_type, a_field, (a_node), &(a_tree)->rbt_nil); \
- rbp_red_set(a_type, a_field, (a_node)); \
-} while (0)
-
-/* Tree initializer. */
-#define rb_new(a_type, a_field, a_tree) do { \
- (a_tree)->rbt_root = &(a_tree)->rbt_nil; \
- rbp_node_new(a_type, a_field, a_tree, &(a_tree)->rbt_nil); \
- rbp_black_set(a_type, a_field, &(a_tree)->rbt_nil); \
-} while (0)
-
-/* Tree operations. */
-#define rbp_black_height(a_type, a_field, a_tree, r_height) do { \
- a_type *rbp_bh_t; \
- for (rbp_bh_t = (a_tree)->rbt_root, (r_height) = 0; \
- rbp_bh_t != &(a_tree)->rbt_nil; \
- rbp_bh_t = rbp_left_get(a_type, a_field, rbp_bh_t)) { \
- if (rbp_red_get(a_type, a_field, rbp_bh_t) == false) { \
- (r_height)++; \
- } \
- } \
-} while (0)
-
-#define rbp_first(a_type, a_field, a_tree, a_root, r_node) do { \
- for ((r_node) = (a_root); \
- rbp_left_get(a_type, a_field, (r_node)) != &(a_tree)->rbt_nil; \
- (r_node) = rbp_left_get(a_type, a_field, (r_node))) { \
- } \
-} while (0)
-
-#define rbp_last(a_type, a_field, a_tree, a_root, r_node) do { \
- for ((r_node) = (a_root); \
- rbp_right_get(a_type, a_field, (r_node)) != &(a_tree)->rbt_nil; \
- (r_node) = rbp_right_get(a_type, a_field, (r_node))) { \
- } \
-} while (0)
-
-#define rbp_next(a_type, a_field, a_cmp, a_tree, a_node, r_node) do { \
- if (rbp_right_get(a_type, a_field, (a_node)) \
- != &(a_tree)->rbt_nil) { \
- rbp_first(a_type, a_field, a_tree, rbp_right_get(a_type, \
- a_field, (a_node)), (r_node)); \
- } else { \
- a_type *rbp_n_t = (a_tree)->rbt_root; \
- assert(rbp_n_t != &(a_tree)->rbt_nil); \
- (r_node) = &(a_tree)->rbt_nil; \
- while (true) { \
- int rbp_n_cmp = (a_cmp)((a_node), rbp_n_t); \
- if (rbp_n_cmp < 0) { \
- (r_node) = rbp_n_t; \
- rbp_n_t = rbp_left_get(a_type, a_field, rbp_n_t); \
- } else if (rbp_n_cmp > 0) { \
- rbp_n_t = rbp_right_get(a_type, a_field, rbp_n_t); \
- } else { \
- break; \
- } \
- assert(rbp_n_t != &(a_tree)->rbt_nil); \
- } \
- } \
-} while (0)
-
-#define rbp_prev(a_type, a_field, a_cmp, a_tree, a_node, r_node) do { \
- if (rbp_left_get(a_type, a_field, (a_node)) != &(a_tree)->rbt_nil) {\
- rbp_last(a_type, a_field, a_tree, rbp_left_get(a_type, \
- a_field, (a_node)), (r_node)); \
- } else { \
- a_type *rbp_p_t = (a_tree)->rbt_root; \
- assert(rbp_p_t != &(a_tree)->rbt_nil); \
- (r_node) = &(a_tree)->rbt_nil; \
- while (true) { \
- int rbp_p_cmp = (a_cmp)((a_node), rbp_p_t); \
- if (rbp_p_cmp < 0) { \
- rbp_p_t = rbp_left_get(a_type, a_field, rbp_p_t); \
- } else if (rbp_p_cmp > 0) { \
- (r_node) = rbp_p_t; \
- rbp_p_t = rbp_right_get(a_type, a_field, rbp_p_t); \
- } else { \
- break; \
- } \
- assert(rbp_p_t != &(a_tree)->rbt_nil); \
- } \
- } \
-} while (0)
-
-#define rb_first(a_type, a_field, a_tree, r_node) do { \
- rbp_first(a_type, a_field, a_tree, (a_tree)->rbt_root, (r_node)); \
- if ((r_node) == &(a_tree)->rbt_nil) { \
- (r_node) = NULL; \
- } \
-} while (0)
-
-#define rb_last(a_type, a_field, a_tree, r_node) do { \
- rbp_last(a_type, a_field, a_tree, (a_tree)->rbt_root, r_node); \
- if ((r_node) == &(a_tree)->rbt_nil) { \
- (r_node) = NULL; \
- } \
-} while (0)
-
-#define rb_next(a_type, a_field, a_cmp, a_tree, a_node, r_node) do { \
- rbp_next(a_type, a_field, a_cmp, a_tree, (a_node), (r_node)); \
- if ((r_node) == &(a_tree)->rbt_nil) { \
- (r_node) = NULL; \
- } \
-} while (0)
-
-#define rb_prev(a_type, a_field, a_cmp, a_tree, a_node, r_node) do { \
- rbp_prev(a_type, a_field, a_cmp, a_tree, (a_node), (r_node)); \
- if ((r_node) == &(a_tree)->rbt_nil) { \
- (r_node) = NULL; \
- } \
-} while (0)
-
-#define rb_search(a_type, a_field, a_cmp, a_tree, a_key, r_node) do { \
- int rbp_se_cmp; \
- (r_node) = (a_tree)->rbt_root; \
- while ((r_node) != &(a_tree)->rbt_nil \
- && (rbp_se_cmp = (a_cmp)((a_key), (r_node))) != 0) { \
- if (rbp_se_cmp < 0) { \
- (r_node) = rbp_left_get(a_type, a_field, (r_node)); \
- } else { \
- (r_node) = rbp_right_get(a_type, a_field, (r_node)); \
- } \
- } \
- if ((r_node) == &(a_tree)->rbt_nil) { \
- (r_node) = NULL; \
- } \
-} while (0)
-
-/*
- * Find a match if it exists. Otherwise, find the next greater node, if one
- * exists.
- */
-#define rb_nsearch(a_type, a_field, a_cmp, a_tree, a_key, r_node) do { \
- a_type *rbp_ns_t = (a_tree)->rbt_root; \
- (r_node) = NULL; \
- while (rbp_ns_t != &(a_tree)->rbt_nil) { \
- int rbp_ns_cmp = (a_cmp)((a_key), rbp_ns_t); \
- if (rbp_ns_cmp < 0) { \
- (r_node) = rbp_ns_t; \
- rbp_ns_t = rbp_left_get(a_type, a_field, rbp_ns_t); \
- } else if (rbp_ns_cmp > 0) { \
- rbp_ns_t = rbp_right_get(a_type, a_field, rbp_ns_t); \
- } else { \
- (r_node) = rbp_ns_t; \
- break; \
- } \
- } \
-} while (0)
-
-/*
- * Find a match if it exists. Otherwise, find the previous lesser node, if one
- * exists.
- */
-#define rb_psearch(a_type, a_field, a_cmp, a_tree, a_key, r_node) do { \
- a_type *rbp_ps_t = (a_tree)->rbt_root; \
- (r_node) = NULL; \
- while (rbp_ps_t != &(a_tree)->rbt_nil) { \
- int rbp_ps_cmp = (a_cmp)((a_key), rbp_ps_t); \
- if (rbp_ps_cmp < 0) { \
- rbp_ps_t = rbp_left_get(a_type, a_field, rbp_ps_t); \
- } else if (rbp_ps_cmp > 0) { \
- (r_node) = rbp_ps_t; \
- rbp_ps_t = rbp_right_get(a_type, a_field, rbp_ps_t); \
- } else { \
- (r_node) = rbp_ps_t; \
- break; \
- } \
- } \
-} while (0)
-
-#define rbp_rotate_left(a_type, a_field, a_node, r_node) do { \
- (r_node) = rbp_right_get(a_type, a_field, (a_node)); \
- rbp_right_set(a_type, a_field, (a_node), \
- rbp_left_get(a_type, a_field, (r_node))); \
- rbp_left_set(a_type, a_field, (r_node), (a_node)); \
-} while (0)
-
-#define rbp_rotate_right(a_type, a_field, a_node, r_node) do { \
- (r_node) = rbp_left_get(a_type, a_field, (a_node)); \
- rbp_left_set(a_type, a_field, (a_node), \
- rbp_right_get(a_type, a_field, (r_node))); \
- rbp_right_set(a_type, a_field, (r_node), (a_node)); \
-} while (0)
-
-#define rbp_lean_left(a_type, a_field, a_node, r_node) do { \
- bool rbp_ll_red; \
- rbp_rotate_left(a_type, a_field, (a_node), (r_node)); \
- rbp_ll_red = rbp_red_get(a_type, a_field, (a_node)); \
- rbp_color_set(a_type, a_field, (r_node), rbp_ll_red); \
- rbp_red_set(a_type, a_field, (a_node)); \
-} while (0)
-
-#define rbp_lean_right(a_type, a_field, a_node, r_node) do { \
- bool rbp_lr_red; \
- rbp_rotate_right(a_type, a_field, (a_node), (r_node)); \
- rbp_lr_red = rbp_red_get(a_type, a_field, (a_node)); \
- rbp_color_set(a_type, a_field, (r_node), rbp_lr_red); \
- rbp_red_set(a_type, a_field, (a_node)); \
-} while (0)
-
-#define rbp_move_red_left(a_type, a_field, a_node, r_node) do { \
- a_type *rbp_mrl_t, *rbp_mrl_u; \
- rbp_mrl_t = rbp_left_get(a_type, a_field, (a_node)); \
- rbp_red_set(a_type, a_field, rbp_mrl_t); \
- rbp_mrl_t = rbp_right_get(a_type, a_field, (a_node)); \
- rbp_mrl_u = rbp_left_get(a_type, a_field, rbp_mrl_t); \
- if (rbp_red_get(a_type, a_field, rbp_mrl_u)) { \
- rbp_rotate_right(a_type, a_field, rbp_mrl_t, rbp_mrl_u); \
- rbp_right_set(a_type, a_field, (a_node), rbp_mrl_u); \
- rbp_rotate_left(a_type, a_field, (a_node), (r_node)); \
- rbp_mrl_t = rbp_right_get(a_type, a_field, (a_node)); \
- if (rbp_red_get(a_type, a_field, rbp_mrl_t)) { \
- rbp_black_set(a_type, a_field, rbp_mrl_t); \
- rbp_red_set(a_type, a_field, (a_node)); \
- rbp_rotate_left(a_type, a_field, (a_node), rbp_mrl_t); \
- rbp_left_set(a_type, a_field, (r_node), rbp_mrl_t); \
- } else { \
- rbp_black_set(a_type, a_field, (a_node)); \
- } \
- } else { \
- rbp_red_set(a_type, a_field, (a_node)); \
- rbp_rotate_left(a_type, a_field, (a_node), (r_node)); \
- } \
-} while (0)
-
-#define rbp_move_red_right(a_type, a_field, a_node, r_node) do { \
- a_type *rbp_mrr_t; \
- rbp_mrr_t = rbp_left_get(a_type, a_field, (a_node)); \
- if (rbp_red_get(a_type, a_field, rbp_mrr_t)) { \
- a_type *rbp_mrr_u, *rbp_mrr_v; \
- rbp_mrr_u = rbp_right_get(a_type, a_field, rbp_mrr_t); \
- rbp_mrr_v = rbp_left_get(a_type, a_field, rbp_mrr_u); \
- if (rbp_red_get(a_type, a_field, rbp_mrr_v)) { \
- rbp_color_set(a_type, a_field, rbp_mrr_u, \
- rbp_red_get(a_type, a_field, (a_node))); \
- rbp_black_set(a_type, a_field, rbp_mrr_v); \
- rbp_rotate_left(a_type, a_field, rbp_mrr_t, rbp_mrr_u); \
- rbp_left_set(a_type, a_field, (a_node), rbp_mrr_u); \
- rbp_rotate_right(a_type, a_field, (a_node), (r_node)); \
- rbp_rotate_left(a_type, a_field, (a_node), rbp_mrr_t); \
- rbp_right_set(a_type, a_field, (r_node), rbp_mrr_t); \
- } else { \
- rbp_color_set(a_type, a_field, rbp_mrr_t, \
- rbp_red_get(a_type, a_field, (a_node))); \
- rbp_red_set(a_type, a_field, rbp_mrr_u); \
- rbp_rotate_right(a_type, a_field, (a_node), (r_node)); \
- rbp_rotate_left(a_type, a_field, (a_node), rbp_mrr_t); \
- rbp_right_set(a_type, a_field, (r_node), rbp_mrr_t); \
- } \
- rbp_red_set(a_type, a_field, (a_node)); \
- } else { \
- rbp_red_set(a_type, a_field, rbp_mrr_t); \
- rbp_mrr_t = rbp_left_get(a_type, a_field, rbp_mrr_t); \
- if (rbp_red_get(a_type, a_field, rbp_mrr_t)) { \
- rbp_black_set(a_type, a_field, rbp_mrr_t); \
- rbp_rotate_right(a_type, a_field, (a_node), (r_node)); \
- rbp_rotate_left(a_type, a_field, (a_node), rbp_mrr_t); \
- rbp_right_set(a_type, a_field, (r_node), rbp_mrr_t); \
- } else { \
- rbp_rotate_left(a_type, a_field, (a_node), (r_node)); \
- } \
- } \
-} while (0)
-
-#define rb_insert(a_type, a_field, a_cmp, a_tree, a_node) do { \
- a_type rbp_i_s; \
- a_type *rbp_i_g, *rbp_i_p, *rbp_i_c, *rbp_i_t, *rbp_i_u; \
- int rbp_i_cmp = 0; \
- rbp_i_g = &(a_tree)->rbt_nil; \
- rbp_left_set(a_type, a_field, &rbp_i_s, (a_tree)->rbt_root); \
- rbp_right_set(a_type, a_field, &rbp_i_s, &(a_tree)->rbt_nil); \
- rbp_black_set(a_type, a_field, &rbp_i_s); \
- rbp_i_p = &rbp_i_s; \
- rbp_i_c = (a_tree)->rbt_root; \
- /* Iteratively search down the tree for the insertion point, */\
- /* splitting 4-nodes as they are encountered. At the end of each */\
- /* iteration, rbp_i_g->rbp_i_p->rbp_i_c is a 3-level path down */\
- /* the tree, assuming a sufficiently deep tree. */\
- while (rbp_i_c != &(a_tree)->rbt_nil) { \
- rbp_i_t = rbp_left_get(a_type, a_field, rbp_i_c); \
- rbp_i_u = rbp_left_get(a_type, a_field, rbp_i_t); \
- if (rbp_red_get(a_type, a_field, rbp_i_t) \
- && rbp_red_get(a_type, a_field, rbp_i_u)) { \
- /* rbp_i_c is the top of a logical 4-node, so split it. */\
- /* This iteration does not move down the tree, due to the */\
- /* disruptiveness of node splitting. */\
- /* */\
- /* Rotate right. */\
- rbp_rotate_right(a_type, a_field, rbp_i_c, rbp_i_t); \
- /* Pass red links up one level. */\
- rbp_i_u = rbp_left_get(a_type, a_field, rbp_i_t); \
- rbp_black_set(a_type, a_field, rbp_i_u); \
- if (rbp_left_get(a_type, a_field, rbp_i_p) == rbp_i_c) { \
- rbp_left_set(a_type, a_field, rbp_i_p, rbp_i_t); \
- rbp_i_c = rbp_i_t; \
- } else { \
- /* rbp_i_c was the right child of rbp_i_p, so rotate */\
- /* left in order to maintain the left-leaning */\
- /* invariant. */\
- assert(rbp_right_get(a_type, a_field, rbp_i_p) \
- == rbp_i_c); \
- rbp_right_set(a_type, a_field, rbp_i_p, rbp_i_t); \
- rbp_lean_left(a_type, a_field, rbp_i_p, rbp_i_u); \
- if (rbp_left_get(a_type, a_field, rbp_i_g) == rbp_i_p) {\
- rbp_left_set(a_type, a_field, rbp_i_g, rbp_i_u); \
- } else { \
- assert(rbp_right_get(a_type, a_field, rbp_i_g) \
- == rbp_i_p); \
- rbp_right_set(a_type, a_field, rbp_i_g, rbp_i_u); \
- } \
- rbp_i_p = rbp_i_u; \
- rbp_i_cmp = (a_cmp)((a_node), rbp_i_p); \
- if (rbp_i_cmp < 0) { \
- rbp_i_c = rbp_left_get(a_type, a_field, rbp_i_p); \
- } else { \
- assert(rbp_i_cmp > 0); \
- rbp_i_c = rbp_right_get(a_type, a_field, rbp_i_p); \
- } \
- continue; \
- } \
- } \
- rbp_i_g = rbp_i_p; \
- rbp_i_p = rbp_i_c; \
- rbp_i_cmp = (a_cmp)((a_node), rbp_i_c); \
- if (rbp_i_cmp < 0) { \
- rbp_i_c = rbp_left_get(a_type, a_field, rbp_i_c); \
- } else { \
- assert(rbp_i_cmp > 0); \
- rbp_i_c = rbp_right_get(a_type, a_field, rbp_i_c); \
- } \
- } \
- /* rbp_i_p now refers to the node under which to insert. */\
- rbp_node_new(a_type, a_field, a_tree, (a_node)); \
- if (rbp_i_cmp > 0) { \
- rbp_right_set(a_type, a_field, rbp_i_p, (a_node)); \
- rbp_lean_left(a_type, a_field, rbp_i_p, rbp_i_t); \
- if (rbp_left_get(a_type, a_field, rbp_i_g) == rbp_i_p) { \
- rbp_left_set(a_type, a_field, rbp_i_g, rbp_i_t); \
- } else if (rbp_right_get(a_type, a_field, rbp_i_g) == rbp_i_p) {\
- rbp_right_set(a_type, a_field, rbp_i_g, rbp_i_t); \
- } \
- } else { \
- rbp_left_set(a_type, a_field, rbp_i_p, (a_node)); \
- } \
- /* Update the root and make sure that it is black. */\
- (a_tree)->rbt_root = rbp_left_get(a_type, a_field, &rbp_i_s); \
- rbp_black_set(a_type, a_field, (a_tree)->rbt_root); \
-} while (0)
-
-#define rb_remove(a_type, a_field, a_cmp, a_tree, a_node) do { \
- a_type rbp_r_s; \
- a_type *rbp_r_p, *rbp_r_c, *rbp_r_xp, *rbp_r_t, *rbp_r_u; \
- int rbp_r_cmp; \
- rbp_left_set(a_type, a_field, &rbp_r_s, (a_tree)->rbt_root); \
- rbp_right_set(a_type, a_field, &rbp_r_s, &(a_tree)->rbt_nil); \
- rbp_black_set(a_type, a_field, &rbp_r_s); \
- rbp_r_p = &rbp_r_s; \
- rbp_r_c = (a_tree)->rbt_root; \
- rbp_r_xp = &(a_tree)->rbt_nil; \
- /* Iterate down the tree, but always transform 2-nodes to 3- or */\
- /* 4-nodes in order to maintain the invariant that the current */\
- /* node is not a 2-node. This allows simple deletion once a leaf */\
- /* is reached. Handle the root specially though, since there may */\
- /* be no way to convert it from a 2-node to a 3-node. */\
- rbp_r_cmp = (a_cmp)((a_node), rbp_r_c); \
- if (rbp_r_cmp < 0) { \
- rbp_r_t = rbp_left_get(a_type, a_field, rbp_r_c); \
- rbp_r_u = rbp_left_get(a_type, a_field, rbp_r_t); \
- if (rbp_red_get(a_type, a_field, rbp_r_t) == false \
- && rbp_red_get(a_type, a_field, rbp_r_u) == false) { \
- /* Apply standard transform to prepare for left move. */\
- rbp_move_red_left(a_type, a_field, rbp_r_c, rbp_r_t); \
- rbp_black_set(a_type, a_field, rbp_r_t); \
- rbp_left_set(a_type, a_field, rbp_r_p, rbp_r_t); \
- rbp_r_c = rbp_r_t; \
- } else { \
- /* Move left. */\
- rbp_r_p = rbp_r_c; \
- rbp_r_c = rbp_left_get(a_type, a_field, rbp_r_c); \
- } \
- } else { \
- if (rbp_r_cmp == 0) { \
- assert((a_node) == rbp_r_c); \
- if (rbp_right_get(a_type, a_field, rbp_r_c) \
- == &(a_tree)->rbt_nil) { \
- /* Delete root node (which is also a leaf node). */\
- if (rbp_left_get(a_type, a_field, rbp_r_c) \
- != &(a_tree)->rbt_nil) { \
- rbp_lean_right(a_type, a_field, rbp_r_c, rbp_r_t); \
- rbp_right_set(a_type, a_field, rbp_r_t, \
- &(a_tree)->rbt_nil); \
- } else { \
- rbp_r_t = &(a_tree)->rbt_nil; \
- } \
- rbp_left_set(a_type, a_field, rbp_r_p, rbp_r_t); \
- } else { \
- /* This is the node we want to delete, but we will */\
- /* instead swap it with its successor and delete the */\
- /* successor. Record enough information to do the */\
-			/* swap later.  rbp_r_xp is a_node's parent. */\
- rbp_r_xp = rbp_r_p; \
- rbp_r_cmp = 1; /* Note that deletion is incomplete. */\
- } \
- } \
- if (rbp_r_cmp == 1) { \
- if (rbp_red_get(a_type, a_field, rbp_left_get(a_type, \
- a_field, rbp_right_get(a_type, a_field, rbp_r_c))) \
- == false) { \
- rbp_r_t = rbp_left_get(a_type, a_field, rbp_r_c); \
- if (rbp_red_get(a_type, a_field, rbp_r_t)) { \
- /* Standard transform. */\
- rbp_move_red_right(a_type, a_field, rbp_r_c, \
- rbp_r_t); \
- } else { \
- /* Root-specific transform. */\
- rbp_red_set(a_type, a_field, rbp_r_c); \
- rbp_r_u = rbp_left_get(a_type, a_field, rbp_r_t); \
- if (rbp_red_get(a_type, a_field, rbp_r_u)) { \
- rbp_black_set(a_type, a_field, rbp_r_u); \
- rbp_rotate_right(a_type, a_field, rbp_r_c, \
- rbp_r_t); \
- rbp_rotate_left(a_type, a_field, rbp_r_c, \
- rbp_r_u); \
- rbp_right_set(a_type, a_field, rbp_r_t, \
- rbp_r_u); \
- } else { \
- rbp_red_set(a_type, a_field, rbp_r_t); \
- rbp_rotate_left(a_type, a_field, rbp_r_c, \
- rbp_r_t); \
- } \
- } \
- rbp_left_set(a_type, a_field, rbp_r_p, rbp_r_t); \
- rbp_r_c = rbp_r_t; \
- } else { \
- /* Move right. */\
- rbp_r_p = rbp_r_c; \
- rbp_r_c = rbp_right_get(a_type, a_field, rbp_r_c); \
- } \
- } \
- } \
- if (rbp_r_cmp != 0) { \
- while (true) { \
- assert(rbp_r_p != &(a_tree)->rbt_nil); \
- rbp_r_cmp = (a_cmp)((a_node), rbp_r_c); \
- if (rbp_r_cmp < 0) { \
- rbp_r_t = rbp_left_get(a_type, a_field, rbp_r_c); \
- if (rbp_r_t == &(a_tree)->rbt_nil) { \
- /* rbp_r_c now refers to the successor node to */\
- /* relocate, and rbp_r_xp/a_node refer to the */\
- /* context for the relocation. */\
- if (rbp_left_get(a_type, a_field, rbp_r_xp) \
- == (a_node)) { \
- rbp_left_set(a_type, a_field, rbp_r_xp, \
- rbp_r_c); \
- } else { \
- assert(rbp_right_get(a_type, a_field, \
- rbp_r_xp) == (a_node)); \
- rbp_right_set(a_type, a_field, rbp_r_xp, \
- rbp_r_c); \
- } \
- rbp_left_set(a_type, a_field, rbp_r_c, \
- rbp_left_get(a_type, a_field, (a_node))); \
- rbp_right_set(a_type, a_field, rbp_r_c, \
- rbp_right_get(a_type, a_field, (a_node))); \
- rbp_color_set(a_type, a_field, rbp_r_c, \
- rbp_red_get(a_type, a_field, (a_node))); \
- if (rbp_left_get(a_type, a_field, rbp_r_p) \
- == rbp_r_c) { \
- rbp_left_set(a_type, a_field, rbp_r_p, \
- &(a_tree)->rbt_nil); \
- } else { \
- assert(rbp_right_get(a_type, a_field, rbp_r_p) \
- == rbp_r_c); \
- rbp_right_set(a_type, a_field, rbp_r_p, \
- &(a_tree)->rbt_nil); \
- } \
- break; \
- } \
- rbp_r_u = rbp_left_get(a_type, a_field, rbp_r_t); \
- if (rbp_red_get(a_type, a_field, rbp_r_t) == false \
- && rbp_red_get(a_type, a_field, rbp_r_u) == false) { \
- rbp_move_red_left(a_type, a_field, rbp_r_c, \
- rbp_r_t); \
- if (rbp_left_get(a_type, a_field, rbp_r_p) \
- == rbp_r_c) { \
- rbp_left_set(a_type, a_field, rbp_r_p, rbp_r_t);\
- } else { \
- rbp_right_set(a_type, a_field, rbp_r_p, \
- rbp_r_t); \
- } \
- rbp_r_c = rbp_r_t; \
- } else { \
- rbp_r_p = rbp_r_c; \
- rbp_r_c = rbp_left_get(a_type, a_field, rbp_r_c); \
- } \
- } else { \
- /* Check whether to delete this node (it has to be */\
- /* the correct node and a leaf node). */\
- if (rbp_r_cmp == 0) { \
- assert((a_node) == rbp_r_c); \
- if (rbp_right_get(a_type, a_field, rbp_r_c) \
- == &(a_tree)->rbt_nil) { \
- /* Delete leaf node. */\
- if (rbp_left_get(a_type, a_field, rbp_r_c) \
- != &(a_tree)->rbt_nil) { \
- rbp_lean_right(a_type, a_field, rbp_r_c, \
- rbp_r_t); \
- rbp_right_set(a_type, a_field, rbp_r_t, \
- &(a_tree)->rbt_nil); \
- } else { \
- rbp_r_t = &(a_tree)->rbt_nil; \
- } \
- if (rbp_left_get(a_type, a_field, rbp_r_p) \
- == rbp_r_c) { \
- rbp_left_set(a_type, a_field, rbp_r_p, \
- rbp_r_t); \
- } else { \
- rbp_right_set(a_type, a_field, rbp_r_p, \
- rbp_r_t); \
- } \
- break; \
- } else { \
- /* This is the node we want to delete, but we */\
- /* will instead swap it with its successor */\
- /* and delete the successor. Record enough */\
- /* information to do the swap later. */\
- /* rbp_r_xp is a_node's parent. */\
- rbp_r_xp = rbp_r_p; \
- } \
- } \
- rbp_r_t = rbp_right_get(a_type, a_field, rbp_r_c); \
- rbp_r_u = rbp_left_get(a_type, a_field, rbp_r_t); \
- if (rbp_red_get(a_type, a_field, rbp_r_u) == false) { \
- rbp_move_red_right(a_type, a_field, rbp_r_c, \
- rbp_r_t); \
- if (rbp_left_get(a_type, a_field, rbp_r_p) \
- == rbp_r_c) { \
- rbp_left_set(a_type, a_field, rbp_r_p, rbp_r_t);\
- } else { \
- rbp_right_set(a_type, a_field, rbp_r_p, \
- rbp_r_t); \
- } \
- rbp_r_c = rbp_r_t; \
- } else { \
- rbp_r_p = rbp_r_c; \
- rbp_r_c = rbp_right_get(a_type, a_field, rbp_r_c); \
- } \
- } \
- } \
- } \
- /* Update root. */\
- (a_tree)->rbt_root = rbp_left_get(a_type, a_field, &rbp_r_s); \
-} while (0)
-
-/*
- * The rb_wrap() macro provides a convenient way to wrap functions around the
- * cpp macros. The main benefits of wrapping are that 1) repeated macro
- * expansion can cause code bloat, especially for rb_{insert,remove}(), and
- * 2) type, linkage, comparison functions, etc. need not be specified at every
- * call point.
- */
-
-#define rb_wrap(a_attr, a_prefix, a_tree_type, a_type, a_field, a_cmp) \
-a_attr void \
-a_prefix##new(a_tree_type *tree) { \
- rb_new(a_type, a_field, tree); \
-} \
-a_attr a_type * \
-a_prefix##first(a_tree_type *tree) { \
- a_type *ret; \
- rb_first(a_type, a_field, tree, ret); \
- return (ret); \
-} \
-a_attr a_type * \
-a_prefix##last(a_tree_type *tree) { \
- a_type *ret; \
- rb_last(a_type, a_field, tree, ret); \
- return (ret); \
-} \
-a_attr a_type * \
-a_prefix##next(a_tree_type *tree, a_type *node) { \
- a_type *ret; \
- rb_next(a_type, a_field, a_cmp, tree, node, ret); \
- return (ret); \
-} \
-a_attr a_type * \
-a_prefix##prev(a_tree_type *tree, a_type *node) { \
- a_type *ret; \
- rb_prev(a_type, a_field, a_cmp, tree, node, ret); \
- return (ret); \
-} \
-a_attr a_type * \
-a_prefix##search(a_tree_type *tree, a_type *key) { \
- a_type *ret; \
- rb_search(a_type, a_field, a_cmp, tree, key, ret); \
- return (ret); \
-} \
-a_attr a_type * \
-a_prefix##nsearch(a_tree_type *tree, a_type *key) { \
- a_type *ret; \
- rb_nsearch(a_type, a_field, a_cmp, tree, key, ret); \
- return (ret); \
-} \
-a_attr a_type * \
-a_prefix##psearch(a_tree_type *tree, a_type *key) { \
- a_type *ret; \
- rb_psearch(a_type, a_field, a_cmp, tree, key, ret); \
- return (ret); \
-} \
-a_attr void \
-a_prefix##insert(a_tree_type *tree, a_type *node) { \
- rb_insert(a_type, a_field, a_cmp, tree, node); \
-} \
-a_attr void \
-a_prefix##remove(a_tree_type *tree, a_type *node) { \
- rb_remove(a_type, a_field, a_cmp, tree, node); \
-}
-
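A hedged usage sketch for rb_wrap() (editorial, not part of the original
header); node_t and node_cmp() are hypothetical, and <assert.h> must be
included first, per the usage notes at the top of this file:

    typedef struct node_s node_t;
    struct node_s {
        int key;
        rb_node(node_t) link;   /* Embedded red-black linkage. */
    };
    typedef rb_tree(node_t) node_tree_t;

    /* Per the contract above: negative, zero, or positive. */
    static int
    node_cmp(node_t *a, node_t *b)
    {
        return ((a->key > b->key) - (a->key < b->key));
    }

    /* Emits static node_tree_new(), node_tree_insert(), node_tree_search(),
     * etc., so call sites need not repeat type/field/cmp arguments. */
    rb_wrap(static, node_tree_, node_tree_t, node_t, link, node_cmp)
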
-/*
- * The iterators simulate recursion via an array of pointers that store the
- * current path. This is critical to performance, since a series of calls to
- * rb_{next,prev}() would require time proportional to (n lg n), whereas this
- * implementation only requires time proportional to (n).
- *
- * Since the iterators cache a path down the tree, any tree modification may
- * cause the cached path to become invalid. In order to continue iteration,
- * use something like the following sequence:
- *
- * {
- * a_type *node, *tnode;
- *
- * rb_foreach_begin(a_type, a_field, a_tree, node) {
- * ...
- * rb_next(a_type, a_field, a_cmp, a_tree, node, tnode);
- * rb_remove(a_type, a_field, a_cmp, a_tree, node);
- * rb_foreach_next(a_type, a_field, a_cmp, a_tree, tnode);
- * ...
- * } rb_foreach_end(a_type, a_field, a_tree, node)
- * }
- *
- * Note that this idiom is not advised if every iteration modifies the tree,
- * since in that case there is no algorithmic complexity improvement over a
- * series of rb_{next,prev}() calls, thus making the setup overhead wasted
- * effort.
- */
-
-#ifdef RB_NO_C99_VARARRAYS
- /*
- * Avoid using variable-length arrays, at the cost of using more stack space.
- * Size the path arrays such that they are always large enough, even if a
- * tree consumes all of memory. Since each node must contain a minimum of
- * two pointers, there can never be more nodes than:
- *
- * 1 << ((SIZEOF_PTR<<3) - (SIZEOF_PTR_2POW+1))
- *
- * Since the depth of a tree is limited to 3*lg(#nodes), the maximum depth
- * is:
- *
- * (3 * ((SIZEOF_PTR<<3) - (SIZEOF_PTR_2POW+1)))
- *
- * This works out to a maximum depth of 87 and 180 for 32- and 64-bit
- * systems, respectively (approximately 348 and 1440 bytes).
- */
-# define rbp_compute_f_height(a_type, a_field, a_tree)
-# define rbp_f_height (3 * ((SIZEOF_PTR<<3) - (SIZEOF_PTR_2POW+1)))
-# define rbp_compute_fr_height(a_type, a_field, a_tree)
-# define rbp_fr_height (3 * ((SIZEOF_PTR<<3) - (SIZEOF_PTR_2POW+1)))
-#else
-# define rbp_compute_f_height(a_type, a_field, a_tree) \
- /* Compute the maximum possible tree depth (3X the black height). */\
- unsigned rbp_f_height; \
- rbp_black_height(a_type, a_field, a_tree, rbp_f_height); \
- rbp_f_height *= 3;
-# define rbp_compute_fr_height(a_type, a_field, a_tree) \
- /* Compute the maximum possible tree depth (3X the black height). */\
- unsigned rbp_fr_height; \
- rbp_black_height(a_type, a_field, a_tree, rbp_fr_height); \
- rbp_fr_height *= 3;
-#endif
-
-#define rb_foreach_begin(a_type, a_field, a_tree, a_var) { \
- rbp_compute_f_height(a_type, a_field, a_tree) \
- { \
- /* Initialize the path to contain the left spine. */\
- a_type *rbp_f_path[rbp_f_height]; \
- a_type *rbp_f_node; \
- bool rbp_f_synced = false; \
- unsigned rbp_f_depth = 0; \
- if ((a_tree)->rbt_root != &(a_tree)->rbt_nil) { \
- rbp_f_path[rbp_f_depth] = (a_tree)->rbt_root; \
- rbp_f_depth++; \
- while ((rbp_f_node = rbp_left_get(a_type, a_field, \
- rbp_f_path[rbp_f_depth-1])) != &(a_tree)->rbt_nil) { \
- rbp_f_path[rbp_f_depth] = rbp_f_node; \
- rbp_f_depth++; \
- } \
- } \
- /* While the path is non-empty, iterate. */\
- while (rbp_f_depth > 0) { \
- (a_var) = rbp_f_path[rbp_f_depth-1];
-
-/* Only use if modifying the tree during iteration. */
-#define rb_foreach_next(a_type, a_field, a_cmp, a_tree, a_node) \
- /* Re-initialize the path to contain the path to a_node. */\
- rbp_f_depth = 0; \
- if (a_node != NULL) { \
- if ((a_tree)->rbt_root != &(a_tree)->rbt_nil) { \
- rbp_f_path[rbp_f_depth] = (a_tree)->rbt_root; \
- rbp_f_depth++; \
- rbp_f_node = rbp_f_path[0]; \
- while (true) { \
- int rbp_f_cmp = (a_cmp)((a_node), \
- rbp_f_path[rbp_f_depth-1]); \
- if (rbp_f_cmp < 0) { \
- rbp_f_node = rbp_left_get(a_type, a_field, \
- rbp_f_path[rbp_f_depth-1]); \
- } else if (rbp_f_cmp > 0) { \
- rbp_f_node = rbp_right_get(a_type, a_field, \
- rbp_f_path[rbp_f_depth-1]); \
- } else { \
- break; \
- } \
- assert(rbp_f_node != &(a_tree)->rbt_nil); \
- rbp_f_path[rbp_f_depth] = rbp_f_node; \
- rbp_f_depth++; \
- } \
- } \
- } \
- rbp_f_synced = true;
-
-#define rb_foreach_end(a_type, a_field, a_tree, a_var) \
- if (rbp_f_synced) { \
- rbp_f_synced = false; \
- continue; \
- } \
- /* Find the successor. */\
- if ((rbp_f_node = rbp_right_get(a_type, a_field, \
- rbp_f_path[rbp_f_depth-1])) != &(a_tree)->rbt_nil) { \
- /* The successor is the left-most node in the right */\
- /* subtree. */\
- rbp_f_path[rbp_f_depth] = rbp_f_node; \
- rbp_f_depth++; \
- while ((rbp_f_node = rbp_left_get(a_type, a_field, \
- rbp_f_path[rbp_f_depth-1])) != &(a_tree)->rbt_nil) { \
- rbp_f_path[rbp_f_depth] = rbp_f_node; \
- rbp_f_depth++; \
- } \
- } else { \
- /* The successor is above the current node. Unwind */\
- /* until a left-leaning edge is removed from the */\
- /* path, or the path is empty. */\
- for (rbp_f_depth--; rbp_f_depth > 0; rbp_f_depth--) { \
- if (rbp_left_get(a_type, a_field, \
- rbp_f_path[rbp_f_depth-1]) \
- == rbp_f_path[rbp_f_depth]) { \
- break; \
- } \
- } \
- } \
- } \
- } \
-}
-
-#define rb_foreach_reverse_begin(a_type, a_field, a_tree, a_var) { \
- rbp_compute_fr_height(a_type, a_field, a_tree) \
- { \
- /* Initialize the path to contain the right spine. */\
- a_type *rbp_fr_path[rbp_fr_height]; \
- a_type *rbp_fr_node; \
- bool rbp_fr_synced = false; \
- unsigned rbp_fr_depth = 0; \
- if ((a_tree)->rbt_root != &(a_tree)->rbt_nil) { \
- rbp_fr_path[rbp_fr_depth] = (a_tree)->rbt_root; \
- rbp_fr_depth++; \
- while ((rbp_fr_node = rbp_right_get(a_type, a_field, \
- rbp_fr_path[rbp_fr_depth-1])) != &(a_tree)->rbt_nil) { \
- rbp_fr_path[rbp_fr_depth] = rbp_fr_node; \
- rbp_fr_depth++; \
- } \
- } \
- /* While the path is non-empty, iterate. */\
- while (rbp_fr_depth > 0) { \
- (a_var) = rbp_fr_path[rbp_fr_depth-1];
-
-/* Only use if modifying the tree during iteration. */
-#define rb_foreach_reverse_prev(a_type, a_field, a_cmp, a_tree, a_node) \
- /* Re-initialize the path to contain the path to a_node. */\
- rbp_fr_depth = 0; \
- if (a_node != NULL) { \
- if ((a_tree)->rbt_root != &(a_tree)->rbt_nil) { \
- rbp_fr_path[rbp_fr_depth] = (a_tree)->rbt_root; \
- rbp_fr_depth++; \
- rbp_fr_node = rbp_fr_path[0]; \
- while (true) { \
- int rbp_fr_cmp = (a_cmp)((a_node), \
- rbp_fr_path[rbp_fr_depth-1]); \
- if (rbp_fr_cmp < 0) { \
- rbp_fr_node = rbp_left_get(a_type, a_field, \
- rbp_fr_path[rbp_fr_depth-1]); \
- } else if (rbp_fr_cmp > 0) { \
- rbp_fr_node = rbp_right_get(a_type, a_field,\
- rbp_fr_path[rbp_fr_depth-1]); \
- } else { \
- break; \
- } \
- assert(rbp_fr_node != &(a_tree)->rbt_nil); \
- rbp_fr_path[rbp_fr_depth] = rbp_fr_node; \
- rbp_fr_depth++; \
- } \
- } \
- } \
- rbp_fr_synced = true;
-
-#define rb_foreach_reverse_end(a_type, a_field, a_tree, a_var) \
- if (rbp_fr_synced) { \
- rbp_fr_synced = false; \
- continue; \
- } \
- if (rbp_fr_depth == 0) { \
-			/* rb_foreach_reverse_prev() was called with a */\
-			/* NULL a_node. */\
- break; \
- } \
- /* Find the predecessor. */\
- if ((rbp_fr_node = rbp_left_get(a_type, a_field, \
- rbp_fr_path[rbp_fr_depth-1])) != &(a_tree)->rbt_nil) { \
- /* The predecessor is the right-most node in the left */\
- /* subtree. */\
- rbp_fr_path[rbp_fr_depth] = rbp_fr_node; \
- rbp_fr_depth++; \
- while ((rbp_fr_node = rbp_right_get(a_type, a_field, \
- rbp_fr_path[rbp_fr_depth-1])) != &(a_tree)->rbt_nil) {\
- rbp_fr_path[rbp_fr_depth] = rbp_fr_node; \
- rbp_fr_depth++; \
- } \
- } else { \
- /* The predecessor is above the current node. Unwind */\
- /* until a right-leaning edge is removed from the */\
- /* path, or the path is empty. */\
- for (rbp_fr_depth--; rbp_fr_depth > 0; rbp_fr_depth--) {\
- if (rbp_right_get(a_type, a_field, \
- rbp_fr_path[rbp_fr_depth-1]) \
- == rbp_fr_path[rbp_fr_depth]) { \
- break; \
- } \
- } \
- } \
- } \
- } \
-}
-
-#endif /* RB_H_ */
-
diff --git a/third_party/tcmalloc/page_heap.cc b/third_party/tcmalloc/page_heap.cc
deleted file mode 100644
index f92cfc4..0000000
--- a/third_party/tcmalloc/page_heap.cc
+++ /dev/null
@@ -1,530 +0,0 @@
-// Copyright (c) 2008, Google Inc.
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// ---
-// Author: Sanjay Ghemawat <opensource@google.com>
-
-#include <config.h>
-#include "page_heap.h"
-
-#include "static_vars.h"
-#include "system-alloc.h"
-
-DEFINE_double(tcmalloc_release_rate,
- EnvToDouble("TCMALLOC_RELEASE_RATE", 1.0),
- "Rate at which we release unused memory to the system. "
- "Zero means we never release memory back to the system. "
- "Increase this flag to return memory faster; decrease it "
- "to return memory slower. Reasonable rates are in the "
- "range [0,10]");
-
-namespace tcmalloc {
-
-PageHeap::PageHeap()
- : pagemap_(MetaDataAlloc),
- pagemap_cache_(0),
- free_pages_(0),
- system_bytes_(0),
- committed_bytes_(0),
- scavenge_counter_(0),
- // Start scavenging at kMaxPages list
- scavenge_index_(kMaxPages-1) {
- COMPILE_ASSERT(kNumClasses <= (1 << PageMapCache::kValuebits), valuebits);
- DLL_Init(&large_.normal);
- DLL_Init(&large_.returned);
- for (int i = 0; i < kMaxPages; i++) {
- DLL_Init(&free_[i].normal);
- DLL_Init(&free_[i].returned);
- }
-}
-
-Span* PageHeap::New(Length n) {
- ASSERT(Check());
- ASSERT(n > 0);
-
- // Find first size >= n that has a non-empty list
- for (Length s = n; s < kMaxPages; s++) {
- Span* ll = &free_[s].normal;
- // If we're lucky, ll is non-empty, meaning it has a suitable span.
- if (!DLL_IsEmpty(ll)) {
- ASSERT(ll->next->location == Span::ON_NORMAL_FREELIST);
- return Carve(ll->next, n);
- }
- // Alternatively, maybe there's a usable returned span.
- ll = &free_[s].returned;
- if (!DLL_IsEmpty(ll)) {
- ASSERT(ll->next->location == Span::ON_RETURNED_FREELIST);
- return Carve(ll->next, n);
- }
- // Still no luck, so keep looking in larger classes.
- }
-
- Span* result = AllocLarge(n);
- if (result != NULL) return result;
-
- // Grow the heap and try again
- if (!GrowHeap(n)) {
- ASSERT(Check());
- return NULL;
- }
-
- return AllocLarge(n);
-}
-
-Span* PageHeap::AllocLarge(Length n) {
-  // Find the best span (closest to n in size).
-  // The following loops implement address-ordered best-fit.
- Span *best = NULL;
-
- // Search through normal list
- for (Span* span = large_.normal.next;
- span != &large_.normal;
- span = span->next) {
- if (span->length >= n) {
- if ((best == NULL)
- || (span->length < best->length)
- || ((span->length == best->length) && (span->start < best->start))) {
- best = span;
- ASSERT(best->location == Span::ON_NORMAL_FREELIST);
- }
- }
- }
-
- // Search through released list in case it has a better fit
- for (Span* span = large_.returned.next;
- span != &large_.returned;
- span = span->next) {
- if (span->length >= n) {
- if ((best == NULL)
- || (span->length < best->length)
- || ((span->length == best->length) && (span->start < best->start))) {
- best = span;
- ASSERT(best->location == Span::ON_RETURNED_FREELIST);
- }
- }
- }
-
- return best == NULL ? NULL : Carve(best, n);
-}
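
// Editorial sketch (not part of this file): the selection rule AllocLarge()
// implements, extracted over a plain array of illustrative (start, length)
// pairs rather than real Span lists. Ties on length are broken by the lower
// start address ("address-ordered best-fit").
#include <cstddef>
#include <cstdio>

struct FakeSpan { unsigned start; unsigned length; };

static const FakeSpan* BestFit(const FakeSpan* spans, size_t count,
                               unsigned n) {
  const FakeSpan* best = NULL;
  for (size_t i = 0; i < count; i++) {
    const FakeSpan* s = &spans[i];
    if (s->length < n) continue;  // too small to satisfy the request
    if (best == NULL ||
        s->length < best->length ||
        (s->length == best->length && s->start < best->start)) {
      best = s;
    }
  }
  return best;  // NULL means the caller must grow the heap
}

int main() {
  const FakeSpan spans[] = { {10, 8}, {50, 4}, {30, 4} };
  const FakeSpan* best = BestFit(spans, 3, 4);
  // Prints start=30 length=4: the exact-size spans beat the 8-page one,
  // and start 30 beats start 50 on the length tie.
  std::printf("best: start=%u length=%u\n", best->start, best->length);
  return 0;
}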
-
-Span* PageHeap::Split(Span* span, Length n) {
- ASSERT(0 < n);
- ASSERT(n < span->length);
- ASSERT(span->location == Span::IN_USE);
- ASSERT(span->sizeclass == 0);
- Event(span, 'T', n);
-
- const int extra = span->length - n;
- Span* leftover = NewSpan(span->start + n, extra);
- ASSERT(leftover->location == Span::IN_USE);
- Event(leftover, 'U', extra);
- RecordSpan(leftover);
- pagemap_.set(span->start + n - 1, span); // Update map from pageid to span
- span->length = n;
-
- return leftover;
-}
-
-void PageHeap::CommitSpan(Span* span) {
- TCMalloc_SystemCommit(reinterpret_cast<void*>(span->start << kPageShift),
- static_cast<size_t>(span->length << kPageShift));
- committed_bytes_ += span->length << kPageShift;
-}
-
-void PageHeap::DecommitSpan(Span* span) {
- TCMalloc_SystemRelease(reinterpret_cast<void*>(span->start << kPageShift),
- static_cast<size_t>(span->length << kPageShift));
- committed_bytes_ -= span->length << kPageShift;
-}
-
-Span* PageHeap::Carve(Span* span, Length n) {
- ASSERT(n > 0);
- ASSERT(span->location != Span::IN_USE);
- const int old_location = span->location;
- DLL_Remove(span);
- span->location = Span::IN_USE;
- Event(span, 'A', n);
-
- const int extra = span->length - n;
- ASSERT(extra >= 0);
- if (extra > 0) {
- Span* leftover = NewSpan(span->start + n, extra);
- leftover->location = old_location;
- Event(leftover, 'S', extra);
- RecordSpan(leftover);
-
- // Place leftover span on appropriate free list
- SpanList* listpair = (extra < kMaxPages) ? &free_[extra] : &large_;
- Span* dst = (leftover->location == Span::ON_RETURNED_FREELIST
- ? &listpair->returned : &listpair->normal);
- DLL_Prepend(dst, leftover);
-
- span->length = n;
- pagemap_.set(span->start + n - 1, span);
- }
- ASSERT(Check());
- free_pages_ -= n;
- if (old_location == Span::ON_RETURNED_FREELIST) {
- // We need to recommit this address space.
- CommitSpan(span);
- }
- ASSERT(span->location == Span::IN_USE);
- ASSERT(span->length == n);
- return span;
-}
-
-void PageHeap::Delete(Span* span) {
- ASSERT(Check());
- ASSERT(span->location == Span::IN_USE);
- ASSERT(span->length > 0);
- ASSERT(GetDescriptor(span->start) == span);
- ASSERT(GetDescriptor(span->start + span->length - 1) == span);
- span->sizeclass = 0;
- span->sample = 0;
-
- // Coalesce -- we guarantee that "p" != 0, so no bounds checking
- // necessary. We do not bother resetting the stale pagemap
- // entries for the pieces we are merging together because we only
- // care about the pagemap entries for the boundaries.
- //
- // Note that the adjacent spans we merge into "span" may come out of a
- // "normal" (committed) list, and cleanly merge with our IN_USE span, which
-  // is implicitly committed. If the adjacent spans are on the "returned"
-  // (decommitted) list, then we must get both spans into the same state before
-  // or after we coalesce them. The current code always decommits. This is
- // achieved by blindly decommitting the entire coalesced region, which may
- // include any combination of committed and decommitted spans, at the end of
- // the method.
-
- // TODO(jar): "Always decommit" causes some extra calls to commit when we are
- // called in GrowHeap() during an allocation :-/. We need to eval the cost of
- // that oscillation, and possibly do something to reduce it.
-
- // TODO(jar): We need a better strategy for deciding to commit, or decommit,
- // based on memory usage and free heap sizes.
-
- const PageID p = span->start;
- const Length n = span->length;
- Span* prev = GetDescriptor(p-1);
- if (prev != NULL && prev->location != Span::IN_USE) {
- // Merge preceding span into this span
- ASSERT(prev->start + prev->length == p);
- const Length len = prev->length;
- if (prev->location == Span::ON_RETURNED_FREELIST) {
- // We're about to put the merge span into the returned freelist and call
- // DecommitSpan() on it, which will mark the entire span including this
- // one as released and decrease committed_bytes_ by the size of the
- // merged span. To make the math work out we temporarily increase the
- // committed_bytes_ amount.
- committed_bytes_ += prev->length << kPageShift;
- }
- DLL_Remove(prev);
- DeleteSpan(prev);
- span->start -= len;
- span->length += len;
- pagemap_.set(span->start, span);
- Event(span, 'L', len);
- }
- Span* next = GetDescriptor(p+n);
- if (next != NULL && next->location != Span::IN_USE) {
- // Merge next span into this span
- ASSERT(next->start == p+n);
- const Length len = next->length;
- if (next->location == Span::ON_RETURNED_FREELIST) {
-      // See the comment in the 'if (prev->location ...)' block above.
- committed_bytes_ += next->length << kPageShift;
- }
- DLL_Remove(next);
- DeleteSpan(next);
- span->length += len;
- pagemap_.set(span->start + span->length - 1, span);
- Event(span, 'R', len);
- }
-
- Event(span, 'D', span->length);
- span->location = Span::ON_RETURNED_FREELIST;
- DecommitSpan(span);
- if (span->length < kMaxPages) {
- DLL_Prepend(&free_[span->length].returned, span);
- } else {
- DLL_Prepend(&large_.returned, span);
- }
- free_pages_ += n;
-
- IncrementalScavenge(n);
- ASSERT(Check());
-}
-
-void PageHeap::IncrementalScavenge(Length n) {
- // Fast path; not yet time to release memory
- scavenge_counter_ -= n;
- if (scavenge_counter_ >= 0) return; // Not yet time to scavenge
-
-  // Never delay scavenging for more than the following number of
-  // deallocated pages. With 4K pages, 1 << 14 comes to 64MB of
-  // deallocation (Chrome lowered this from upstream's 4GB).
- static const int kMaxReleaseDelay = 1 << 14;
-
-  // If there is nothing to release, wait for this many pages before
-  // scavenging again. With 4K pages, 1 << 12 comes to 16MB of memory
-  // (Chrome lowered this from upstream's 1GB).
- static const int kDefaultReleaseDelay = 1 << 12;
-
- const double rate = FLAGS_tcmalloc_release_rate;
- if (rate <= 1e-6) {
- // Tiny release rate means that releasing is disabled.
- scavenge_counter_ = kDefaultReleaseDelay;
- return;
- }
-
- // Find index of free list to scavenge
- int index = scavenge_index_ + 1;
- for (int i = 0; i < kMaxPages+1; i++) {
- if (index > kMaxPages) index = 0;
- SpanList* slist = (index == kMaxPages) ? &large_ : &free_[index];
- if (!DLL_IsEmpty(&slist->normal)) {
- // Release the last span on the normal portion of this list
- Span* s = slist->normal.prev;
- ASSERT(s->location == Span::ON_NORMAL_FREELIST);
- DLL_Remove(s);
- DecommitSpan(s);
- s->location = Span::ON_RETURNED_FREELIST;
- DLL_Prepend(&slist->returned, s);
-
- // Compute how long to wait until we return memory.
- // FLAGS_tcmalloc_release_rate==1 means wait for 1000 pages
- // after releasing one page.
- const double mult = 1000.0 / rate;
- double wait = mult * static_cast<double>(s->length);
- if (wait > kMaxReleaseDelay) {
- // Avoid overflow and bound to reasonable range
- wait = kMaxReleaseDelay;
- }
- scavenge_counter_ = static_cast<int64_t>(wait);
-
- scavenge_index_ = index; // Scavenge at index+1 next time
- // Note: we stop scavenging after finding one.
- return;
- }
- index++;
- }
-
- // Nothing to scavenge, delay for a while
- scavenge_counter_ = kDefaultReleaseDelay;
-}
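
// Editorial sketch (not part of this file): the delay computation
// IncrementalScavenge() uses above. With rate == 1.0, releasing one page
// makes the heap wait for 1000 more freed pages before releasing again;
// the clamp keeps very low rates from delaying scavenging indefinitely.
#include <stdint.h>
#include <cstdio>

static int64_t ScavengeDelay(double rate, int64_t pages_released,
                             int64_t max_delay) {
  double wait = (1000.0 / rate) * static_cast<double>(pages_released);
  if (wait > static_cast<double>(max_delay))
    wait = static_cast<double>(max_delay);  // bound to a reasonable range
  return static_cast<int64_t>(wait);
}

int main() {
  std::printf("%lld\n", (long long)ScavengeDelay(1.0, 1, 1 << 14));   // 1000
  std::printf("%lld\n", (long long)ScavengeDelay(10.0, 1, 1 << 14));  // 100
  std::printf("%lld\n", (long long)ScavengeDelay(0.1, 4, 1 << 14));   // 16384
  return 0;
}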
-
-void PageHeap::RegisterSizeClass(Span* span, size_t sc) {
- // Associate span object with all interior pages as well
- ASSERT(span->location == Span::IN_USE);
- ASSERT(GetDescriptor(span->start) == span);
- ASSERT(GetDescriptor(span->start+span->length-1) == span);
- Event(span, 'C', sc);
- span->sizeclass = sc;
- for (Length i = 1; i < span->length-1; i++) {
- pagemap_.set(span->start+i, span);
- }
-}
-
-static double PagesToMB(uint64_t pages) {
- return (pages << kPageShift) / 1048576.0;
-}
-
-void PageHeap::Dump(TCMalloc_Printer* out) {
- int nonempty_sizes = 0;
- for (int s = 0; s < kMaxPages; s++) {
- if (!DLL_IsEmpty(&free_[s].normal) || !DLL_IsEmpty(&free_[s].returned)) {
- nonempty_sizes++;
- }
- }
- out->printf("------------------------------------------------\n");
- out->printf("PageHeap: %d sizes; %6.1f MB free\n",
- nonempty_sizes, PagesToMB(free_pages_));
- out->printf("------------------------------------------------\n");
- uint64_t total_normal = 0;
- uint64_t total_returned = 0;
- for (int s = 0; s < kMaxPages; s++) {
- const int n_length = DLL_Length(&free_[s].normal);
- const int r_length = DLL_Length(&free_[s].returned);
- if (n_length + r_length > 0) {
- uint64_t n_pages = s * n_length;
- uint64_t r_pages = s * r_length;
- total_normal += n_pages;
- total_returned += r_pages;
- out->printf("%6u pages * %6u spans ~ %6.1f MB; %6.1f MB cum"
- "; unmapped: %6.1f MB; %6.1f MB cum\n",
- s,
- (n_length + r_length),
- PagesToMB(n_pages + r_pages),
- PagesToMB(total_normal + total_returned),
- PagesToMB(r_pages),
- PagesToMB(total_returned));
- }
- }
-
- uint64_t n_pages = 0;
- uint64_t r_pages = 0;
- int n_spans = 0;
- int r_spans = 0;
- out->printf("Normal large spans:\n");
- for (Span* s = large_.normal.next; s != &large_.normal; s = s->next) {
- out->printf(" [ %6" PRIuPTR " pages ] %6.1f MB\n",
- s->length, PagesToMB(s->length));
- n_pages += s->length;
- n_spans++;
- }
- out->printf("Unmapped large spans:\n");
- for (Span* s = large_.returned.next; s != &large_.returned; s = s->next) {
- out->printf(" [ %6" PRIuPTR " pages ] %6.1f MB\n",
- s->length, PagesToMB(s->length));
- r_pages += s->length;
- r_spans++;
- }
- total_normal += n_pages;
- total_returned += r_pages;
- out->printf(">255 large * %6u spans ~ %6.1f MB; %6.1f MB cum"
- "; unmapped: %6.1f MB; %6.1f MB cum\n",
- (n_spans + r_spans),
- PagesToMB(n_pages + r_pages),
- PagesToMB(total_normal + total_returned),
- PagesToMB(r_pages),
- PagesToMB(total_returned));
-}
-
-static void RecordGrowth(size_t growth) {
- StackTrace* t = Static::stacktrace_allocator()->New();
- t->depth = GetStackTrace(t->stack, kMaxStackDepth-1, 3);
- t->size = growth;
- t->stack[kMaxStackDepth-1] = reinterpret_cast<void*>(Static::growth_stacks());
- Static::set_growth_stacks(t);
-}
-
-bool PageHeap::GrowHeap(Length n) {
- ASSERT(kMaxPages >= kMinSystemAlloc);
- if (n > kMaxValidPages) return false;
- Length ask = (n>kMinSystemAlloc) ? n : static_cast<Length>(kMinSystemAlloc);
- size_t actual_size;
- void* ptr = TCMalloc_SystemAlloc(ask << kPageShift, &actual_size, kPageSize);
- if (ptr == NULL) {
- if (n < ask) {
- // Try growing just "n" pages
- ask = n;
- ptr = TCMalloc_SystemAlloc(ask << kPageShift, &actual_size, kPageSize);
- }
- if (ptr == NULL) return false;
- }
- ask = actual_size >> kPageShift;
- RecordGrowth(ask << kPageShift);
-
- uint64_t old_system_bytes = system_bytes_;
- system_bytes_ += (ask << kPageShift);
- committed_bytes_ += (ask << kPageShift);
- const PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift;
- ASSERT(p > 0);
-
-  // If we already have a lot of pages allocated, preallocate a chunk of
-  // memory for the pagemap. This prevents fragmentation by pagemap metadata
- // when a program keeps allocating and freeing large blocks.
-
- if (old_system_bytes < kPageMapBigAllocationThreshold
- && system_bytes_ >= kPageMapBigAllocationThreshold) {
- pagemap_.PreallocateMoreMemory();
- }
-
- // Make sure pagemap_ has entries for all of the new pages.
- // Plus ensure one before and one after so coalescing code
- // does not need bounds-checking.
- if (pagemap_.Ensure(p-1, ask+2)) {
- // Pretend the new area is allocated and then Delete() it to
- // cause any necessary coalescing to occur.
- //
- // We do not adjust free_pages_ here since Delete() will do it for us.
- Span* span = NewSpan(p, ask);
- RecordSpan(span);
- Delete(span);
- ASSERT(Check());
- return true;
- } else {
- // We could not allocate memory within "pagemap_"
- // TODO: Once we can return memory to the system, return the new span
- return false;
- }
-}
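
// Editorial sketch (not part of this file): the size arithmetic GrowHeap()
// performs. The shift below assumes 4K pages, and the minimum mirrors the
// header's kMinSystemAlloc definition; neither is the build's real constant.
#include <stdint.h>
#include <cstdio>

static const int kFakePageShift = 12;  // 4K pages (assumed)
static const uint64_t kFakeMinSystemAlloc = 1 << (20 - kFakePageShift);

int main() {
  uint64_t n = 3;  // pages requested
  // Ask the OS for at least a 1MB chunk to amortize system-call overhead.
  uint64_t ask = (n > kFakeMinSystemAlloc) ? n : kFakeMinSystemAlloc;
  uint64_t bytes = ask << kFakePageShift;          // pages -> bytes
  uint64_t pages_back = bytes >> kFakePageShift;   // bytes -> pages
  std::printf("ask=%llu pages, %llu bytes, %llu pages back\n",
              (unsigned long long)ask, (unsigned long long)bytes,
              (unsigned long long)pages_back);  // 256 pages, 1048576 bytes
  return 0;
}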
-
-bool PageHeap::Check() {
- ASSERT(free_[0].normal.next == &free_[0].normal);
- ASSERT(free_[0].returned.next == &free_[0].returned);
- return true;
-}
-
-bool PageHeap::CheckExpensive() {
- bool result = Check();
- CheckList(&large_.normal, kMaxPages, 1000000000, Span::ON_NORMAL_FREELIST);
- CheckList(&large_.returned, kMaxPages, 1000000000, Span::ON_RETURNED_FREELIST);
- for (Length s = 1; s < kMaxPages; s++) {
- CheckList(&free_[s].normal, s, s, Span::ON_NORMAL_FREELIST);
- CheckList(&free_[s].returned, s, s, Span::ON_RETURNED_FREELIST);
- }
- return result;
-}
-
-bool PageHeap::CheckList(Span* list, Length min_pages, Length max_pages,
- int freelist) {
- for (Span* s = list->next; s != list; s = s->next) {
- CHECK_CONDITION(s->location == freelist); // NORMAL or RETURNED
- CHECK_CONDITION(s->length >= min_pages);
- CHECK_CONDITION(s->length <= max_pages);
- CHECK_CONDITION(GetDescriptor(s->start) == s);
- CHECK_CONDITION(GetDescriptor(s->start+s->length-1) == s);
- }
- return true;
-}
-
-void PageHeap::ReleaseFreeList(Span* list, Span* returned) {
- // Walk backwards through list so that when we push these
- // spans on the "returned" list, we preserve the order.
- while (!DLL_IsEmpty(list)) {
- Span* s = list->prev;
- DLL_Remove(s);
- DLL_Prepend(returned, s);
- ASSERT(s->location == Span::ON_NORMAL_FREELIST);
- s->location = Span::ON_RETURNED_FREELIST;
- DecommitSpan(s);
- }
-}
-
-void PageHeap::ReleaseFreePages() {
- for (Length s = 0; s < kMaxPages; s++) {
- ReleaseFreeList(&free_[s].normal, &free_[s].returned);
- }
- ReleaseFreeList(&large_.normal, &large_.returned);
- ASSERT(Check());
-}
-
-} // namespace tcmalloc
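An editorial C++ sketch of the coalescing rule PageHeap::Delete() applies above, reduced to plain intervals with hypothetical names (Interval, Coalesce): a freed span absorbs a free left neighbor that ends exactly at its start and a free right neighbor that begins just past its end, so the free lists always hold maximal runs.

#include <stdint.h>
#include <cstddef>
#include <cstdio>

struct Interval { uint64_t start; uint64_t length; };

static Interval Coalesce(Interval span, const Interval* prev,
                         const Interval* next) {
  if (prev != NULL && prev->start + prev->length == span.start) {
    span.start -= prev->length;   // merge the preceding free run
    span.length += prev->length;
  }
  if (next != NULL && next->start == span.start + span.length) {
    span.length += next->length;  // merge the following free run
  }
  return span;
}

int main() {
  Interval prev = {10, 2};
  Interval next = {16, 4};
  Interval cur = {12, 4};
  Interval merged = Coalesce(cur, &prev, &next);
  std::printf("[%llu, %llu)\n",  // prints [10, 20)
              (unsigned long long)merged.start,
              (unsigned long long)(merged.start + merged.length));
  return 0;
}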
diff --git a/third_party/tcmalloc/page_heap.h b/third_party/tcmalloc/page_heap.h
deleted file mode 100644
index 100cae4..0000000
--- a/third_party/tcmalloc/page_heap.h
+++ /dev/null
@@ -1,241 +0,0 @@
-// Copyright (c) 2008, Google Inc.
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// ---
-// Author: Sanjay Ghemawat <opensource@google.com>
-
-#ifndef TCMALLOC_PAGE_HEAP_H_
-#define TCMALLOC_PAGE_HEAP_H_
-
-#include <config.h>
-#include "common.h"
-#include "packed-cache-inl.h"
-#include "pagemap.h"
-#include "span.h"
-
-// This macro should almost never be defined. Set NO_TCMALLOC_SAMPLES if
-// you're porting to a system where you really can't get a stacktrace.
-#ifdef NO_TCMALLOC_SAMPLES
- // We use #define so code compiles even if you #include stacktrace.h somehow.
-# define GetStackTrace(stack, depth, skip) (0)
-#else
-# include <google/stacktrace.h>
-#endif
-
-namespace tcmalloc {
-
-// -------------------------------------------------------------------------
-// Map from page-id to per-page data
-// -------------------------------------------------------------------------
-
-// We use PageMap2<> for 32-bit and PageMap3<> for 64-bit machines.
-// We also use a simple one-level cache for hot PageID-to-sizeclass mappings,
-// because sometimes the sizeclass is all the information we need.
-
-// Selector class -- general selector uses 3-level map
-template <int BITS> class MapSelector {
- public:
- typedef TCMalloc_PageMap3<BITS-kPageShift> Type;
- typedef PackedCache<BITS-kPageShift, uint64_t> CacheType;
-};
-
-// A two-level map for 32-bit machines
-template <> class MapSelector<32> {
- public:
- typedef TCMalloc_PageMap2<32-kPageShift> Type;
- typedef PackedCache<32-kPageShift, uint16_t> CacheType;
-};
-
-// -------------------------------------------------------------------------
-// Page-level allocator
-// * Eager coalescing
-//
- // Heap for page-level allocation. We allow allocating and freeing
- // contiguous runs of pages (each such run is called a "span").
-// -------------------------------------------------------------------------
-
-class PageHeap {
- public:
- PageHeap();
-
- // Allocate a run of "n" pages. Returns zero if out of memory.
- // Caller should not pass "n == 0" -- instead, n should have
- // been rounded up already.
- Span* New(Length n);
-
- // Delete the span "[p, p+n-1]".
- // REQUIRES: span was returned by earlier call to New() and
- // has not yet been deleted.
- void Delete(Span* span);
-
- // Mark an allocated span as being used for small objects of the
- // specified size-class.
- // REQUIRES: span was returned by an earlier call to New()
- // and has not yet been deleted.
- void RegisterSizeClass(Span* span, size_t sc);
-
- // Split an allocated span into two spans: one of length "n" pages
- // followed by another span of length "span->length - n" pages.
- // Modifies "*span" to point to the first span of length "n" pages.
- // Returns a pointer to the second span.
- //
- // REQUIRES: "0 < n < span->length"
- // REQUIRES: span->location == IN_USE
- // REQUIRES: span->sizeclass == 0
- Span* Split(Span* span, Length n);
-
- // Return the descriptor for the specified page.
- inline Span* GetDescriptor(PageID p) const {
- return reinterpret_cast<Span*>(pagemap_.get(p));
- }
-
- // Dump state to stderr
- void Dump(TCMalloc_Printer* out);
-
- // Return number of bytes allocated from system
- inline uint64_t SystemBytes() const { return system_bytes_; }
-
- inline uint64_t CommittedBytes() const { return committed_bytes_; }
-
- // Return number of free bytes in heap
- uint64_t FreeBytes() const {
- return (static_cast<uint64_t>(free_pages_) << kPageShift);
- }
-
- bool Check();
- // Like Check() but does some more comprehensive checking.
- bool CheckExpensive();
- bool CheckList(Span* list, Length min_pages, Length max_pages,
- int freelist); // ON_NORMAL_FREELIST or ON_RETURNED_FREELIST
-
- // Release all pages on the free list for reuse by the OS:
- void ReleaseFreePages();
-
- // Return 0 if we have no information, or else the correct sizeclass for p.
- // Reads and writes to pagemap_cache_ do not require locking.
- // The entries are 64 bits on 64-bit hardware and 16 bits on
- // 32-bit hardware, and we don't mind raciness as long as each read of
- // an entry yields a valid entry, not a partially updated entry.
- size_t GetSizeClassIfCached(PageID p) const {
- return pagemap_cache_.GetOrDefault(p, 0);
- }
- void CacheSizeClass(PageID p, size_t cl) const { pagemap_cache_.Put(p, cl); }
-
- private:
- // Allocates a big block of memory for the pagemap once we reach more than
- // 128MB
- static const size_t kPageMapBigAllocationThreshold = 128 << 20;
-
- // Minimum number of pages to fetch from system at a time. Must be
- // significantly bigger than kBlockSize to amortize system-call
-  // overhead, and also to reduce external fragmentation. Also, we
- // should keep this value big because various incarnations of Linux
- // have small limits on the number of mmap() regions per
- // address-space.
- static const int kMinSystemAlloc = 1 << (20 - kPageShift);
-
- // For all span-lengths < kMaxPages we keep an exact-size list.
- // REQUIRED: kMaxPages >= kMinSystemAlloc;
- static const size_t kMaxPages = kMinSystemAlloc;
-
- // Pick the appropriate map and cache types based on pointer size
- typedef MapSelector<8*sizeof(uintptr_t)>::Type PageMap;
- typedef MapSelector<8*sizeof(uintptr_t)>::CacheType PageMapCache;
- PageMap pagemap_;
- mutable PageMapCache pagemap_cache_;
-
- // We segregate spans of a given size into two circular linked
- // lists: one for normal spans, and one for spans whose memory
- // has been returned to the system.
- struct SpanList {
- Span normal;
- Span returned;
- };
-
- // List of free spans of length >= kMaxPages
- SpanList large_;
-
- // Array mapping from span length to a doubly linked list of free spans
- SpanList free_[kMaxPages];
-
- // Number of pages kept in free lists
- uintptr_t free_pages_;
-
- // Bytes allocated from system
- uint64_t system_bytes_;
-
- // Bytes committed, always <= system_bytes_.
- uint64_t committed_bytes_;
-
- bool GrowHeap(Length n);
-
- // REQUIRES: span->length >= n
- // REQUIRES: span->location != IN_USE
- // Remove span from its free list, and move any leftover part of
- // span into appropriate free lists. Also update "span" to have
- // length exactly "n" and mark it as non-free so it can be returned
- // to the client. After all that, decrease free_pages_ by n and
- // return span.
- Span* Carve(Span* span, Length n);
-
- void RecordSpan(Span* span) {
- pagemap_.set(span->start, span);
- if (span->length > 1) {
- pagemap_.set(span->start + span->length - 1, span);
- }
- }
-
- // Allocate a large span of length == n. If successful, returns a
- // span of exactly the specified length. Else, returns NULL.
- Span* AllocLarge(Length n);
-
- // Commit the span.
- void CommitSpan(Span* span);
-
- // Decommit the span.
- void DecommitSpan(Span* span);
-
- // Incrementally release some memory to the system.
- // IncrementalScavenge(n) is called whenever n pages are freed.
- void IncrementalScavenge(Length n);
-
- // Releases all memory held in the given list's 'normal' freelist and adds
-  // it to the 'returned' freelist.
- void ReleaseFreeList(Span* list, Span* returned);
-
- // Number of pages to deallocate before doing more scavenging
- int64_t scavenge_counter_;
-
- // Index of last free list we scavenged
- int scavenge_index_;
-};
-
-} // namespace tcmalloc
-
-#endif // TCMALLOC_PAGE_HEAP_H_
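An editorial sketch of the compile-time dispatch MapSelector<> performs above, with stand-in map types instead of the real TCMalloc_PageMap2/3: specializing the template on the pointer width makes 32-bit builds pick the shallower radix map automatically.

#include <stdint.h>
#include <cstdio>

struct ThreeLevelMap { static const char* Name() { return "3-level map"; } };
struct TwoLevelMap   { static const char* Name() { return "2-level map"; } };

// General case: deep map. 32-bit specialization: shallow map.
template <int BITS> struct Selector { typedef ThreeLevelMap Type; };
template <>         struct Selector<32> { typedef TwoLevelMap Type; };

int main() {
  // 8 * sizeof(uintptr_t) is 32 or 64 depending on the build, exactly as
  // page_heap.h computes it.
  typedef Selector<8 * sizeof(uintptr_t)>::Type Map;
  std::printf("%s selected on this build\n", Map::Name());
  return 0;
}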
diff --git a/third_party/tcmalloc/port.cc b/third_party/tcmalloc/port.cc
deleted file mode 100644
index 3902ca7..0000000
--- a/third_party/tcmalloc/port.cc
+++ /dev/null
@@ -1,302 +0,0 @@
-/* Copyright (c) 2007, Google Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * ---
- * Author: Craig Silverstein
- */
-
-#ifndef _WIN32
-# error You should only be including windows/port.cc in a windows environment!
-#endif
-
-#include <config.h>
-#include <string.h> // for strlen(), memset(), memcmp()
-#include <assert.h>
-#include <stdarg.h> // for va_list, va_start, va_end
-#include <windows.h>
-#include "port.h"
-#include "base/logging.h"
-#include "base/spinlock.h"
-#include "system-alloc.h"
-
-// -----------------------------------------------------------------------
-// Basic libraries
-
-// These call the windows _vsnprintf, but always NUL-terminate.
-int safe_vsnprintf(char *str, size_t size, const char *format, va_list ap) {
- if (size == 0) // not even room for a \0?
- return -1; // not what C99 says to do, but what windows does
- str[size-1] = '\0';
- return _vsnprintf(str, size-1, format, ap);
-}
-
-#ifndef HAVE_SNPRINTF
-int snprintf(char *str, size_t size, const char *format, ...) {
- va_list ap;
- va_start(ap, format);
- const int r = vsnprintf(str, size, format, ap);
- va_end(ap);
- return r;
-}
-#endif
-
-int getpagesize() {
- static int pagesize = 0;
- if (pagesize == 0) {
- SYSTEM_INFO system_info;
- GetSystemInfo(&system_info);
- pagesize = std::max(system_info.dwPageSize,
- system_info.dwAllocationGranularity);
- }
- return pagesize;
-}
-
-extern "C" PERFTOOLS_DLL_DECL void* __sbrk(ptrdiff_t increment) {
- LOG(FATAL, "Windows doesn't implement sbrk!\n");
- return NULL;
-}
-
-// -----------------------------------------------------------------------
-// Threads code
-
-bool CheckIfKernelSupportsTLS() {
- // TODO(csilvers): return true (all win's since win95, at least, support this)
- return false;
-}
-
-// Windows doesn't support pthread_key_create's destr_function, and in
-// fact it's a bit tricky to get code to run when a thread exits. This
-// is cargo-cult magic from http://www.codeproject.com/threads/tls.asp.
-// This code is for VC++ 7.1 and later; VC++ 6.0 support is possible
-// but more busy-work -- see the webpage for how to do it. If all
-// this fails, we could use DllMain instead. The big problem with
-// DllMain is it doesn't run if this code is statically linked into a
-// binary (it also doesn't run if the thread is terminated via
-// TerminateThread, which if we're lucky this routine does).
-
-// This makes the linker create the TLS directory if it's not already
-// there (that is, even if __declspec(thread) is not used).
-#ifdef _MSC_VER
-#pragma comment(linker, "/INCLUDE:__tls_used")
-#endif
-
-// When destr_fn eventually runs, it's supposed to take as its
-// argument the tls-value associated with key that pthread_key_create
-// creates. (Yeah, it sounds confusing but it's really not.) We
-// store the destr_fn/key pair in this data structure. Because we
-// store this in a single var, this implies we can only have one
-// destr_fn in a program! That's enough in practice. If asserts
-// trigger because we end up needing more, we'll have to turn this
-// into an array.
-struct DestrFnClosure {
- void (*destr_fn)(void*);
- pthread_key_t key_for_destr_fn_arg;
-};
-
-static DestrFnClosure destr_fn_info; // initted to all NULL/0.
-
-static int on_process_term(void) {
- if (destr_fn_info.destr_fn) {
- void *ptr = TlsGetValue(destr_fn_info.key_for_destr_fn_arg);
- // This shouldn't be necessary, but in Release mode, Windows
- // sometimes trashes the pointer in the TLS slot, so we need to
- // remove the pointer from the TLS slot before the thread dies.
- TlsSetValue(destr_fn_info.key_for_destr_fn_arg, NULL);
- if (ptr) // pthread semantics say not to call if ptr is NULL
- (*destr_fn_info.destr_fn)(ptr);
- }
- return 0;
-}
-
-static void NTAPI on_tls_callback(HINSTANCE h, DWORD dwReason, PVOID pv) {
- if (dwReason == DLL_THREAD_DETACH) { // thread is being destroyed!
- on_process_term();
- }
-}
-
-#ifdef _MSC_VER
-
-// This tells the linker to run these functions.
-#pragma data_seg(push, old_seg)
-#pragma data_seg(".CRT$XLB")
-static void (NTAPI *p_thread_callback)(HINSTANCE h, DWORD dwReason, PVOID pv)
- = on_tls_callback;
-#pragma data_seg(".CRT$XTU")
-static int (*p_process_term)(void) = on_process_term;
-#pragma data_seg(pop, old_seg)
-
-#else // #ifdef _MSC_VER [probably msys/mingw]
-
-// We have to try the DllMain solution here, because we can't use the
-// msvc-specific pragmas.
-BOOL WINAPI DllMain(HINSTANCE h, DWORD dwReason, PVOID pv) {
- if (dwReason == DLL_THREAD_DETACH)
- on_tls_callback(h, dwReason, pv);
- else if (dwReason == DLL_PROCESS_DETACH)
- on_process_term();
- return TRUE;
-}
-
-#endif // #ifdef _MSC_VER
-
-pthread_key_t PthreadKeyCreate(void (*destr_fn)(void*)) {
- // Semantics are: we create a new key, and then promise to call
- // destr_fn with TlsGetValue(key) when the thread is destroyed
- // (as long as TlsGetValue(key) is not NULL).
- pthread_key_t key = TlsAlloc();
- if (destr_fn) { // register it
- // If this assert fails, we'll need to support an array of destr_fn_infos
- assert(destr_fn_info.destr_fn == NULL);
- destr_fn_info.destr_fn = destr_fn;
- destr_fn_info.key_for_destr_fn_arg = key;
- }
- return key;
-}
-
-
-// -----------------------------------------------------------------------
-// These functions replace system-alloc.cc
-
-static SpinLock alloc_lock(SpinLock::LINKER_INITIALIZED);
-
-// This is mostly like MmapSysAllocator::Alloc, except that it avoids the
-// mid-allocation munmap trimming done there, which is forbidden on windows.
-extern void* TCMalloc_SystemAlloc(size_t size, size_t *actual_size,
- size_t alignment) {
- SpinLockHolder sh(&alloc_lock);
- // Align on the pagesize boundary
- const int pagesize = getpagesize();
- if (alignment < pagesize) alignment = pagesize;
- size = ((size + alignment - 1) / alignment) * alignment;
-
- // Report the total number of bytes the OS actually delivered. This might be
- // greater than |size| because of alignment concerns. The full size is
- // necessary so that adjacent spans can be coalesced.
- // TODO(antonm): proper processing of alignments
- // in actual_size and decommitting.
- if (actual_size) {
- *actual_size = size;
- }
-
- // We currently do not support alignments larger than the pagesize or
- // alignments that are not multiples of the pagesize after being floored.
- // If this ability is needed it can be done by the caller (assuming it knows
- // the page size).
- assert(alignment <= pagesize);
-
- void* result = VirtualAlloc(0, size,
- MEM_COMMIT|MEM_RESERVE, PAGE_READWRITE);
- if (result == NULL)
- return NULL;
-
- // If the result is not aligned memory fragmentation will result which can
- // lead to pathological memory use.
- assert((reinterpret_cast<uintptr_t>(result) & (alignment - 1)) == 0);
-
- return result;
-}
-
-void TCMalloc_SystemRelease(void* start, size_t length) {
- if (VirtualFree(start, length, MEM_DECOMMIT))
- return;
-
- // The decommit may fail if the memory region consists of allocations
- // from more than one call to VirtualAlloc. In this case, fall back to
- // using VirtualQuery to retrieve the allocation boundaries and decommit
- // them each individually.
-
- char* ptr = static_cast<char*>(start);
- char* end = ptr + length;
- MEMORY_BASIC_INFORMATION info;
- while (ptr < end) {
- size_t resultSize = VirtualQuery(ptr, &info, sizeof(info));
- assert(resultSize == sizeof(info));
- size_t decommitSize = std::min<size_t>(info.RegionSize, end - ptr);
- BOOL success = VirtualFree(ptr, decommitSize, MEM_DECOMMIT);
- assert(success == TRUE);
- ptr += decommitSize;
- }
-}
-
-void TCMalloc_SystemCommit(void* start, size_t length)
-{
- if (VirtualAlloc(start, length, MEM_COMMIT, PAGE_READWRITE) == start)
- return;
-
- // The commit may fail if the memory region consists of allocations
- // from more than one call to VirtualAlloc. In this case, fall back to
- // using VirtualQuery to retrieve the allocation boundaries and commit them
- // each individually.
-
- char* ptr = static_cast<char*>(start);
- char* end = ptr + length;
- MEMORY_BASIC_INFORMATION info;
- while (ptr < end) {
- size_t resultSize = VirtualQuery(ptr, &info, sizeof(info));
- assert(resultSize == sizeof(info));
-
- size_t commitSize = std::min<size_t>(info.RegionSize, end - ptr);
- void* newAddress = VirtualAlloc(ptr, commitSize, MEM_COMMIT,
- PAGE_READWRITE);
- assert(newAddress == ptr);
- ptr += commitSize;
- }
-}
-
-bool RegisterSystemAllocator(SysAllocator *allocator, int priority) {
- return false; // we don't allow registration on windows, right now
-}
-
-void DumpSystemAllocatorStats(TCMalloc_Printer* printer) {
- // We don't dump stats on windows, right now
-}
-
-
-// -----------------------------------------------------------------------
-// These functions rework existing functions of the same name in the
-// Google codebase.
-
-// A replacement for HeapProfiler::CleanupOldProfiles.
-void DeleteMatchingFiles(const char* prefix, const char* full_glob) {
- WIN32_FIND_DATAA found; // that final A is for Ansi (as opposed to Unicode)
- HANDLE hFind = FindFirstFileA(full_glob, &found); // A is for Ansi
- if (hFind != INVALID_HANDLE_VALUE) {
- const int prefix_length = strlen(prefix);
- do {
- const char *fname = found.cFileName;
- if ((strlen(fname) >= prefix_length) &&
- (memcmp(fname, prefix, prefix_length) == 0)) {
- RAW_VLOG(0, "Removing old heap profile %s\n", fname);
- // TODO(csilvers): we really need to unlink dirname + fname
- _unlink(fname);
- }
- } while (FindNextFileA(hFind, &found) != FALSE); // A is for Ansi
- FindClose(hFind);
- }
-}
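An editorial sketch of the region-walk fallback used by TCMalloc_SystemRelease() and TCMalloc_SystemCommit() above. A hypothetical FakeRegionSize() stands in for VirtualQuery() so the loop structure runs anywhere: when one call cannot cover a range built from several reservations, walk it region by region.

#include <algorithm>
#include <cstddef>
#include <cstdio>

// Pretend the OS reserved memory in 4K regions; VirtualQuery() would report
// the size of the real region containing 'offset'.
static size_t FakeRegionSize(size_t offset) {
  return 4096 - (offset % 4096);
}

static void WalkRegions(size_t start, size_t length) {
  size_t ptr = start;
  const size_t end = start + length;
  while (ptr < end) {
    // Never operate past the end of the requested range or the region.
    size_t chunk = std::min(FakeRegionSize(ptr), end - ptr);
    std::printf("decommit [%zu, %zu)\n", ptr, ptr + chunk);
    ptr += chunk;
  }
}

int main() {
  WalkRegions(1000, 10000);  // crosses three 4K region boundaries
  return 0;
}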
diff --git a/third_party/tcmalloc/prep_libc.sh b/third_party/tcmalloc/prep_libc.sh
deleted file mode 100644
index 5a361ac..0000000
--- a/third_party/tcmalloc/prep_libc.sh
+++ /dev/null
@@ -1,35 +0,0 @@
-#!/bin/sh
-# This script takes libcmt.lib for VS2005 or VS2008 and removes the
-# allocation-related functions from it.
-#
-# Usage: prep_libc.sh <VCInstallDir> <OutputDir>
-#
-# VCInstallDir is the path where VC is installed, typically:
-# C:\Program Files\Microsoft Visual Studio 8\VC\
-#
-# OutputDir is the directory where the modified libcmt file should be stored.
-#
-
-LIBCMT="${1}\\libcmt.lib"
-LIBCMTPDB="${1}\\libcmt.pdb"
-OUTDIR=$2
-OUTCMT="${2}\\libcmt.lib"
-
-mkdir -p "$OUTDIR"
-cp "$LIBCMT" "$OUTDIR"
-cp "$LIBCMTPDB" "$OUTDIR"
-
-
-# We'll remove the symbols based on paths found in either the VS2005 or VS2008
-# libcmt.lib files.
-LIBCMTSRCPATHVS2005="build\\intel\\mt_obj\\"
-LIBCMTSRCPATHVS2008="f:\\dd\\vctools\\crt_bld\\SELF_X86\\crt\\src\\build\\INTEL\\mt_obj\\"
-
-OBJFILES="malloc.obj free.obj realloc.obj new.obj delete.obj new2.obj delete2.obj align.obj msize.obj heapinit.obj expand.obj heapchk.obj heapwalk.obj heapmin.obj sbheap.obj calloc.obj recalloc.obj calloc_impl.obj new_mode.obj newopnt.obj"
-
-for FILE in $OBJFILES
-do
- echo ${FILE}
-  LIB /NOLOGO /IGNORE:4006,4014,4221 /REMOVE:${LIBCMTSRCPATHVS2005}${FILE} "$OUTCMT"
-  LIB /NOLOGO /IGNORE:4006,4014,4221 /REMOVE:${LIBCMTSRCPATHVS2008}${FILE} "$OUTCMT"
-done
diff --git a/third_party/tcmalloc/symbolize_linux.cc b/third_party/tcmalloc/symbolize_linux.cc
deleted file mode 100644
index 9c15e02..0000000
--- a/third_party/tcmalloc/symbolize_linux.cc
+++ /dev/null
@@ -1,188 +0,0 @@
-// Copyright (c) 2009, Google Inc.
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// ---
-// Author: Craig Silverstein
-//
-// This forks out to pprof to do the actual symbolizing. We might
-// be better off writing our own in C++.
-
-#include "config.h"
-#ifdef HAVE_INTTYPES_H
-#include <inttypes.h>
-#endif
-#include "symbolize.h"
-#include <stdlib.h>
-#ifdef HAVE_UNISTD_H
-#include <unistd.h> // for write()
-#endif
-#ifdef HAVE_SYS_SOCKET_H
-#include <sys/socket.h> // for socketpair() -- needed by Symbolize
-#endif
-#ifdef HAVE_SYS_WAIT_H
-#include <sys/wait.h> // for wait() -- needed by Symbolize
-#endif
-#ifdef HAVE_POLL_H
-#include <poll.h>
-#endif
-#include <string>
-#include "base/commandlineflags.h"
-#include "base/sysinfo.h"
-
-using std::string;
-using tcmalloc::DumpProcSelfMaps; // from sysinfo.h
-
-
-DEFINE_string(symbolize_pprof,
- EnvToString("PPROF_PATH", "pprof"),
- "Path to pprof to call for reporting function names.");
-
-// heap_profile_table_pprof may be referenced after destructors are
-// called (since that's when leak-checking is done), so we make
-// a more-permanent copy that won't ever get destroyed.
-static string* g_pprof_path = new string(FLAGS_symbolize_pprof);
-
-// Updates symbolization_table with the pointers to symbol names corresponding
-// to its keys. The symbol names are stored in out, which is allocated and
-// freed by the caller of this routine.
-// Note that the forking/etc is not thread-safe or re-entrant. That's
-// ok for the purpose we need -- reporting leaks detected by heap-checker
-// -- but be careful if you decide to use this routine for other purposes.
-extern bool Symbolize(char *out, int out_size,
- SymbolMap *symbolization_table) {
-#if !defined(HAVE_UNISTD_H) || !defined(HAVE_SYS_SOCKET_H) || !defined(HAVE_SYS_WAIT_H)
- return false;
-#elif !defined(HAVE_PROGRAM_INVOCATION_NAME)
- return false; // TODO(csilvers): get argv[0] somehow
-#else
- // All this work is to do two-way communication. ugh.
- extern char* program_invocation_name; // gcc provides this
- int child_in[2]; // file descriptors
- int child_out[2]; // for now, we don't worry about child_err
- if (socketpair(AF_UNIX, SOCK_STREAM, 0, child_in) == -1) {
- return false;
- }
- if (socketpair(AF_UNIX, SOCK_STREAM, 0, child_out) == -1) {
- close(child_in[0]);
- close(child_in[1]);
- return false;
- }
- switch (fork()) {
- case -1: { // error
- close(child_in[0]);
- close(child_in[1]);
- close(child_out[0]);
- close(child_out[1]);
- return false;
- }
- case 0: { // child
- close(child_in[1]); // child uses the 0's, parent uses the 1's
- close(child_out[1]); // child uses the 0's, parent uses the 1's
- close(0);
- close(1);
- if (dup2(child_in[0], 0) == -1) _exit(1);
- if (dup2(child_out[0], 1) == -1) _exit(2);
- // Unset vars that might cause trouble when we fork
- unsetenv("CPUPROFILE");
- unsetenv("HEAPPROFILE");
- unsetenv("HEAPCHECK");
- unsetenv("PERFTOOLS_VERBOSE");
- execlp(g_pprof_path->c_str(), g_pprof_path->c_str(),
- "--symbols", program_invocation_name, NULL);
-      _exit(3); // if execlp fails, it's bad news for us
- }
- default: { // parent
- close(child_in[0]); // child uses the 0's, parent uses the 1's
- close(child_out[0]); // child uses the 0's, parent uses the 1's
-#ifdef HAVE_POLL_H
- // For maximum safety, we check to make sure the execlp
- // succeeded before trying to write. (Otherwise we'll get a
- // SIGPIPE.) For systems without poll.h, we'll just skip this
- // check, and trust that the user set PPROF_PATH correctly!
- struct pollfd pfd = { child_in[1], POLLOUT, 0 };
- if (!poll(&pfd, 1, 0) || !(pfd.revents & POLLOUT) ||
- (pfd.revents & (POLLHUP|POLLERR))) {
- return false;
- }
-#endif
- DumpProcSelfMaps(child_in[1]); // what pprof expects on stdin
-
- char pcstr[64]; // enough for a single address
- for (SymbolMap::const_iterator iter = symbolization_table->begin();
- iter != symbolization_table->end(); ++iter) {
- snprintf(pcstr, sizeof(pcstr), // pprof expects format to be 0xXXXXXX
- "0x%" PRIxPTR "\n", iter->first);
- // TODO(glider): the number of write()s can be reduced by using
- // snprintf() here.
- ssize_t size = write(child_in[1], pcstr, strlen(pcstr));
- // Use size so we don't get an unused variable warning.
- CHECK(size);
- }
- close(child_in[1]); // that's all we need to write
-
- int total_bytes_read = 0;
- memset(out, '\0', out_size);
- while (1) {
- int bytes_read = read(child_out[1], out + total_bytes_read,
- out_size - total_bytes_read);
- if (bytes_read < 0) {
- close(child_out[1]);
- return false;
- } else if (bytes_read == 0) {
- close(child_out[1]);
- wait(NULL);
- break;
- } else {
- total_bytes_read += bytes_read;
- }
- }
- // We have successfully read the output of pprof into out. Make sure
- // the last symbol is full (we can tell because it ends with a \n).
- // TODO(glider): even when the last symbol is full, the list of symbols
- // may be incomplete. We should check for that and return the number of
- // symbols we actually get from pprof.
- if (total_bytes_read == 0 || out[total_bytes_read - 1] != '\n')
- return false;
- // make the symbolization_table values point to the output vector
- SymbolMap::iterator fill = symbolization_table->begin();
- char *current_name = out;
- for (int i = 0; i < total_bytes_read; i++) {
- if (out[i] == '\n') {
- fill->second = current_name;
- out[i] = '\0';
- current_name = out + i + 1;
- fill++;
- }
- }
- return true;
- }
- }
- return false; // shouldn't be reachable
-#endif
-}
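An editorial POSIX sketch of the fork/exec round trip Symbolize() performs above, with /bin/cat standing in for pprof so the pattern can be run without perftools installed. It simplifies the original by using a single socketpair for both directions where Symbolize() allocates two.

#include <sys/socket.h>
#include <sys/wait.h>
#include <unistd.h>
#include <cstdio>
#include <cstring>

int main() {
  int fds[2];  // a stream socketpair is bidirectional
  if (socketpair(AF_UNIX, SOCK_STREAM, 0, fds) == -1) return 1;
  pid_t pid = fork();
  if (pid == -1) return 1;
  if (pid == 0) {               // child: wire stdin/stdout to the socket
    close(fds[1]);
    if (dup2(fds[0], 0) == -1) _exit(1);
    if (dup2(fds[0], 1) == -1) _exit(2);
    execlp("cat", "cat", (char*)NULL);  // echoes whatever the parent sends
    _exit(3);                   // reached only if execlp fails
  }
  close(fds[0]);                // parent keeps the other end
  const char msg[] = "0xdeadbeef\n";
  write(fds[1], msg, strlen(msg));
  shutdown(fds[1], SHUT_WR);    // send EOF so cat terminates
  char buf[64];
  ssize_t n = read(fds[1], buf, sizeof(buf) - 1);
  if (n > 0) {
    buf[n] = '\0';
    std::printf("child echoed: %s", buf);
  }
  close(fds[1]);
  wait(NULL);
  return 0;
}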
diff --git a/third_party/tcmalloc/system-alloc.cc b/third_party/tcmalloc/system-alloc.cc
deleted file mode 100644
index 21d9b43..0000000
--- a/third_party/tcmalloc/system-alloc.cc
+++ /dev/null
@@ -1,505 +0,0 @@
-// Copyright (c) 2005, Google Inc.
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// ---
-// Author: Sanjay Ghemawat
-
-#include <config.h>
-#if defined HAVE_STDINT_H
-#include <stdint.h>
-#elif defined HAVE_INTTYPES_H
-#include <inttypes.h>
-#else
-#include <sys/types.h>
-#endif
-#ifdef HAVE_UNISTD_H
-#include <unistd.h>
-#endif
-#include <fcntl.h> // for open()
-#ifdef HAVE_MMAP
-#include <sys/mman.h>
-#endif
-#include <errno.h>
-#include "system-alloc.h"
-#include "internal_logging.h"
-#include "base/logging.h"
-#include "base/commandlineflags.h"
-#include "base/spinlock.h"
-
-// On systems (like freebsd) that don't define MAP_ANONYMOUS, use the old
-// form of the name instead.
-#ifndef MAP_ANONYMOUS
-# define MAP_ANONYMOUS MAP_ANON
-#endif
-
-// Solaris has a bug where it doesn't declare madvise() for C++.
-// http://www.opensolaris.org/jive/thread.jspa?threadID=21035&tstart=0
-#if defined(__sun) && defined(__SVR4)
-# include <sys/types.h> // for caddr_t
- extern "C" { extern int madvise(caddr_t, size_t, int); }
-#endif
-
-// Set kDebugMode so that we can use C++ conditionals
-// instead of preprocessor conditionals.
-#ifdef NDEBUG
-static const bool kDebugMode = false;
-#else
-static const bool kDebugMode = true;
-#endif
-
-// Structure for discovering alignment
-union MemoryAligner {
- void* p;
- double d;
- size_t s;
-};
-
-static SpinLock spinlock(SpinLock::LINKER_INITIALIZED);
-
-#if defined(HAVE_MMAP) || defined(MADV_DONTNEED)
-// Page size is initialized on demand (only needed for mmap-based allocators)
-static size_t pagesize = 0;
-#endif
-
-// Configuration parameters.
-
-DEFINE_int32(malloc_devmem_start,
- EnvToInt("TCMALLOC_DEVMEM_START", 0),
- "Physical memory starting location in MB for /dev/mem allocation."
- " Setting this to 0 disables /dev/mem allocation");
-DEFINE_int32(malloc_devmem_limit,
- EnvToInt("TCMALLOC_DEVMEM_LIMIT", 0),
- "Physical memory limit location in MB for /dev/mem allocation."
- " Setting this to 0 means no limit.");
-DEFINE_bool(malloc_skip_sbrk,
- EnvToBool("TCMALLOC_SKIP_SBRK", false),
- "Whether sbrk can be used to obtain memory.");
-DEFINE_bool(malloc_skip_mmap,
- EnvToBool("TCMALLOC_SKIP_MMAP", false),
- "Whether mmap can be used to obtain memory.");
-
-// static allocators
-class SbrkSysAllocator : public SysAllocator {
-public:
- SbrkSysAllocator() : SysAllocator() {
- }
- void* Alloc(size_t size, size_t *actual_size, size_t alignment);
- void DumpStats(TCMalloc_Printer* printer);
-};
-static char sbrk_space[sizeof(SbrkSysAllocator)];
-
-class MmapSysAllocator : public SysAllocator {
-public:
- MmapSysAllocator() : SysAllocator() {
- }
- void* Alloc(size_t size, size_t *actual_size, size_t alignment);
- void DumpStats(TCMalloc_Printer* printer);
-};
-static char mmap_space[sizeof(MmapSysAllocator)];
-
-class DevMemSysAllocator : public SysAllocator {
-public:
- DevMemSysAllocator() : SysAllocator() {
- }
- void* Alloc(size_t size, size_t *actual_size, size_t alignment);
- void DumpStats(TCMalloc_Printer* printer);
-};
-static char devmem_space[sizeof(DevMemSysAllocator)];
-
-static const int kStaticAllocators = 3;
-// kMaxAllocators = kMaxDynamicAllocators (2) + kStaticAllocators (3).
-static const int kMaxAllocators = 5;
-static SysAllocator *allocators[kMaxAllocators];
-
-bool RegisterSystemAllocator(SysAllocator *a, int priority) {
- SpinLockHolder lock_holder(&spinlock);
-
- // No two allocators should have a priority conflict, since the order
- // is determined at compile time.
- CHECK_CONDITION(allocators[priority] == NULL);
- allocators[priority] = a;
- return true;
-}
-
-
-void* SbrkSysAllocator::Alloc(size_t size, size_t *actual_size,
- size_t alignment) {
- // Check if we should use sbrk allocation.
- // FLAGS_malloc_skip_sbrk starts out as false (its uninitialized
- // state) and eventually gets initialized to the specified value. Note
- // that this code runs for a while before the flags are initialized.
- // That means that even if this flag is set to true, some (initial)
- // memory will be allocated with sbrk before the flag takes effect.
- if (FLAGS_malloc_skip_sbrk) {
- return NULL;
- }
-
- // sbrk will release memory if passed a negative number, so we do
- // a strict check here
- if (static_cast<ptrdiff_t>(size + alignment) < 0) return NULL;
-
- // could theoretically return the "extra" bytes here, but this
- // is simple and correct.
- if (actual_size) {
- *actual_size = size;
- }
-
- // This doesn't overflow because TCMalloc_SystemAlloc has already
- // tested for overflow at the alignment boundary.
- size = ((size + alignment - 1) / alignment) * alignment;
-
-  // Check that we're not asking for so much more memory that we'd
- // wrap around the end of the virtual address space. (This seems
- // like something sbrk() should check for us, and indeed opensolaris
- // does, but glibc does not:
- // http://src.opensolaris.org/source/xref/onnv/onnv-gate/usr/src/lib/libc/port/sys/sbrk.c?a=true
- // http://sourceware.org/cgi-bin/cvsweb.cgi/~checkout~/libc/misc/sbrk.c?rev=1.1.2.1&content-type=text/plain&cvsroot=glibc
- // Without this check, sbrk may succeed when it ought to fail.)
- if (reinterpret_cast<intptr_t>(sbrk(0)) + size < size) {
- failed_ = true;
- return NULL;
- }
-
- void* result = sbrk(size);
- if (result == reinterpret_cast<void*>(-1)) {
- failed_ = true;
- return NULL;
- }
-
- // Is it aligned?
- uintptr_t ptr = reinterpret_cast<uintptr_t>(result);
- if ((ptr & (alignment-1)) == 0) return result;
-
- // Try to get more memory for alignment
- size_t extra = alignment - (ptr & (alignment-1));
- void* r2 = sbrk(extra);
- if (reinterpret_cast<uintptr_t>(r2) == (ptr + size)) {
- // Contiguous with previous result
- return reinterpret_cast<void*>(ptr + extra);
- }
-
- // Give up and ask for "size + alignment - 1" bytes so
- // that we can find an aligned region within it.
- result = sbrk(size + alignment - 1);
- if (result == reinterpret_cast<void*>(-1)) {
- failed_ = true;
- return NULL;
- }
- ptr = reinterpret_cast<uintptr_t>(result);
- if ((ptr & (alignment-1)) != 0) {
- ptr += alignment - (ptr & (alignment-1));
- }
- return reinterpret_cast<void*>(ptr);
-}
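
// Editorial sketch (not part of this file): the power-of-two align-up
// arithmetic used above when sbrk() hands back a misaligned break.
#include <stdint.h>
#include <cstdio>

static uintptr_t AlignUp(uintptr_t ptr, uintptr_t alignment) {
  // Valid only for power-of-two alignments, as in the caller above.
  if ((ptr & (alignment - 1)) != 0)
    ptr += alignment - (ptr & (alignment - 1));
  return ptr;
}

int main() {
  std::printf("%lu\n", (unsigned long)AlignUp(4097, 4096));  // 8192
  std::printf("%lu\n", (unsigned long)AlignUp(8192, 4096));  // 8192 (aligned)
  return 0;
}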
-
-void SbrkSysAllocator::DumpStats(TCMalloc_Printer* printer) {
- printer->printf("SbrkSysAllocator: failed_=%d\n", failed_);
-}
-
-void* MmapSysAllocator::Alloc(size_t size, size_t *actual_size,
- size_t alignment) {
-#ifndef HAVE_MMAP
- failed_ = true;
- return NULL;
-#else
- // Check if we should use mmap allocation.
- // FLAGS_malloc_skip_mmap starts out as false (its uninitialized
- // state) and eventually gets initialized to the specified value. Note
- // that this code runs for a while before the flags are initialized.
- // Chances are we never get here before the flags are initialized since
- // sbrk is used until the heap is exhausted (before mmap is used).
- if (FLAGS_malloc_skip_mmap) {
- return NULL;
- }
-
- // could theoretically return the "extra" bytes here, but this
- // is simple and correct.
- if (actual_size) {
- *actual_size = size;
- }
-
- // Enforce page alignment
- if (pagesize == 0) pagesize = getpagesize();
- if (alignment < pagesize) alignment = pagesize;
- size_t aligned_size = ((size + alignment - 1) / alignment) * alignment;
- if (aligned_size < size) {
- return NULL;
- }
- size = aligned_size;
-
- // Ask for extra memory if alignment > pagesize
- size_t extra = 0;
- if (alignment > pagesize) {
- extra = alignment - pagesize;
- }
-
- // Note: size + extra does not overflow since:
- // size + alignment < (1<<NBITS).
- // and extra <= alignment
- // therefore size + extra < (1<<NBITS)
- void* result = mmap(NULL, size + extra,
- PROT_READ|PROT_WRITE,
- MAP_PRIVATE|MAP_ANONYMOUS,
- -1, 0);
- if (result == reinterpret_cast<void*>(MAP_FAILED)) {
- failed_ = true;
- return NULL;
- }
-
- // Adjust the return memory so it is aligned
- uintptr_t ptr = reinterpret_cast<uintptr_t>(result);
- size_t adjust = 0;
- if ((ptr & (alignment - 1)) != 0) {
- adjust = alignment - (ptr & (alignment - 1));
- }
-
- // Return the unused memory to the system
- if (adjust > 0) {
- munmap(reinterpret_cast<void*>(ptr), adjust);
- }
- if (adjust < extra) {
- munmap(reinterpret_cast<void*>(ptr + adjust + size), extra - adjust);
- }
-
- ptr += adjust;
- return reinterpret_cast<void*>(ptr);
-#endif // HAVE_MMAP
-}
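
// Editorial sketch (not part of this file): the over-map-and-trim technique
// above, runnable on its own on Linux/BSD. Mapping size + (alignment -
// pagesize) guarantees an aligned window exists inside the mapping; the
// misaligned head and the unused tail are then returned with munmap().
#include <sys/mman.h>
#include <unistd.h>
#include <stdint.h>
#include <cstdio>

int main() {
  const size_t pagesize = sysconf(_SC_PAGESIZE);
  const size_t alignment = 4 * pagesize;      // stronger than page alignment
  const size_t size = 2 * pagesize;
  const size_t extra = alignment - pagesize;  // worst-case slack needed
  void* raw = mmap(NULL, size + extra, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (raw == MAP_FAILED) return 1;
  uintptr_t ptr = reinterpret_cast<uintptr_t>(raw);
  size_t adjust = 0;
  if ((ptr & (alignment - 1)) != 0)
    adjust = alignment - (ptr & (alignment - 1));
  if (adjust > 0)      // trim the misaligned head
    munmap(raw, adjust);
  if (adjust < extra)  // trim the unused tail
    munmap(reinterpret_cast<void*>(ptr + adjust + size), extra - adjust);
  std::printf("aligned block at %p\n", reinterpret_cast<void*>(ptr + adjust));
  munmap(reinterpret_cast<void*>(ptr + adjust), size);
  return 0;
}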
-
-void MmapSysAllocator::DumpStats(TCMalloc_Printer* printer) {
- printer->printf("MmapSysAllocator: failed_=%d\n", failed_);
-}
-
-void* DevMemSysAllocator::Alloc(size_t size, size_t *actual_size,
- size_t alignment) {
-#ifndef HAVE_MMAP
- failed_ = true;
- return NULL;
-#else
- static bool initialized = false;
- static off_t physmem_base; // next physical memory address to allocate
- static off_t physmem_limit; // maximum physical address allowed
- static int physmem_fd; // file descriptor for /dev/mem
-
- // Check if we should use /dev/mem allocation. Note that it may take
- // a while to get this flag initialized, so meanwhile we fall back to
- // the next allocator. (It looks like 7MB gets allocated before
- // this flag gets initialized -khr.)
- if (FLAGS_malloc_devmem_start == 0) {
- // NOTE: not a devmem_failure - we'd like TCMalloc_SystemAlloc to
- // try us again next time.
- return NULL;
- }
-
- if (!initialized) {
- physmem_fd = open("/dev/mem", O_RDWR);
- if (physmem_fd < 0) {
- failed_ = true;
- return NULL;
- }
- physmem_base = FLAGS_malloc_devmem_start*1024LL*1024LL;
- physmem_limit = FLAGS_malloc_devmem_limit*1024LL*1024LL;
- initialized = true;
- }
-
- // could theoretically return the "extra" bytes here, but this
- // is simple and correct.
- if (actual_size) {
- *actual_size = size;
- }
-
- // Enforce page alignment
- if (pagesize == 0) pagesize = getpagesize();
- if (alignment < pagesize) alignment = pagesize;
- size_t aligned_size = ((size + alignment - 1) / alignment) * alignment;
- if (aligned_size < size) {
- return NULL;
- }
- size = aligned_size;
-
- // Ask for extra memory if alignment > pagesize
- size_t extra = 0;
- if (alignment > pagesize) {
- extra = alignment - pagesize;
- }
-
- // check to see if we have any memory left
- if (physmem_limit != 0 &&
- ((size + extra) > (physmem_limit - physmem_base))) {
- failed_ = true;
- return NULL;
- }
-
- // Note: size + extra does not overflow since:
- // size + alignment < (1<<NBITS).
- // and extra <= alignment
- // therefore size + extra < (1<<NBITS)
- void *result = mmap(0, size + extra, PROT_WRITE|PROT_READ,
- MAP_SHARED, physmem_fd, physmem_base);
- if (result == reinterpret_cast<void*>(MAP_FAILED)) {
- failed_ = true;
- return NULL;
- }
- uintptr_t ptr = reinterpret_cast<uintptr_t>(result);
-
- // Adjust the return memory so it is aligned
- size_t adjust = 0;
- if ((ptr & (alignment - 1)) != 0) {
- adjust = alignment - (ptr & (alignment - 1));
- }
-
- // Return the unused virtual memory to the system
- if (adjust > 0) {
- munmap(reinterpret_cast<void*>(ptr), adjust);
- }
- if (adjust < extra) {
- munmap(reinterpret_cast<void*>(ptr + adjust + size), extra - adjust);
- }
-
- ptr += adjust;
- physmem_base += adjust + size;
-
- return reinterpret_cast<void*>(ptr);
-#endif // HAVE_MMAP
-}
-
-void DevMemSysAllocator::DumpStats(TCMalloc_Printer* printer) {
- printer->printf("DevMemSysAllocator: failed_=%d\n", failed_);
-}
-
-static bool system_alloc_inited = false;
-void InitSystemAllocators(void) {
- // This determines the order in which system allocators are called
- int i = kMaxDynamicAllocators;
- allocators[i++] = new (devmem_space) DevMemSysAllocator();
-
- // In 64-bit debug mode, place the mmap allocator first since it
- // allocates pointers that do not fit in 32 bits and therefore gives
- // us better testing of code's 64-bit correctness. It also leads to
- // less false negatives in heap-checking code. (Numbers are less
- // likely to look like pointers and therefore the conservative gc in
- // the heap-checker is less likely to misinterpret a number as a
- // pointer).
- if (kDebugMode && sizeof(void*) > 4) {
- allocators[i++] = new (mmap_space) MmapSysAllocator();
- allocators[i++] = new (sbrk_space) SbrkSysAllocator();
- } else {
- allocators[i++] = new (sbrk_space) SbrkSysAllocator();
- allocators[i++] = new (mmap_space) MmapSysAllocator();
- }
-}
-
-void* TCMalloc_SystemAlloc(size_t size, size_t *actual_size,
- size_t alignment) {
- // Discard requests that overflow
- if (size + alignment < size) return NULL;
-
- SpinLockHolder lock_holder(&spinlock);
-
- if (!system_alloc_inited) {
- InitSystemAllocators();
- system_alloc_inited = true;
- }
-
- // Enforce minimum alignment
- if (alignment < sizeof(MemoryAligner)) alignment = sizeof(MemoryAligner);
-
- // Try twice, once avoiding allocators that failed before, and once
- // more trying all allocators even if they failed before.
- for (int i = 0; i < 2; i++) {
- for (int j = 0; j < kMaxAllocators; j++) {
- SysAllocator *a = allocators[j];
- if (a == NULL) continue;
- if (a->usable_ && !a->failed_) {
- void* result = a->Alloc(size, actual_size, alignment);
- if (result != NULL) return result;
- }
- }
-
- // nothing worked - reset failed_ flags and try again
- for (int j = 0; j < kMaxAllocators; j++) {
- SysAllocator *a = allocators[j];
- if (a == NULL) continue;
- a->failed_ = false;
- }
- }
- return NULL;
-}
-
-void TCMalloc_SystemRelease(void* start, size_t length) {
-#ifdef MADV_DONTNEED
- if (FLAGS_malloc_devmem_start) {
- // It's not safe to use MADV_DONTNEED if we've been mapping
- // /dev/mem for heap memory
- return;
- }
- if (pagesize == 0) pagesize = getpagesize();
- const size_t pagemask = pagesize - 1;
-
- size_t new_start = reinterpret_cast<size_t>(start);
- size_t end = new_start + length;
- size_t new_end = end;
-
- // Round up the starting address and round down the ending address
- // to be page aligned:
- new_start = (new_start + pagesize - 1) & ~pagemask;
- new_end = new_end & ~pagemask;
-
- ASSERT((new_start & pagemask) == 0);
- ASSERT((new_end & pagemask) == 0);
- ASSERT(new_start >= reinterpret_cast<size_t>(start));
- ASSERT(new_end <= end);
-
- if (new_end > new_start) {
- // Note -- ignoring most return codes, because if this fails it
- // doesn't matter...
- while (madvise(reinterpret_cast<char*>(new_start), new_end - new_start,
- MADV_DONTNEED) == -1 &&
- errno == EAGAIN) {
- // NOP
- }
- }
-#endif
-}
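As a concrete check of the rounding above, take pagesize = 4096: releasing start = 0x10010 with length = 0x3000 gives end = 0x13010; rounding the start up yields new_start = 0x11000 and rounding the end down yields new_end = 0x13000, so madvise covers exactly the two pages [0x11000, 0x13000) that lie wholly inside the requested range, leaving the partially covered pages alone.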
-
-void TCMalloc_SystemCommit(void* start, size_t length) {
- // Nothing to do here. TCMalloc_SystemRelease does not alter pages
- // such that they need to be re-committed before they can be used by the
- // application.
-}
-
-void DumpSystemAllocatorStats(TCMalloc_Printer* printer) {
- for (int j = 0; j < kMaxAllocators; j++) {
- SysAllocator *a = allocators[j];
- if (a == NULL) continue;
- if (a->usable_) {
- a->DumpStats(printer);
- }
- }
-}
diff --git a/third_party/tcmalloc/system-alloc.h b/third_party/tcmalloc/system-alloc.h
deleted file mode 100644
index 60affed..0000000
--- a/third_party/tcmalloc/system-alloc.h
+++ /dev/null
@@ -1,119 +0,0 @@
-// Copyright (c) 2005, Google Inc.
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// ---
-// Author: Sanjay Ghemawat
-//
-// Routine that uses sbrk/mmap to allocate memory from the system.
-// Useful for implementing malloc.
-
-#ifndef TCMALLOC_SYSTEM_ALLOC_H_
-#define TCMALLOC_SYSTEM_ALLOC_H_
-
-#include <config.h>
-#include "internal_logging.h"
-
-// REQUIRES: "alignment" is a power of two or "0" to indicate default alignment
-//
-// Allocate and return "N" bytes of zeroed memory.
-//
-// If actual_bytes is NULL then the returned memory is exactly the
-// requested size.  If actual_bytes is non-NULL then the allocator
-// may optionally return more bytes than asked for (i.e. return an
-// entire "huge" page if a huge page allocator is in use).
-//
-// The returned pointer is a multiple of "alignment" if non-zero.
-//
-// Returns NULL when out of memory.
-extern void* TCMalloc_SystemAlloc(size_t bytes, size_t *actual_bytes,
- size_t alignment = 0);
-
-// This call is a hint to the operating system that the pages
-// contained in the specified range of memory will not be used for a
-// while, and can be released for use by other processes or the OS.
-// Pages which are released in this way may be destroyed (zeroed) by
-// the OS. The benefit of this function is that it frees memory for
-// use by the system, the cost is that the pages are faulted back into
-// the address space next time they are touched, which can impact
-// performance.  (Only pages fully covered by the memory region will
-// be released; partial pages will not.)
-extern void TCMalloc_SystemRelease(void* start, size_t length);
-
-// Called to resurrect memory which has been previously released
-// to the system via TCMalloc_SystemRelease. An attempt to
-// commit a page that is already committed does not cause this
-// function to fail.
-extern void TCMalloc_SystemCommit(void* start, size_t length);
-
-// Interface to a pluggable system allocator.
-class SysAllocator {
- public:
- SysAllocator()
- : usable_(true),
- failed_(false) {
-  }
-  virtual ~SysAllocator() {}
-
- virtual void* Alloc(size_t size, size_t *actual_size, size_t alignment) = 0;
-
- // Populate the map with whatever properties the specified allocator finds
- // useful for debugging (such as number of bytes allocated and whether the
- // allocator has failed). The callee is responsible for any necessary
- // locking (and avoiding deadlock).
- virtual void DumpStats(TCMalloc_Printer* printer) = 0;
-
- // So the allocator can be turned off at compile time
- bool usable_;
-
- // Did this allocator fail? If so, we don't need to retry more than twice.
- bool failed_;
-};
-
-// Register a new system allocator. The priority determines the order in
-// which the allocators will be invoked. Allocators with numerically lower
-// priority are tried first. To keep things simple, the priority of various
-// allocators is known at compile time.
-//
-// Valid range of priorities: [0, kMaxDynamicAllocators)
-//
-// Please note that we can't use complex data structures and cause
-// recursive calls to malloc within this function. So all data structures
-// are statically allocated.
-//
-// Returns true on success. Does nothing on failure.
-extern PERFTOOLS_DLL_DECL bool RegisterSystemAllocator(SysAllocator *allocator,
- int priority);
-
-// Maximum number of SysAllocators that can be registered via
-// RegisterSystemAllocator.
-static const int kMaxDynamicAllocators = 2;
-
-// Retrieve the current state of various system allocators.
-extern PERFTOOLS_DLL_DECL void DumpSystemAllocatorStats(TCMalloc_Printer* printer);
-
-#endif /* TCMALLOC_SYSTEM_ALLOC_H_ */
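For illustration, here is how a custom allocator could be plugged into the interface above. Everything named StaticArena* is invented for this example; it simply carves allocations out of a fixed in-process buffer:

    // Hypothetical example only: a SysAllocator backed by a fixed arena.
    class StaticArenaAllocator : public SysAllocator {
     public:
      StaticArenaAllocator() : used_(0) {}
      virtual void* Alloc(size_t size, size_t *actual_size, size_t alignment) {
        uintptr_t base = reinterpret_cast<uintptr_t>(arena_);
        uintptr_t p = base + used_;
        if (alignment > 0 && (p & (alignment - 1)) != 0)
          p += alignment - (p & (alignment - 1));    // bump up to alignment
        if (p + size > base + sizeof(arena_)) {      // arena exhausted
          failed_ = true;
          return NULL;
        }
        used_ = (p + size) - base;
        if (actual_size) *actual_size = size;
        return reinterpret_cast<void*>(p);
      }
      virtual void DumpStats(TCMalloc_Printer* printer) {
        printer->printf("StaticArenaAllocator: used_=%d\n",
                        static_cast<int>(used_));
      }
     private:
      char arena_[1 << 20];   // fixed 1 MB backing store
      size_t used_;
    };

    // During early startup, before any allocation pressure:
    //   static StaticArenaAllocator arena_allocator;
    //   RegisterSystemAllocator(&arena_allocator, 0);  // priority 0: tried first

Registered at priority 0, such an allocator would be consulted before the statically wired sbrk/mmap allocators; the static buffer also respects the header's warning against calling back into malloc from here.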
diff --git a/third_party/tcmalloc/tcmalloc.cc b/third_party/tcmalloc/tcmalloc.cc
deleted file mode 100644
index 66e0ea6..0000000
--- a/third_party/tcmalloc/tcmalloc.cc
+++ /dev/null
@@ -1,1312 +0,0 @@
-// Copyright (c) 2005, Google Inc.
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// ---
-// Author: Sanjay Ghemawat <opensource@google.com>
-//
-// A malloc that uses a per-thread cache to satisfy small malloc requests.
-// (The time for malloc/free of a small object drops from 300 ns to 50 ns.)
-//
-// See doc/tcmalloc.html for a high-level
-// description of how this malloc works.
-//
-// SYNCHRONIZATION
-// 1. The thread-specific lists are accessed without acquiring any locks.
-// This is safe because each such list is only accessed by one thread.
-// 2. We have a lock per central free-list, and hold it while manipulating
-// the central free list for a particular size.
-// 3. The central page allocator is protected by "pageheap_lock".
-// 4. The pagemap (which maps from page-number to descriptor),
-// can be read without holding any locks, and written while holding
-// the "pageheap_lock".
-// 5. To improve performance, a subset of the information one can get
-// from the pagemap is cached in a data structure, pagemap_cache_,
-// that atomically reads and writes its entries. This cache can be
-// read and written without locking.
-//
-// This multi-threaded access to the pagemap is safe for fairly
-// subtle reasons. We basically assume that when an object X is
-// allocated by thread A and deallocated by thread B, there must
-// have been appropriate synchronization in the handoff of object
-// X from thread A to thread B. The same logic applies to pagemap_cache_.
-//
-// THE PAGEID-TO-SIZECLASS CACHE
-// Hot PageID-to-sizeclass mappings are held by pagemap_cache_. If this cache
-// returns 0 for a particular PageID then that means "no information," not that
-// the sizeclass is 0. The cache may have stale information for pages that do
-// not hold the beginning of any free()'able object. Staleness is eliminated
-// in Populate() for pages with sizeclass > 0 objects, and in do_malloc() and
-// do_memalign() for all other relevant pages.
-//
-// PAGEMAP
-// -------
-// Page map contains a mapping from page id to Span.
-//
-// If Span s occupies pages [p..q],
-// pagemap[p] == s
-// pagemap[q] == s
-// pagemap[p+1..q-1] are undefined
-// pagemap[p-1] and pagemap[q+1] are defined:
-// NULL if the corresponding page is not yet in the address space.
-// Otherwise it points to a Span. This span may be free
-//       or allocated.  If free, it is in one of pageheap's freelists.
-//
-// TODO: Bias reclamation to larger addresses
-// TODO: implement mallinfo/mallopt
-// TODO: Better testing
-//
-// 9/28/2003 (new page-level allocator replaces ptmalloc2):
-// * malloc/free of small objects goes from ~300 ns to ~50 ns.
-// * allocation of a reasonably complicated struct
-// goes from about 1100 ns to about 300 ns.
-
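A minimal sketch of the invariant the PAGEMAP section describes, assuming a hypothetical pagemap with a Set(page, span) operation (the real structure is a radix tree; this only shows which entries matter):

    // Register a Span occupying pages [p..q].  Only the boundary pages
    // need valid entries; interior entries are allowed to go stale.
    void RecordSpan(PageMap* pagemap, Span* span) {
      const PageID p = span->start;
      const PageID q = span->start + span->length - 1;
      pagemap->Set(p, span);    // pagemap[p] == s
      if (q != p) {
        pagemap->Set(q, span);  // pagemap[q] == s
      }
    }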
-#include <config.h>
-#include <new>
-#include <stdio.h>
-#include <stddef.h>
-#if defined HAVE_STDINT_H
-#include <stdint.h>
-#elif defined HAVE_INTTYPES_H
-#include <inttypes.h>
-#else
-#include <sys/types.h>
-#endif
-#if defined(HAVE_MALLOC_H) && defined(HAVE_STRUCT_MALLINFO)
-#include <malloc.h> // for struct mallinfo
-#endif
-#include <string.h>
-#ifdef HAVE_PTHREAD
-#include <pthread.h>
-#endif
-#ifdef HAVE_UNISTD_H
-#include <unistd.h>
-#endif
-#include <errno.h>
-#include <stdarg.h>
-#include <algorithm>
-#include <google/tcmalloc.h>
-#include "base/commandlineflags.h"
-#include "base/basictypes.h" // gets us PRIu64
-#include "base/sysinfo.h"
-#include "base/spinlock.h"
-#include "common.h"
-#include "malloc_hook-inl.h"
-#include <google/malloc_hook.h>
-#include <google/malloc_extension.h>
-#include "central_freelist.h"
-#include "internal_logging.h"
-#include "linked_list.h"
-#include "maybe_threads.h"
-#include "page_heap.h"
-#include "page_heap_allocator.h"
-#include "pagemap.h"
-#include "span.h"
-#include "static_vars.h"
-#include "system-alloc.h"
-#include "tcmalloc_guard.h"
-#include "thread_cache.h"
-
-#if (defined(_WIN32) && !defined(__CYGWIN__) && !defined(__CYGWIN32__)) && !defined(WIN32_OVERRIDE_ALLOCATORS)
-# define WIN32_DO_PATCHING 1
-#endif
-
-using tcmalloc::PageHeap;
-using tcmalloc::PageHeapAllocator;
-using tcmalloc::SizeMap;
-using tcmalloc::Span;
-using tcmalloc::StackTrace;
-using tcmalloc::Static;
-using tcmalloc::ThreadCache;
-
-// __THROW is defined in glibc systems. It means, counter-intuitively,
-// "This function will never throw an exception." It's an optional
-// optimization tool, but we may need to use it to match glibc prototypes.
-#ifndef __THROW // I guess we're not on a glibc system
-# define __THROW // __THROW is just an optimization, so ok to make it ""
-#endif
-
-DECLARE_int64(tcmalloc_sample_parameter);
-DECLARE_double(tcmalloc_release_rate);
-
-// For windows, the printf we use to report large allocs is
-// potentially dangerous: it could trigger a recursive malloc and loop
-// forever.  So by default we set the threshold to a huge number
-// on windows, so this bad situation will never trigger. You can
-// always set TCMALLOC_LARGE_ALLOC_REPORT_THRESHOLD manually if you
-// want this functionality.
-#ifdef _WIN32
-const int64 kDefaultLargeAllocReportThreshold = static_cast<int64>(1) << 62;
-#else
-const int64 kDefaultLargeAllocReportThreshold = static_cast<int64>(1) << 30;
-#endif
-DEFINE_int64(tcmalloc_large_alloc_report_threshold,
- EnvToInt64("TCMALLOC_LARGE_ALLOC_REPORT_THRESHOLD",
- kDefaultLargeAllocReportThreshold),
- "Allocations larger than this value cause a stack "
- "trace to be dumped to stderr. The threshold for "
- "dumping stack traces is increased by a factor of 1.125 "
- "every time we print a message so that the threshold "
- "automatically goes up by a factor of ~1000 every 60 "
- "messages. This bounds the amount of extra logging "
- "generated by this flag. Default value of this flag "
- "is very large and therefore you should see no extra "
- "logging unless the flag is overridden. Set to 0 to "
- "disable reporting entirely.");
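A quick check of the arithmetic in that description: each report multiplies the threshold by 1.125, and 1.125^60 ≈ 1.17e3, so sixty messages do raise the threshold by roughly the claimed factor of 1000.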
-
-
-// We already declared these functions in tcmalloc.h, but we have to
-// declare them again to give them an ATTRIBUTE_SECTION: we want to
-// put all callers of MallocHook::Invoke* in this module into
-// ATTRIBUTE_SECTION(google_malloc) section, so that
-// MallocHook::GetCallerStackTrace can function accurately.
-extern "C" {
- void* tc_malloc(size_t size) __THROW
- ATTRIBUTE_SECTION(google_malloc);
- void tc_free(void* ptr) __THROW
- ATTRIBUTE_SECTION(google_malloc);
- void* tc_realloc(void* ptr, size_t size) __THROW
- ATTRIBUTE_SECTION(google_malloc);
- void* tc_calloc(size_t nmemb, size_t size) __THROW
- ATTRIBUTE_SECTION(google_malloc);
- void tc_cfree(void* ptr) __THROW
- ATTRIBUTE_SECTION(google_malloc);
-
- void* tc_memalign(size_t __alignment, size_t __size) __THROW
- ATTRIBUTE_SECTION(google_malloc);
- int tc_posix_memalign(void** ptr, size_t align, size_t size) __THROW
- ATTRIBUTE_SECTION(google_malloc);
- void* tc_valloc(size_t __size) __THROW
- ATTRIBUTE_SECTION(google_malloc);
- void* tc_pvalloc(size_t __size) __THROW
- ATTRIBUTE_SECTION(google_malloc);
-
- void tc_malloc_stats(void) __THROW
- ATTRIBUTE_SECTION(google_malloc);
- int tc_mallopt(int cmd, int value) __THROW
- ATTRIBUTE_SECTION(google_malloc);
-#ifdef HAVE_STRUCT_MALLINFO // struct mallinfo isn't defined on freebsd
- struct mallinfo tc_mallinfo(void) __THROW
- ATTRIBUTE_SECTION(google_malloc);
-#endif
-
- void* tc_new(size_t size)
- ATTRIBUTE_SECTION(google_malloc);
- void tc_delete(void* p) __THROW
- ATTRIBUTE_SECTION(google_malloc);
- void* tc_newarray(size_t size)
- ATTRIBUTE_SECTION(google_malloc);
- void tc_deletearray(void* p) __THROW
- ATTRIBUTE_SECTION(google_malloc);
-
- // And the nothrow variants of these:
- void* tc_new_nothrow(size_t size, const std::nothrow_t&) __THROW
- ATTRIBUTE_SECTION(google_malloc);
- void* tc_newarray_nothrow(size_t size, const std::nothrow_t&) __THROW
- ATTRIBUTE_SECTION(google_malloc);
-}
-
-// Override the libc functions to prefer our own instead. This comes
-// first so code in tcmalloc.cc can use the overridden versions. One
-// exception: in windows, by default, we patch our code into these
-// functions (via src/windows/patch_function.cc) rather than override
-// them. In that case, we don't want to do this overriding here.
-#ifndef WIN32_DO_PATCHING
-
-// TODO(mbelshe): Turn off TCMalloc's symbols for libc. We do that
-// elsewhere.
-#if 0
-
-#if defined(__GNUC__) && !defined(__MACH__)
- // Potentially faster variants that use the gcc alias extension.
- // Mach-O (Darwin) does not support weak aliases, hence the __MACH__ check.
- // FreeBSD does support aliases, but apparently not correctly. :-(
-# define ALIAS(x) __attribute__ ((alias (x)))
-void* operator new(size_t size) ALIAS("tc_new");
-void operator delete(void* p) __THROW ALIAS("tc_delete");
-void* operator new[](size_t size) ALIAS("tc_newarray");
-void operator delete[](void* p) __THROW ALIAS("tc_deletearray");
-void* operator new(size_t size, const std::nothrow_t&) __THROW
- ALIAS("tc_new_nothrow");
-void* operator new[](size_t size, const std::nothrow_t&) __THROW
- ALIAS("tc_newarray_nothrow");
-extern "C" {
- void* malloc(size_t size) __THROW ALIAS("tc_malloc");
- void free(void* ptr) __THROW ALIAS("tc_free");
- void* realloc(void* ptr, size_t size) __THROW ALIAS("tc_realloc");
- void* calloc(size_t n, size_t size) __THROW ALIAS("tc_calloc");
- void cfree(void* ptr) __THROW ALIAS("tc_cfree");
- void* memalign(size_t align, size_t s) __THROW ALIAS("tc_memalign");
- void* valloc(size_t size) __THROW ALIAS("tc_valloc");
- void* pvalloc(size_t size) __THROW ALIAS("tc_pvalloc");
- int posix_memalign(void** r, size_t a, size_t s) __THROW
- ALIAS("tc_posix_memalign");
- void malloc_stats(void) __THROW ALIAS("tc_malloc_stats");
- int mallopt(int cmd, int value) __THROW ALIAS("tc_mallopt");
-#ifdef HAVE_STRUCT_MALLINFO
- struct mallinfo mallinfo(void) __THROW ALIAS("tc_mallinfo");
-#endif
- // Some library routines on RedHat 9 allocate memory using malloc()
- // and free it using __libc_free() (or vice-versa). Since we provide
- // our own implementations of malloc/free, we need to make sure that
- // the __libc_XXX variants (defined as part of glibc) also point to
- // the same implementations.
-# if defined(__GLIBC__)
- void* __libc_malloc(size_t size) ALIAS("tc_malloc");
- void __libc_free(void* ptr) ALIAS("tc_free");
- void* __libc_realloc(void* ptr, size_t size) ALIAS("tc_realloc");
- void* __libc_calloc(size_t n, size_t size) ALIAS("tc_calloc");
- void __libc_cfree(void* ptr) ALIAS("tc_cfree");
- void* __libc_memalign(size_t align, size_t s) ALIAS("tc_memalign");
- void* __libc_valloc(size_t size) ALIAS("tc_valloc");
- void* __libc_pvalloc(size_t size) ALIAS("tc_pvalloc");
- int __posix_memalign(void** r, size_t a, size_t s) ALIAS("tc_posix_memalign");
-# define HAVE_ALIASED___LIBC 1
-# endif // #if defined(__GLIBC__)
-} // extern "C"
-# undef ALIAS
-#else
-// Portable wrappers
-void* operator new(size_t size) { return tc_new(size); }
-void operator delete(void* p) __THROW { tc_delete(p); }
-void* operator new[](size_t size) { return tc_newarray(size); }
-void operator delete[](void* p) __THROW { tc_deletearray(p); }
-void* operator new(size_t size, const std::nothrow_t& nt) __THROW {
- return tc_new_nothrow(size, nt);
-}
-void* operator new[](size_t size, const std::nothrow_t& nt) __THROW {
- return tc_newarray_nothrow(size, nt);
-}
-extern "C" {
- void* malloc(size_t s) __THROW { return tc_malloc(s); }
- void free(void* p) __THROW { tc_free(p); }
- void* realloc(void* p, size_t s) __THROW { return tc_realloc(p, s); }
- void* calloc(size_t n, size_t s) __THROW { return tc_calloc(n, s); }
- void cfree(void* p) __THROW { tc_cfree(p); }
- void* memalign(size_t a, size_t s) __THROW { return tc_memalign(a, s); }
- void* valloc(size_t s) __THROW { return tc_valloc(s); }
- void* pvalloc(size_t s) __THROW { return tc_pvalloc(s); }
- int posix_memalign(void** r, size_t a, size_t s) __THROW {
- return tc_posix_memalign(r, a, s);
- }
- void malloc_stats(void) __THROW { tc_malloc_stats(); }
- int mallopt(int cmd, int v) __THROW { return tc_mallopt(cmd, v); }
-#ifdef HAVE_STRUCT_MALLINFO
- struct mallinfo mallinfo(void) __THROW { return tc_mallinfo(); }
-#endif
-} // extern C
-#endif // #if defined(__GNUC__)
-
-#ifndef HAVE_ALIASED___LIBC
-extern "C" {
- void* __libc_malloc(size_t size) { return malloc(size); }
- void __libc_free(void* ptr) { free(ptr); }
- void* __libc_realloc(void* ptr, size_t size) { return realloc(ptr, size); }
- void* __libc_calloc(size_t n, size_t size) { return calloc(n, size); }
- void __libc_cfree(void* ptr) { cfree(ptr); }
- void* __libc_memalign(size_t align, size_t s) { return memalign(align, s); }
- void* __libc_valloc(size_t size) { return valloc(size); }
- void* __libc_pvalloc(size_t size) { return pvalloc(size); }
- int __posix_memalign(void** r, size_t a, size_t s) {
- return posix_memalign(r, a, s);
- }
-} // extern "C"
-#endif // #ifndef HAVE_ALIASED___LIBC
-
-#endif  // #if 0
-
-#endif // #ifndef WIN32_DO_PATCHING
-
-
-// ----------------------- IMPLEMENTATION -------------------------------
-
-// These routines are called by free(), realloc(), etc. if the pointer is
-// invalid. This is a cheap (source-editing required) kind of exception
-// handling for these routines.
-namespace {
-void InvalidFree(void* ptr) {
- CRASH("Attempt to free invalid pointer: %p\n", ptr);
-}
-
-size_t InvalidGetSizeForRealloc(void* old_ptr) {
- CRASH("Attempt to realloc invalid pointer: %p\n", old_ptr);
- return 0;
-}
-
-size_t InvalidGetAllocatedSize(void* ptr) {
- CRASH("Attempt to get the size of an invalid pointer: %p\n", ptr);
- return 0;
-}
-} // unnamed namespace
-
-// Extract interesting stats
-struct TCMallocStats {
- uint64_t system_bytes; // Bytes alloced from system
- uint64_t committed_bytes; // Bytes alloced and committed from system
- uint64_t thread_bytes; // Bytes in thread caches
- uint64_t central_bytes; // Bytes in central cache
- uint64_t transfer_bytes; // Bytes in central transfer cache
- uint64_t pageheap_bytes; // Bytes in page heap
- uint64_t metadata_bytes; // Bytes alloced for metadata
-};
-
-// Get stats into "r". Also get per-size-class counts if class_count != NULL
-static void ExtractStats(TCMallocStats* r, uint64_t* class_count) {
- r->central_bytes = 0;
- r->transfer_bytes = 0;
- for (int cl = 0; cl < kNumClasses; ++cl) {
- const int length = Static::central_cache()[cl].length();
- const int tc_length = Static::central_cache()[cl].tc_length();
- const size_t size = static_cast<uint64_t>(
- Static::sizemap()->ByteSizeForClass(cl));
- r->central_bytes += (size * length);
- r->transfer_bytes += (size * tc_length);
- if (class_count) class_count[cl] = length + tc_length;
- }
-
- // Add stats from per-thread heaps
- r->thread_bytes = 0;
- { // scope
- SpinLockHolder h(Static::pageheap_lock());
- ThreadCache::GetThreadStats(&r->thread_bytes, class_count);
- }
-
-  { // scope
- SpinLockHolder h(Static::pageheap_lock());
- r->system_bytes = Static::pageheap()->SystemBytes();
- r->committed_bytes = Static::pageheap()->CommittedBytes();
- r->metadata_bytes = tcmalloc::metadata_system_bytes();
- r->pageheap_bytes = Static::pageheap()->FreeBytes();
- }
-}
-
-// WRITE stats to "out"
-static void DumpStats(TCMalloc_Printer* out, int level) {
- TCMallocStats stats;
- uint64_t class_count[kNumClasses];
- ExtractStats(&stats, (level >= 2 ? class_count : NULL));
-
- static const double MB = 1048576.0;
-
- const uint64_t bytes_in_use = stats.system_bytes
- - stats.pageheap_bytes
- - stats.central_bytes
- - stats.transfer_bytes
- - stats.thread_bytes;
-
- out->printf("WASTE: %7.1f MB committed but not used\n"
- "WASTE: %7.1f MB bytes committed, %7.1f MB bytes in use\n"
- "WASTE: committed/used ratio of %f\n",
- (stats.committed_bytes - bytes_in_use) / MB,
- stats.committed_bytes / MB,
- bytes_in_use / MB,
- stats.committed_bytes / static_cast<double>(bytes_in_use));
-
- if (level >= 2) {
- out->printf("------------------------------------------------\n");
- uint64_t cumulative = 0;
- for (int cl = 0; cl < kNumClasses; ++cl) {
- if (class_count[cl] > 0) {
- uint64_t class_bytes =
- class_count[cl] * Static::sizemap()->ByteSizeForClass(cl);
- cumulative += class_bytes;
- out->printf("class %3d [ %8" PRIuS " bytes ] : "
- "%8" PRIu64 " objs; %5.1f MB; %5.1f cum MB\n",
- cl, Static::sizemap()->ByteSizeForClass(cl),
- class_count[cl],
- class_bytes / MB,
- cumulative / MB);
- }
- }
-
- SpinLockHolder h(Static::pageheap_lock());
- Static::pageheap()->Dump(out);
-
- out->printf("------------------------------------------------\n");
- DumpSystemAllocatorStats(out);
- }
-
- out->printf("------------------------------------------------\n"
- "MALLOC: %12" PRIu64 " (%7.1f MB) Heap size\n"
- "MALLOC: %12" PRIu64 " (%7.1f MB) Bytes committed\n"
- "MALLOC: %12" PRIu64 " (%7.1f MB) Bytes in use by application\n"
- "MALLOC: %12" PRIu64 " (%7.1f MB) Bytes free in page heap\n"
- "MALLOC: %12" PRIu64 " (%7.1f MB) Bytes free in central cache\n"
- "MALLOC: %12" PRIu64 " (%7.1f MB) Bytes free in transfer cache\n"
- "MALLOC: %12" PRIu64 " (%7.1f MB) Bytes free in thread caches\n"
- "MALLOC: %12" PRIu64 " Spans in use\n"
- "MALLOC: %12" PRIu64 " Thread heaps in use\n"
- "MALLOC: %12" PRIu64 " (%7.1f MB) Metadata allocated\n"
- "------------------------------------------------\n",
- stats.system_bytes, stats.system_bytes / MB,
- stats.committed_bytes, stats.committed_bytes / MB,
- bytes_in_use, bytes_in_use / MB,
- stats.pageheap_bytes, stats.pageheap_bytes / MB,
- stats.central_bytes, stats.central_bytes / MB,
- stats.transfer_bytes, stats.transfer_bytes / MB,
- stats.thread_bytes, stats.thread_bytes / MB,
- uint64_t(Static::span_allocator()->inuse()),
- uint64_t(ThreadCache::HeapsInUse()),
- stats.metadata_bytes, stats.metadata_bytes / MB);
-}
-
-static void PrintStats(int level) {
- const int kBufferSize = 16 << 10;
- char* buffer = new char[kBufferSize];
- TCMalloc_Printer printer(buffer, kBufferSize);
- DumpStats(&printer, level);
- write(STDERR_FILENO, buffer, strlen(buffer));
- delete[] buffer;
-}
-
-static void** DumpHeapGrowthStackTraces() {
- // Count how much space we need
- int needed_slots = 0;
- {
- SpinLockHolder h(Static::pageheap_lock());
- for (StackTrace* t = Static::growth_stacks();
- t != NULL;
- t = reinterpret_cast<StackTrace*>(
- t->stack[tcmalloc::kMaxStackDepth-1])) {
- needed_slots += 3 + t->depth;
- }
- needed_slots += 100; // Slop in case list grows
- needed_slots += needed_slots/8; // An extra 12.5% slop
- }
-
- void** result = new void*[needed_slots];
- if (result == NULL) {
- MESSAGE("tcmalloc: allocation failed for stack trace slots",
- needed_slots * sizeof(*result));
- return NULL;
- }
-
- SpinLockHolder h(Static::pageheap_lock());
- int used_slots = 0;
- for (StackTrace* t = Static::growth_stacks();
- t != NULL;
- t = reinterpret_cast<StackTrace*>(
- t->stack[tcmalloc::kMaxStackDepth-1])) {
- ASSERT(used_slots < needed_slots); // Need to leave room for terminator
- if (used_slots + 3 + t->depth >= needed_slots) {
- // No more room
- break;
- }
-
- result[used_slots+0] = reinterpret_cast<void*>(static_cast<uintptr_t>(1));
- result[used_slots+1] = reinterpret_cast<void*>(t->size);
- result[used_slots+2] = reinterpret_cast<void*>(t->depth);
- for (int d = 0; d < t->depth; d++) {
- result[used_slots+3+d] = t->stack[d];
- }
- used_slots += 3 + t->depth;
- }
- result[used_slots] = reinterpret_cast<void*>(static_cast<uintptr_t>(0));
- return result;
-}
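The array built here is a flat sequence of records — an entry count, the sampled size, the depth, then depth PC slots — terminated by a zero in the count position. A matching reader might look like this (HandleTrace is a placeholder for whatever consumes the data):

    // Provided by the consumer of the traces (hypothetical).
    void HandleTrace(uintptr_t count, uintptr_t size, void** stack, int depth);

    // Walk the flattened trace array produced by the function above.
    void WalkGrowthTraces(void** slots) {
      int i = 0;
      while (reinterpret_cast<uintptr_t>(slots[i]) != 0) {  // zero count ends it
        uintptr_t count = reinterpret_cast<uintptr_t>(slots[i + 0]);
        uintptr_t size  = reinterpret_cast<uintptr_t>(slots[i + 1]);
        int depth = static_cast<int>(
            reinterpret_cast<uintptr_t>(slots[i + 2]));
        HandleTrace(count, size, &slots[i + 3], depth);
        i += 3 + depth;
      }
    }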
-
-// TCMalloc's support for extra malloc interfaces
-class TCMallocImplementation : public MallocExtension {
- public:
- virtual void GetStats(char* buffer, int buffer_length) {
- ASSERT(buffer_length > 0);
- TCMalloc_Printer printer(buffer, buffer_length);
-
- // Print level one stats unless lots of space is available
- if (buffer_length < 10000) {
- DumpStats(&printer, 1);
- } else {
- DumpStats(&printer, 2);
- }
- }
-
- virtual void** ReadStackTraces(int* sample_period) {
- tcmalloc::StackTraceTable table;
- {
- SpinLockHolder h(Static::pageheap_lock());
- Span* sampled = Static::sampled_objects();
- for (Span* s = sampled->next; s != sampled; s = s->next) {
- table.AddTrace(*reinterpret_cast<StackTrace*>(s->objects));
- }
- }
- *sample_period = ThreadCache::GetCache()->GetSamplePeriod();
- return table.ReadStackTracesAndClear(); // grabs and releases pageheap_lock
- }
-
- virtual void** ReadHeapGrowthStackTraces() {
- return DumpHeapGrowthStackTraces();
- }
-
- virtual bool GetNumericProperty(const char* name, size_t* value) {
- ASSERT(name != NULL);
-
- if (strcmp(name, "generic.current_allocated_bytes") == 0) {
- TCMallocStats stats;
- ExtractStats(&stats, NULL);
- *value = stats.system_bytes
- - stats.thread_bytes
- - stats.central_bytes
- - stats.transfer_bytes
- - stats.pageheap_bytes;
- return true;
- }
-
- if (strcmp(name, "generic.heap_size") == 0) {
- TCMallocStats stats;
- ExtractStats(&stats, NULL);
- *value = stats.system_bytes;
- return true;
- }
-
- if (strcmp(name, "generic.committed_bytes") == 0) {
- TCMallocStats stats;
- ExtractStats(&stats, NULL);
- *value = stats.committed_bytes + stats.metadata_bytes;
- return true;
- }
-
- if (strcmp(name, "tcmalloc.slack_bytes") == 0) {
- // We assume that bytes in the page heap are not fragmented too
- // badly, and are therefore available for allocation.
- SpinLockHolder l(Static::pageheap_lock());
- *value = Static::pageheap()->FreeBytes();
- return true;
- }
-
- if (strcmp(name, "tcmalloc.max_total_thread_cache_bytes") == 0) {
- SpinLockHolder l(Static::pageheap_lock());
- *value = ThreadCache::overall_thread_cache_size();
- return true;
- }
-
- if (strcmp(name, "tcmalloc.current_total_thread_cache_bytes") == 0) {
- TCMallocStats stats;
- ExtractStats(&stats, NULL);
- *value = stats.thread_bytes;
- return true;
- }
-
- return false;
- }
-
- virtual bool SetNumericProperty(const char* name, size_t value) {
- ASSERT(name != NULL);
-
- if (strcmp(name, "tcmalloc.max_total_thread_cache_bytes") == 0) {
- SpinLockHolder l(Static::pageheap_lock());
- ThreadCache::set_overall_thread_cache_size(value);
- return true;
- }
-
- return false;
- }
-
- virtual void MarkThreadIdle() {
- ThreadCache::BecomeIdle();
- }
-
- virtual void ReleaseFreeMemory() {
- SpinLockHolder h(Static::pageheap_lock());
- Static::pageheap()->ReleaseFreePages();
- }
-
- virtual void SetMemoryReleaseRate(double rate) {
- FLAGS_tcmalloc_release_rate = rate;
- }
-
- virtual double GetMemoryReleaseRate() {
- return FLAGS_tcmalloc_release_rate;
- }
- virtual size_t GetEstimatedAllocatedSize(size_t size) {
- if (size <= kMaxSize) {
- const size_t cl = Static::sizemap()->SizeClass(size);
- const size_t alloc_size = Static::sizemap()->ByteSizeForClass(cl);
- return alloc_size;
- } else {
- return tcmalloc::pages(size) << kPageShift;
- }
- }
-
- // This just calls GetSizeWithCallback, but because that's in an
- // unnamed namespace, we need to move the definition below it in the
- // file.
- virtual size_t GetAllocatedSize(void* ptr);
-};
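Callers reach these properties through the MallocExtension singleton; a small usage example with one of the generic.* names handled above:

    #include <google/malloc_extension.h>

    size_t CurrentHeapSize() {
      size_t heap_size = 0;
      // GetNumericProperty returns false for unknown property names.
      MallocExtension::instance()->GetNumericProperty("generic.heap_size",
                                                      &heap_size);
      return heap_size;
    }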
-
-// The constructor allocates an object to ensure that initialization
-// runs before main(), and therefore we do not have a chance to become
-// multi-threaded before initialization. We also create the TSD key
-// here. Presumably by the time this constructor runs, glibc is in
-// good enough shape to handle pthread_key_create().
-//
-// The constructor also takes the opportunity to tell STL to use
-// tcmalloc.  We want to do this early, before constructors run, so
-// all user STL allocations go through tcmalloc (which works really
-// well for STL).
-//
-// The destructor prints stats when the program exits.
-static int tcmallocguard_refcount = 0; // no lock needed: runs before main()
-TCMallocGuard::TCMallocGuard() {
- if (tcmallocguard_refcount++ == 0) {
-#ifdef HAVE_TLS // this is true if the cc/ld/libc combo support TLS
- // Check whether the kernel also supports TLS (needs to happen at runtime)
- tcmalloc::CheckIfKernelSupportsTLS();
-#endif
-#ifdef WIN32_DO_PATCHING
- // patch the windows VirtualAlloc, etc.
- PatchWindowsFunctions(); // defined in windows/patch_functions.cc
-#endif
- free(malloc(1));
- ThreadCache::InitTSD();
- free(malloc(1));
- MallocExtension::Register(new TCMallocImplementation);
- }
-}
-
-TCMallocGuard::~TCMallocGuard() {
- if (--tcmallocguard_refcount == 0) {
- const char* env = getenv("MALLOCSTATS");
- if (env != NULL) {
- int level = atoi(env);
- if (level < 1) level = 1;
- PrintStats(level);
- }
- }
-}
-#ifndef WIN32_OVERRIDE_ALLOCATORS
-static TCMallocGuard module_enter_exit_hook;
-#endif
-
-//-------------------------------------------------------------------
-// Helpers for the exported routines below
-//-------------------------------------------------------------------
-
-static Span* DoSampledAllocation(size_t size) {
- // Grab the stack trace outside the heap lock
- StackTrace tmp;
- tmp.depth = GetStackTrace(tmp.stack, tcmalloc::kMaxStackDepth, 1);
- tmp.size = size;
-
- SpinLockHolder h(Static::pageheap_lock());
- // Allocate span
- Span *span = Static::pageheap()->New(tcmalloc::pages(size == 0 ? 1 : size));
- if (span == NULL) {
- return NULL;
- }
-
- // Allocate stack trace
- StackTrace *stack = Static::stacktrace_allocator()->New();
- if (stack == NULL) {
- // Sampling failed because of lack of memory
- return span;
- }
-
- *stack = tmp;
- span->sample = 1;
- span->objects = stack;
- tcmalloc::DLL_Prepend(Static::sampled_objects(), span);
-
- return span;
-}
-
-static inline bool CheckCachedSizeClass(void *ptr) {
- PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift;
- size_t cached_value = Static::pageheap()->GetSizeClassIfCached(p);
- return cached_value == 0 ||
- cached_value == Static::pageheap()->GetDescriptor(p)->sizeclass;
-}
-
-static inline void* CheckedMallocResult(void *result) {
- ASSERT(result == 0 || CheckCachedSizeClass(result));
- return result;
-}
-
-static inline void* SpanToMallocResult(Span *span) {
- Static::pageheap()->CacheSizeClass(span->start, 0);
- return
- CheckedMallocResult(reinterpret_cast<void*>(span->start << kPageShift));
-}
-
-// Copy of FLAGS_tcmalloc_large_alloc_report_threshold with
-// automatic increases factored in.
-static int64_t large_alloc_threshold =
- (kPageSize > FLAGS_tcmalloc_large_alloc_report_threshold
- ? kPageSize : FLAGS_tcmalloc_large_alloc_report_threshold);
-
-static void ReportLargeAlloc(Length num_pages, void* result) {
- StackTrace stack;
- stack.depth = GetStackTrace(stack.stack, tcmalloc::kMaxStackDepth, 1);
-
- static const int N = 1000;
- char buffer[N];
- TCMalloc_Printer printer(buffer, N);
- printer.printf("tcmalloc: large alloc %llu bytes == %p @ ",
- static_cast<unsigned long long>(num_pages) << kPageShift,
- result);
- for (int i = 0; i < stack.depth; i++) {
- printer.printf(" %p", stack.stack[i]);
- }
- printer.printf("\n");
- write(STDERR_FILENO, buffer, strlen(buffer));
-}
-
-namespace {
-
-// Helper for do_malloc().
-inline void* do_malloc_pages(Length num_pages) {
- Span *span;
- bool report_large = false;
- {
- SpinLockHolder h(Static::pageheap_lock());
- span = Static::pageheap()->New(num_pages);
- const int64 threshold = large_alloc_threshold;
- if (threshold > 0 && num_pages >= (threshold >> kPageShift)) {
- // Increase the threshold by 1/8 every time we generate a report.
- // We cap the threshold at 8GB to avoid overflow problems.
- large_alloc_threshold = (threshold + threshold/8 < 8ll<<30
- ? threshold + threshold/8 : 8ll<<30);
- report_large = true;
- }
- }
-
- void* result = (span == NULL ? NULL : SpanToMallocResult(span));
- if (report_large) {
- ReportLargeAlloc(num_pages, result);
- }
- return result;
-}
-
-inline void* do_malloc(size_t size) {
- void* ret = NULL;
-
- // The following call forces module initialization
- ThreadCache* heap = ThreadCache::GetCache();
- if ((FLAGS_tcmalloc_sample_parameter > 0) && heap->SampleAllocation(size)) {
- Span* span = DoSampledAllocation(size);
- if (span != NULL) {
- ret = SpanToMallocResult(span);
- }
- } else if (size <= kMaxSize) {
- // The common case, and also the simplest. This just pops the
- // size-appropriate freelist, after replenishing it if it's empty.
- ret = CheckedMallocResult(heap->Allocate(size));
- } else {
- ret = do_malloc_pages(tcmalloc::pages(size));
- }
- if (ret == NULL) errno = ENOMEM;
- return ret;
-}
-
-inline void* do_calloc(size_t n, size_t elem_size) {
- // Overflow check
- const size_t size = n * elem_size;
- if (elem_size != 0 && size / elem_size != n) return NULL;
-
- void* result = do_malloc(size);
- if (result != NULL) {
- memset(result, 0, size);
- }
- return result;
-}
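The guard above is the classic calloc overflow check. On a 32-bit build, for instance, n = 0x10000 and elem_size = 0x10001 multiply to 0x100010000, which truncates to 0x10000 in a 32-bit size_t; since 0x10000 / 0x10001 == 0 != n, the request is rejected instead of handing back a 64 KB block for what the caller believes is a ~4 GB array.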
-
-static inline ThreadCache* GetCacheIfPresent() {
- void* const p = ThreadCache::GetCacheIfPresent();
- return reinterpret_cast<ThreadCache*>(p);
-}
-
-// This lets you call back to a given function pointer if ptr is invalid.
-// It is used primarily by windows code which wants a specialized callback.
-inline void do_free_with_callback(void* ptr, void (*invalid_free_fn)(void*)) {
- if (ptr == NULL) return;
- ASSERT(Static::pageheap() != NULL); // Should not call free() before malloc()
- const PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift;
- Span* span = NULL;
- size_t cl = Static::pageheap()->GetSizeClassIfCached(p);
-
- if (cl == 0) {
- span = Static::pageheap()->GetDescriptor(p);
- if (!span) {
- // span can be NULL because the pointer passed in is invalid
- // (not something returned by malloc or friends), or because the
- // pointer was allocated with some other allocator besides
- // tcmalloc. The latter can happen if tcmalloc is linked in via
- // a dynamic library, but is not listed last on the link line.
- // In that case, libraries after it on the link line will
- // allocate with libc malloc, but free with tcmalloc's free.
- (*invalid_free_fn)(ptr); // Decide how to handle the bad free request
- return;
- }
- cl = span->sizeclass;
- Static::pageheap()->CacheSizeClass(p, cl);
- }
- if (cl != 0) {
- ASSERT(!Static::pageheap()->GetDescriptor(p)->sample);
- ThreadCache* heap = GetCacheIfPresent();
- if (heap != NULL) {
- heap->Deallocate(ptr, cl);
- } else {
- // Delete directly into central cache
- tcmalloc::SLL_SetNext(ptr, NULL);
- Static::central_cache()[cl].InsertRange(ptr, ptr, 1);
- }
- } else {
- SpinLockHolder h(Static::pageheap_lock());
- ASSERT(reinterpret_cast<uintptr_t>(ptr) % kPageSize == 0);
- ASSERT(span != NULL && span->start == p);
- if (span->sample) {
- tcmalloc::DLL_Remove(span);
- Static::stacktrace_allocator()->Delete(
- reinterpret_cast<StackTrace*>(span->objects));
- span->objects = NULL;
- }
- Static::pageheap()->Delete(span);
- }
-}
-
-// The default "do_free" that uses the default callback.
-inline void do_free(void* ptr) {
- return do_free_with_callback(ptr, &InvalidFree);
-}
-
-inline size_t GetSizeWithCallback(void* ptr,
- size_t (*invalid_getsize_fn)(void*)) {
- if (ptr == NULL)
- return 0;
- const PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift;
- size_t cl = Static::pageheap()->GetSizeClassIfCached(p);
- if (cl != 0) {
- return Static::sizemap()->ByteSizeForClass(cl);
- } else {
- Span *span = Static::pageheap()->GetDescriptor(p);
- if (span == NULL) { // means we do not own this memory
- return (*invalid_getsize_fn)(ptr);
- } else if (span->sizeclass != 0) {
- Static::pageheap()->CacheSizeClass(p, span->sizeclass);
- return Static::sizemap()->ByteSizeForClass(span->sizeclass);
- } else {
- return span->length << kPageShift;
- }
- }
-}
-
-// This lets you call back to a given function pointer if ptr is invalid.
-// It is used primarily by windows code which wants a specialized callback.
-inline void* do_realloc_with_callback(
- void* old_ptr, size_t new_size,
- void (*invalid_free_fn)(void*),
- size_t (*invalid_get_size_fn)(void*)) {
- // Get the size of the old entry
- const size_t old_size = GetSizeWithCallback(old_ptr, invalid_get_size_fn);
-
- // Reallocate if the new size is larger than the old size,
- // or if the new size is significantly smaller than the old size.
- // We do hysteresis to avoid resizing ping-pongs:
- // . If we need to grow, grow to max(new_size, old_size * 1.X)
- // . Don't shrink unless new_size < old_size * 0.Y
- // X and Y trade-off time for wasted space. For now we do 1.25 and 0.5.
-  const size_t lower_bound_to_grow = old_size + old_size / 4;
-  const size_t upper_bound_to_shrink = old_size / 2;
- if ((new_size > old_size) || (new_size < upper_bound_to_shrink)) {
- // Need to reallocate.
- void* new_ptr = NULL;
-
- if (new_size > old_size && new_size < lower_bound_to_grow) {
- new_ptr = do_malloc(lower_bound_to_grow);
- }
- if (new_ptr == NULL) {
- // Either new_size is not a tiny increment, or last do_malloc failed.
- new_ptr = do_malloc(new_size);
- }
- if (new_ptr == NULL) {
- return NULL;
- }
- MallocHook::InvokeNewHook(new_ptr, new_size);
- memcpy(new_ptr, old_ptr, ((old_size < new_size) ? old_size : new_size));
- MallocHook::InvokeDeleteHook(old_ptr);
- // We could use a variant of do_free() that leverages the fact
- // that we already know the sizeclass of old_ptr. The benefit
- // would be small, so don't bother.
- do_free_with_callback(old_ptr, invalid_free_fn);
- return new_ptr;
- } else {
- // We still need to call hooks to report the updated size:
- MallocHook::InvokeDeleteHook(old_ptr);
- MallocHook::InvokeNewHook(old_ptr, new_size);
- return old_ptr;
- }
-}
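To make the hysteresis concrete: with old_size = 1000, lower_bound_to_grow is 1250 and upper_bound_to_shrink is 500. A realloc to 1100 therefore allocates 1250 bytes up front (absorbing the next few small growths), a realloc to 600 returns the original pointer untouched, and only a request below 500 actually shrinks the block.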
-
-inline void* do_realloc(void* old_ptr, size_t new_size) {
- return do_realloc_with_callback(old_ptr, new_size,
- &InvalidFree, &InvalidGetSizeForRealloc);
-}
-
-// For use by exported routines below that want specific alignments
-//
-// Note: this code can be slow, and can significantly fragment memory.
-// The expectation is that memalign/posix_memalign/valloc/pvalloc will
-// not be invoked very often. This requirement simplifies our
-// implementation and allows us to tune for expected allocation
-// patterns.
-void* do_memalign(size_t align, size_t size) {
- ASSERT((align & (align - 1)) == 0);
- ASSERT(align > 0);
- if (size + align < size) return NULL; // Overflow
-
- if (Static::pageheap() == NULL) ThreadCache::InitModule();
-
- // Allocate at least one byte to avoid boundary conditions below
- if (size == 0) size = 1;
-
- if (size <= kMaxSize && align < kPageSize) {
- // Search through acceptable size classes looking for one with
- // enough alignment. This depends on the fact that
- // InitSizeClasses() currently produces several size classes that
- // are aligned at powers of two. We will waste time and space if
- // we miss in the size class array, but that is deemed acceptable
- // since memalign() should be used rarely.
- int cl = Static::sizemap()->SizeClass(size);
- while (cl < kNumClasses &&
- ((Static::sizemap()->class_to_size(cl) & (align - 1)) != 0)) {
- cl++;
- }
- if (cl < kNumClasses) {
- ThreadCache* heap = ThreadCache::GetCache();
- return CheckedMallocResult(heap->Allocate(
- Static::sizemap()->class_to_size(cl)));
- }
- }
-
- // We will allocate directly from the page heap
- SpinLockHolder h(Static::pageheap_lock());
-
- if (align <= kPageSize) {
- // Any page-level allocation will be fine
- // TODO: We could put the rest of this page in the appropriate
- // TODO: cache but it does not seem worth it.
- Span* span = Static::pageheap()->New(tcmalloc::pages(size));
- return span == NULL ? NULL : SpanToMallocResult(span);
- }
-
- // Allocate extra pages and carve off an aligned portion
- const Length alloc = tcmalloc::pages(size + align);
- Span* span = Static::pageheap()->New(alloc);
- if (span == NULL) return NULL;
-
- // Skip starting portion so that we end up aligned
- Length skip = 0;
- while ((((span->start+skip) << kPageShift) & (align - 1)) != 0) {
- skip++;
- }
- ASSERT(skip < alloc);
- if (skip > 0) {
- Span* rest = Static::pageheap()->Split(span, skip);
- Static::pageheap()->Delete(span);
- span = rest;
- }
-
- // Skip trailing portion that we do not need to return
- const Length needed = tcmalloc::pages(size);
- ASSERT(span->length >= needed);
- if (span->length > needed) {
- Span* trailer = Static::pageheap()->Split(span, needed);
- Static::pageheap()->Delete(trailer);
- }
- return SpanToMallocResult(span);
-}
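A worked example, assuming 4 KB pages (kPageShift == 12): for align = 8192 and size = 20000, the code allocates pages(20000 + 8192) = 7 pages, skips at most one leading page to reach a start address that is a multiple of 8192, keeps the pages(20000) = 5 pages it needs, and hands the leading and trailing remainders back to the page heap via Split and Delete.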
-
-// Helpers for use by exported routines below:
-
-inline void do_malloc_stats() {
- PrintStats(1);
-}
-
-inline int do_mallopt(int cmd, int value) {
- return 1; // Indicates error
-}
-
-#ifdef HAVE_STRUCT_MALLINFO // mallinfo isn't defined on freebsd, for instance
-inline struct mallinfo do_mallinfo() {
- TCMallocStats stats;
- ExtractStats(&stats, NULL);
-
- // Just some of the fields are filled in.
- struct mallinfo info;
- memset(&info, 0, sizeof(info));
-
-  // Unfortunately, the struct contains "int" fields, so some of the
- // size values will be truncated.
- info.arena = static_cast<int>(stats.system_bytes);
- info.fsmblks = static_cast<int>(stats.thread_bytes
- + stats.central_bytes
- + stats.transfer_bytes);
- info.fordblks = static_cast<int>(stats.pageheap_bytes);
- info.uordblks = static_cast<int>(stats.system_bytes
- - stats.thread_bytes
- - stats.central_bytes
- - stats.transfer_bytes
- - stats.pageheap_bytes);
-
- return info;
-}
-#endif  // #ifdef HAVE_STRUCT_MALLINFO
-
-static SpinLock set_new_handler_lock(SpinLock::LINKER_INITIALIZED);
-
-inline void* cpp_alloc(size_t size, bool nothrow) {
- for (;;) {
- void* p = do_malloc(size);
-#ifdef PREANSINEW
- return p;
-#else
- if (p == NULL) { // allocation failed
- // Get the current new handler. NB: this function is not
- // thread-safe. We make a feeble stab at making it so here, but
- // this lock only protects against tcmalloc interfering with
- // itself, not with other libraries calling set_new_handler.
- std::new_handler nh;
- {
- SpinLockHolder h(&set_new_handler_lock);
- nh = std::set_new_handler(0);
- (void) std::set_new_handler(nh);
- }
-#if (defined(__GNUC__) && !defined(__EXCEPTIONS)) || (defined(_HAS_EXCEPTIONS) && !_HAS_EXCEPTIONS)
- if (nh) {
- // Since exceptions are disabled, we don't really know if new_handler
- // failed. Assume it will abort if it fails.
- (*nh)();
- continue;
- }
- return 0;
-#else
- // If no new_handler is established, the allocation failed.
- if (!nh) {
- if (nothrow) return 0;
- throw std::bad_alloc();
- }
- // Otherwise, try the new_handler. If it returns, retry the
- // allocation. If it throws std::bad_alloc, fail the allocation.
-      // If it throws something else, don't interfere.
- try {
- (*nh)();
- } catch (const std::bad_alloc&) {
- if (!nothrow) throw;
- return p;
- }
-#endif // (defined(__GNUC__) && !defined(__EXCEPTIONS)) || (defined(_HAS_EXCEPTIONS) && !_HAS_EXCEPTIONS)
- } else { // allocation success
- return p;
- }
-#endif // PREANSINEW
- }
-}
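The retry loop above is what makes a user-installed new-handler useful here: if the handler can genuinely release memory, the retried do_malloc can succeed. A toy handler showing that contract (the emergency reserve is invented for the example):

    #include <cstdlib>
    #include <new>

    static char* emergency_reserve =
        static_cast<char*>(std::malloc(1 << 20));

    // Invoked by the allocator's retry loop when an allocation fails.
    static void ReleaseReserve() {
      if (emergency_reserve != NULL) {
        std::free(emergency_reserve);  // give back 1 MB; the loop retries
        emergency_reserve = NULL;
      } else {
        throw std::bad_alloc();        // nothing left: fail the allocation
      }
    }

    // Installed once at startup:
    //   std::set_new_handler(ReleaseReserve);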
-
-} // end unnamed namespace
-
-// As promised, the definition of this function, declared above.
-size_t TCMallocImplementation::GetAllocatedSize(void* ptr) {
- return GetSizeWithCallback(ptr, &InvalidGetAllocatedSize);
-}
-
-//-------------------------------------------------------------------
-// Exported routines
-//-------------------------------------------------------------------
-
-// CAVEAT: The code structure below ensures that MallocHook methods are always
-// called from the stack frame of the invoked allocation function.
-// heap-checker.cc depends on this to start a stack trace from
-// the call to the (de)allocation function.
-
-static int tc_new_mode = 0; // See tc_set_new_mode().
-extern "C" void* tc_malloc(size_t size) __THROW {
- void* result = (tc_new_mode ? cpp_alloc(size, false) : do_malloc(size));
- MallocHook::InvokeNewHook(result, size);
- return result;
-}
-
-extern "C" void tc_free(void* ptr) __THROW {
- MallocHook::InvokeDeleteHook(ptr);
- do_free(ptr);
-}
-
-extern "C" void* tc_calloc(size_t n, size_t elem_size) __THROW {
- void* result = do_calloc(n, elem_size);
- MallocHook::InvokeNewHook(result, n * elem_size);
- return result;
-}
-
-extern "C" void tc_cfree(void* ptr) __THROW {
- MallocHook::InvokeDeleteHook(ptr);
- do_free(ptr);
-}
-
-extern "C" void* tc_realloc(void* old_ptr, size_t new_size) __THROW {
- if (old_ptr == NULL) {
- void* result = do_malloc(new_size);
- MallocHook::InvokeNewHook(result, new_size);
- return result;
- }
- if (new_size == 0) {
- MallocHook::InvokeDeleteHook(old_ptr);
- do_free(old_ptr);
- return NULL;
- }
- return do_realloc(old_ptr, new_size);
-}
-
-extern "C" void* tc_new(size_t size) {
- void* p = cpp_alloc(size, false);
- // We keep this next instruction out of cpp_alloc for a reason: when
- // it's in, and new just calls cpp_alloc, the optimizer may fold the
- // new call into cpp_alloc, which messes up our whole section-based
- // stacktracing (see ATTRIBUTE_SECTION, above). This ensures cpp_alloc
- // isn't the last thing this fn calls, and prevents the folding.
- MallocHook::InvokeNewHook(p, size);
- return p;
-}
-
-extern "C" void* tc_new_nothrow(size_t size, const std::nothrow_t&) __THROW {
- void* p = cpp_alloc(size, true);
- MallocHook::InvokeNewHook(p, size);
- return p;
-}
-
-extern "C" void tc_delete(void* p) __THROW {
- MallocHook::InvokeDeleteHook(p);
- do_free(p);
-}
-
-extern "C" void* tc_newarray(size_t size) {
- void* p = cpp_alloc(size, false);
- // We keep this next instruction out of cpp_alloc for a reason: when
- // it's in, and new just calls cpp_alloc, the optimizer may fold the
- // new call into cpp_alloc, which messes up our whole section-based
- // stacktracing (see ATTRIBUTE_SECTION, above). This ensures cpp_alloc
- // isn't the last thing this fn calls, and prevents the folding.
- MallocHook::InvokeNewHook(p, size);
- return p;
-}
-
-extern "C" void* tc_newarray_nothrow(size_t size, const std::nothrow_t&) __THROW {
- void* p = cpp_alloc(size, true);
- MallocHook::InvokeNewHook(p, size);
- return p;
-}
-
-extern "C" void tc_deletearray(void* p) __THROW {
- MallocHook::InvokeDeleteHook(p);
- do_free(p);
-}
-
-extern "C" void* tc_memalign(size_t align, size_t size) __THROW {
- void* result = do_memalign(align, size);
- MallocHook::InvokeNewHook(result, size);
- return result;
-}
-
-extern "C" int tc_posix_memalign(void** result_ptr, size_t align, size_t size)
- __THROW {
- if (((align % sizeof(void*)) != 0) ||
- ((align & (align - 1)) != 0) ||
- (align == 0)) {
- return EINVAL;
- }
-
- void* result = do_memalign(align, size);
- MallocHook::InvokeNewHook(result, size);
- if (result == NULL) {
- return ENOMEM;
- } else {
- *result_ptr = result;
- return 0;
- }
-}
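Following the checks above, tc_posix_memalign(&p, 64, 1000) succeeds on a 64-bit build (64 is a power of two and a multiple of sizeof(void*)), while align = 24 fails the power-of-two test and align = 2 fails the multiple-of-sizeof(void*) test; both return EINVAL without touching *p.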
-
-static size_t pagesize = 0;
-
-extern "C" void* tc_valloc(size_t size) __THROW {
- // Allocate page-aligned object of length >= size bytes
- if (pagesize == 0) pagesize = getpagesize();
- void* result = do_memalign(pagesize, size);
- MallocHook::InvokeNewHook(result, size);
- return result;
-}
-
-extern "C" void* tc_pvalloc(size_t size) __THROW {
- // Round up size to a multiple of pagesize
- if (pagesize == 0) pagesize = getpagesize();
- size = (size + pagesize - 1) & ~(pagesize - 1);
- void* result = do_memalign(pagesize, size);
- MallocHook::InvokeNewHook(result, size);
- return result;
-}
-
-extern "C" void tc_malloc_stats(void) __THROW {
- do_malloc_stats();
-}
-
-extern "C" int tc_mallopt(int cmd, int value) __THROW {
- return do_mallopt(cmd, value);
-}
-
-#ifdef HAVE_STRUCT_MALLINFO
-extern "C" struct mallinfo tc_mallinfo(void) __THROW {
- return do_mallinfo();
-}
-#endif
-
-// This function behaves similarly to MSVC's _set_new_mode.
-// If flag is 0 (default), calls to malloc will behave normally.
-// If flag is 1, calls to malloc will behave like calls to new,
-// and the installed std::new_handler will be invoked on failure.
-// Returns the previous mode.
-extern "C" int tc_set_new_mode(int flag) __THROW {
- int old_mode = tc_new_mode;
- tc_new_mode = flag;
- return old_mode;
-}
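A short usage sketch of the mode switch (mirroring MSVC's _set_new_mode, as described above):

    #include <new>
    #include <stdint.h>
    #include <stdlib.h>

    void NewModeExample() {
      int old_mode = tc_set_new_mode(1);
      try {
        void* p = malloc(SIZE_MAX);  // essentially certain to fail
        free(p);
      } catch (const std::bad_alloc&) {
        // Under new-mode, a failing malloc runs the new handler and,
        // if none is installed, throws instead of returning NULL.
      }
      tc_set_new_mode(old_mode);     // restore the previous behavior
    }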
-
-
-// Override __libc_memalign specially on Linux boxes.
-// Their libc has a bug that causes it to (very rarely) allocate
-// with __libc_memalign() yet deallocate with free(), and the
-// definitions above don't catch it.
-// This function is an exception to the rule of calling MallocHook method
-// from the stack frame of the allocation function;
-// heap-checker handles this special case explicitly.
-static void *MemalignOverride(size_t align, size_t size, const void *caller)
- __THROW ATTRIBUTE_SECTION(google_malloc);
-
-static void *MemalignOverride(size_t align, size_t size, const void *caller)
- __THROW {
- void* result = do_memalign(align, size);
- MallocHook::InvokeNewHook(result, size);
- return result;
-}
-void *(*__memalign_hook)(size_t, size_t, const void *) = MemalignOverride;
diff --git a/third_party/tcmalloc/tcmalloc_linux.cc b/third_party/tcmalloc/tcmalloc_linux.cc
deleted file mode 100644
index 5b8e54a..0000000
--- a/third_party/tcmalloc/tcmalloc_linux.cc
+++ /dev/null
@@ -1,1417 +0,0 @@
-// Copyright (c) 2005, Google Inc.
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// ---
-// Author: Sanjay Ghemawat <opensource@google.com>
-//
-// A malloc that uses a per-thread cache to satisfy small malloc requests.
-// (The time for malloc/free of a small object drops from 300 ns to 50 ns.)
-//
-// See doc/tcmalloc.html for a high-level
-// description of how this malloc works.
-//
-// SYNCHRONIZATION
-// 1. The thread-specific lists are accessed without acquiring any locks.
-// This is safe because each such list is only accessed by one thread.
-// 2. We have a lock per central free-list, and hold it while manipulating
-// the central free list for a particular size.
-// 3. The central page allocator is protected by "pageheap_lock".
-// 4. The pagemap (which maps from page-number to descriptor),
-// can be read without holding any locks, and written while holding
-// the "pageheap_lock".
-// 5. To improve performance, a subset of the information one can get
-// from the pagemap is cached in a data structure, pagemap_cache_,
-// that atomically reads and writes its entries. This cache can be
-// read and written without locking.
-//
-// This multi-threaded access to the pagemap is safe for fairly
-// subtle reasons. We basically assume that when an object X is
-// allocated by thread A and deallocated by thread B, there must
-// have been appropriate synchronization in the handoff of object
-// X from thread A to thread B. The same logic applies to pagemap_cache_.
-//
-// THE PAGEID-TO-SIZECLASS CACHE
-// Hot PageID-to-sizeclass mappings are held by pagemap_cache_. If this cache
-// returns 0 for a particular PageID then that means "no information," not that
-// the sizeclass is 0. The cache may have stale information for pages that do
-// not hold the beginning of any free()'able object. Staleness is eliminated
-// in Populate() for pages with sizeclass > 0 objects, and in do_malloc() and
-// do_memalign() for all other relevant pages.
-//
-// PAGEMAP
-// -------
-// Page map contains a mapping from page id to Span.
-//
-// If Span s occupies pages [p..q],
-// pagemap[p] == s
-// pagemap[q] == s
-// pagemap[p+1..q-1] are undefined
-// pagemap[p-1] and pagemap[q+1] are defined:
-// NULL if the corresponding page is not yet in the address space.
-// Otherwise it points to a Span. This span may be free
-//      or allocated. If free, it is in one of pageheap's freelists.
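-//
-// Example: if a free 3-page Span s covers pages [17..19], then
-// pagemap[17] and pagemap[19] both point at s while pagemap[18] is
-// undefined; those boundary entries are what let the page heap find
-// s when coalescing a neighbor span that ends at page 16 or starts
-// at page 20.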
-//
-// TODO: Bias reclamation to larger addresses
-// TODO: implement mallinfo/mallopt
-// TODO: Better testing
-//
-// 9/28/2003 (new page-level allocator replaces ptmalloc2):
-// * malloc/free of small objects goes from ~300 ns to ~50 ns.
-// * allocation of a reasonably complicated struct
-// goes from about 1100 ns to about 300 ns.
-
-#include <config.h>
-#include <new>
-#include <stdio.h>
-#include <stddef.h>
-#if defined HAVE_STDINT_H
-#include <stdint.h>
-#elif defined HAVE_INTTYPES_H
-#include <inttypes.h>
-#else
-#include <sys/types.h>
-#endif
-#if defined(HAVE_MALLOC_H) && defined(HAVE_STRUCT_MALLINFO)
-#include <malloc.h> // for struct mallinfo
-#endif
-#include <string.h>
-#ifdef HAVE_PTHREAD
-#include <pthread.h>
-#endif
-#ifdef HAVE_UNISTD_H
-#include <unistd.h>
-#endif
-#include <errno.h>
-#include <stdarg.h>
-#include <algorithm>
-#include <google/tcmalloc.h>
-#include "base/commandlineflags.h"
-#include "base/basictypes.h" // gets us PRIu64
-#include "base/sysinfo.h"
-#include "base/spinlock.h"
-#include "common.h"
-#include "malloc_hook-inl.h"
-#include <google/malloc_hook.h>
-#include <google/malloc_extension.h>
-#include "central_freelist.h"
-#include "internal_logging.h"
-#include "linked_list.h"
-#include "maybe_threads.h"
-#include "page_heap.h"
-#include "page_heap_allocator.h"
-#include "pagemap.h"
-#include "span.h"
-#include "static_vars.h"
-#include "system-alloc.h"
-#include "tcmalloc_guard.h"
-#include "thread_cache.h"
-
-#if (defined(_WIN32) && !defined(__CYGWIN__) && !defined(__CYGWIN32__)) && !defined(WIN32_OVERRIDE_ALLOCATORS)
-# define WIN32_DO_PATCHING 1
-#endif
-
-using tcmalloc::PageHeap;
-using tcmalloc::PageHeapAllocator;
-using tcmalloc::SizeMap;
-using tcmalloc::Span;
-using tcmalloc::StackTrace;
-using tcmalloc::Static;
-using tcmalloc::ThreadCache;
-
-// __THROW is defined in glibc systems. It means, counter-intuitively,
-// "This function will never throw an exception." It's an optional
-// optimization tool, but we may need to use it to match glibc prototypes.
-#ifndef __THROW // I guess we're not on a glibc system
-# define __THROW // __THROW is just an optimization, so ok to make it ""
-#endif
-
-DECLARE_int64(tcmalloc_sample_parameter);
-DECLARE_double(tcmalloc_release_rate);
-
-// For windows, the printf we use to report large allocs is
-// potentially dangerous: it could cause a malloc that would cause an
-// infinite loop. So by default we set the threshold to a huge number
-// on windows, so this bad situation will never trigger. You can
-// always set TCMALLOC_LARGE_ALLOC_REPORT_THRESHOLD manually if you
-// want this functionality.
-#ifdef _WIN32
-const int64 kDefaultLargeAllocReportThreshold = static_cast<int64>(1) << 62;
-#else
-const int64 kDefaultLargeAllocReportThreshold = static_cast<int64>(1) << 30;
-#endif
-DEFINE_int64(tcmalloc_large_alloc_report_threshold,
- EnvToInt64("TCMALLOC_LARGE_ALLOC_REPORT_THRESHOLD",
- kDefaultLargeAllocReportThreshold),
- "Allocations larger than this value cause a stack "
- "trace to be dumped to stderr. The threshold for "
- "dumping stack traces is increased by a factor of 1.125 "
- "every time we print a message so that the threshold "
- "automatically goes up by a factor of ~1000 every 60 "
- "messages. This bounds the amount of extra logging "
- "generated by this flag. Default value of this flag "
- "is very large and therefore you should see no extra "
- "logging unless the flag is overridden. Set to 0 to "
- "disable reporting entirely.");
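-// (For the arithmetic above: 1.125^60 is roughly 1170, which is where
-// the "factor of ~1000 every 60 messages" figure comes from.)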
-
-
-// We already declared these functions in tcmalloc.h, but we have to
-// declare them again to give them an ATTRIBUTE_SECTION: we want to
-// put all callers of MallocHook::Invoke* in this module into
-// ATTRIBUTE_SECTION(google_malloc) section, so that
-// MallocHook::GetCallerStackTrace can function accurately.
-#ifndef _WIN32 // windows doesn't have attribute_section, so don't bother
-extern "C" {
- void* tc_malloc(size_t size) __THROW
- ATTRIBUTE_SECTION(google_malloc);
- void tc_free(void* ptr) __THROW
- ATTRIBUTE_SECTION(google_malloc);
- void* tc_realloc(void* ptr, size_t size) __THROW
- ATTRIBUTE_SECTION(google_malloc);
- void* tc_calloc(size_t nmemb, size_t size) __THROW
- ATTRIBUTE_SECTION(google_malloc);
- void tc_cfree(void* ptr) __THROW
- ATTRIBUTE_SECTION(google_malloc);
-
- void* tc_memalign(size_t __alignment, size_t __size) __THROW
- ATTRIBUTE_SECTION(google_malloc);
- int tc_posix_memalign(void** ptr, size_t align, size_t size) __THROW
- ATTRIBUTE_SECTION(google_malloc);
- void* tc_valloc(size_t __size) __THROW
- ATTRIBUTE_SECTION(google_malloc);
- void* tc_pvalloc(size_t __size) __THROW
- ATTRIBUTE_SECTION(google_malloc);
-
- void tc_malloc_stats(void) __THROW
- ATTRIBUTE_SECTION(google_malloc);
- int tc_mallopt(int cmd, int value) __THROW
- ATTRIBUTE_SECTION(google_malloc);
-#ifdef HAVE_STRUCT_MALLINFO // struct mallinfo isn't defined on freebsd
- struct mallinfo tc_mallinfo(void) __THROW
- ATTRIBUTE_SECTION(google_malloc);
-#endif
-
- void* tc_new(size_t size)
- ATTRIBUTE_SECTION(google_malloc);
- void tc_delete(void* p) __THROW
- ATTRIBUTE_SECTION(google_malloc);
- void* tc_newarray(size_t size)
- ATTRIBUTE_SECTION(google_malloc);
- void tc_deletearray(void* p) __THROW
- ATTRIBUTE_SECTION(google_malloc);
-
- // And the nothrow variants of these:
- void* tc_new_nothrow(size_t size, const std::nothrow_t&) __THROW
- ATTRIBUTE_SECTION(google_malloc);
- void* tc_newarray_nothrow(size_t size, const std::nothrow_t&) __THROW
- ATTRIBUTE_SECTION(google_malloc);
-  // Surprisingly, compilers use a nothrow-delete internally. See, e.g.:
- // http://www.dinkumware.com/manuals/?manual=compleat&page=new.html
- void tc_delete_nothrow(void* ptr, const std::nothrow_t&) __THROW
- ATTRIBUTE_SECTION(google_malloc);
- void tc_deletearray_nothrow(void* ptr, const std::nothrow_t&) __THROW
- ATTRIBUTE_SECTION(google_malloc);
-} // extern "C"
-#endif // #ifndef _WIN32
-
-// Override the libc functions to prefer our own instead. This comes
-// first so code in tcmalloc.cc can use the overridden versions. One
-// exception: in windows, by default, we patch our code into these
-// functions (via src/windows/patch_function.cc) rather than override
-// them. In that case, we don't want to do this overriding here.
-#if !defined(WIN32_DO_PATCHING) && !defined(TCMALLOC_FOR_DEBUGALLOCATION)
-
-#if defined(__GNUC__) && !defined(__MACH__)
- // Potentially faster variants that use the gcc alias extension.
- // FreeBSD does support aliases, but apparently not correctly. :-(
- // NOTE: we make many of these symbols weak, but do so in the makefile
- // (via objcopy -W) and not here. That ends up being more portable.
-# define ALIAS(x) __attribute__ ((alias (x)))
-void* operator new(size_t size) ALIAS("tc_new");
-void operator delete(void* p) __THROW ALIAS("tc_delete");
-void* operator new[](size_t size) ALIAS("tc_newarray");
-void operator delete[](void* p) __THROW ALIAS("tc_deletearray");
-void* operator new(size_t size, const std::nothrow_t&) __THROW
- ALIAS("tc_new_nothrow");
-void* operator new[](size_t size, const std::nothrow_t&) __THROW
- ALIAS("tc_newarray_nothrow");
-void operator delete(void* p, const std::nothrow_t&) __THROW
-    ALIAS("tc_delete_nothrow");
-void operator delete[](void* p, const std::nothrow_t&) __THROW
-    ALIAS("tc_deletearray_nothrow");
-extern "C" {
- void* malloc(size_t size) __THROW ALIAS("tc_malloc");
- void free(void* ptr) __THROW ALIAS("tc_free");
- void* realloc(void* ptr, size_t size) __THROW ALIAS("tc_realloc");
- void* calloc(size_t n, size_t size) __THROW ALIAS("tc_calloc");
- void cfree(void* ptr) __THROW ALIAS("tc_cfree");
- void* memalign(size_t align, size_t s) __THROW ALIAS("tc_memalign");
- void* valloc(size_t size) __THROW ALIAS("tc_valloc");
- void* pvalloc(size_t size) __THROW ALIAS("tc_pvalloc");
- int posix_memalign(void** r, size_t a, size_t s) __THROW
- ALIAS("tc_posix_memalign");
- void malloc_stats(void) __THROW ALIAS("tc_malloc_stats");
- int mallopt(int cmd, int value) __THROW ALIAS("tc_mallopt");
-#ifdef HAVE_STRUCT_MALLINFO
- struct mallinfo mallinfo(void) __THROW ALIAS("tc_mallinfo");
-#endif
-} // extern "C"
-#else // #if defined(__GNUC__) && !defined(__MACH__)
-// Portable wrappers
-void* operator new(size_t size) { return tc_new(size); }
-void operator delete(void* p) __THROW { tc_delete(p); }
-void* operator new[](size_t size) { return tc_newarray(size); }
-void operator delete[](void* p) __THROW { tc_deletearray(p); }
-void* operator new(size_t size, const std::nothrow_t& nt) __THROW {
- return tc_new_nothrow(size, nt);
-}
-void* operator new[](size_t size, const std::nothrow_t& nt) __THROW {
- return tc_newarray_nothrow(size, nt);
-}
-void operator delete(void* ptr, const std::nothrow_t& nt) __THROW {
- return tc_delete_nothrow(ptr, nt);
-}
-void operator delete[](void* ptr, const std::nothrow_t& nt) __THROW {
- return tc_deletearray_nothrow(ptr, nt);
-}
-extern "C" {
- void* malloc(size_t s) __THROW { return tc_malloc(s); }
- void free(void* p) __THROW { tc_free(p); }
- void* realloc(void* p, size_t s) __THROW { return tc_realloc(p, s); }
- void* calloc(size_t n, size_t s) __THROW { return tc_calloc(n, s); }
- void cfree(void* p) __THROW { tc_cfree(p); }
- void* memalign(size_t a, size_t s) __THROW { return tc_memalign(a, s); }
- void* valloc(size_t s) __THROW { return tc_valloc(s); }
- void* pvalloc(size_t s) __THROW { return tc_pvalloc(s); }
- int posix_memalign(void** r, size_t a, size_t s) __THROW {
- return tc_posix_memalign(r, a, s);
- }
- void malloc_stats(void) __THROW { tc_malloc_stats(); }
- int mallopt(int cmd, int v) __THROW { return tc_mallopt(cmd, v); }
-#ifdef HAVE_STRUCT_MALLINFO
- struct mallinfo mallinfo(void) __THROW { return tc_mallinfo(); }
-#endif
-} // extern "C"
-#endif // #if defined(__GNUC__)
-
-// Some library routines on RedHat 9 allocate memory using malloc()
-// and free it using __libc_free() (or vice-versa). Since we provide
-// our own implementations of malloc/free, we need to make sure that
-// the __libc_XXX variants (defined as part of glibc) also point to
-// the same implementations.
-#ifdef __GLIBC__ // only glibc defines __libc_*
-extern "C" {
-#ifdef ALIAS
- void* __libc_malloc(size_t size) ALIAS("tc_malloc");
- void __libc_free(void* ptr) ALIAS("tc_free");
- void* __libc_realloc(void* ptr, size_t size) ALIAS("tc_realloc");
- void* __libc_calloc(size_t n, size_t size) ALIAS("tc_calloc");
- void __libc_cfree(void* ptr) ALIAS("tc_cfree");
- void* __libc_memalign(size_t align, size_t s) ALIAS("tc_memalign");
- void* __libc_valloc(size_t size) ALIAS("tc_valloc");
- void* __libc_pvalloc(size_t size) ALIAS("tc_pvalloc");
- int __posix_memalign(void** r, size_t a, size_t s) ALIAS("tc_posix_memalign");
-#else // #ifdef ALIAS
- void* __libc_malloc(size_t size) { return malloc(size); }
- void __libc_free(void* ptr) { free(ptr); }
- void* __libc_realloc(void* ptr, size_t size) { return realloc(ptr, size); }
- void* __libc_calloc(size_t n, size_t size) { return calloc(n, size); }
- void __libc_cfree(void* ptr) { cfree(ptr); }
- void* __libc_memalign(size_t align, size_t s) { return memalign(align, s); }
- void* __libc_valloc(size_t size) { return valloc(size); }
- void* __libc_pvalloc(size_t size) { return pvalloc(size); }
- int __posix_memalign(void** r, size_t a, size_t s) {
- return posix_memalign(r, a, s);
- }
-#endif // #ifdef ALIAS
-} // extern "C"
-#endif // ifdef __GLIBC__
-
-#undef ALIAS
-
-#endif // #ifndef(WIN32_DO_PATCHING) && ndef(TCMALLOC_FOR_DEBUGALLOCATION)
-
-
-// ----------------------- IMPLEMENTATION -------------------------------
-
-static int tc_new_mode = 0; // See tc_set_new_mode().
-
-// Routines such as free() and realloc() catch some erroneous pointers
-// passed to them, and invoke the handlers below when they do. (An erroneous
-// pointer won't be caught if it lies within a valid span, or within a stale
-// span for which the pagemap cache still holds a non-zero sizeclass.) This
-// gives these routines a cheap form of exception handling, at the cost of
-// having to edit the source wherever a different handler is wanted.
-namespace {
-void InvalidFree(void* ptr) {
- CRASH("Attempt to free invalid pointer: %p\n", ptr);
-}
-
-size_t InvalidGetSizeForRealloc(void* old_ptr) {
- CRASH("Attempt to realloc invalid pointer: %p\n", old_ptr);
- return 0;
-}
-
-size_t InvalidGetAllocatedSize(void* ptr) {
- CRASH("Attempt to get the size of an invalid pointer: %p\n", ptr);
- return 0;
-}
-} // unnamed namespace
-
-// Extract interesting stats
-struct TCMallocStats {
- uint64_t system_bytes; // Bytes alloced from system
- uint64_t thread_bytes; // Bytes in thread caches
- uint64_t central_bytes; // Bytes in central cache
- uint64_t transfer_bytes; // Bytes in central transfer cache
- uint64_t pageheap_bytes; // Bytes in page heap
- uint64_t metadata_bytes; // Bytes alloced for metadata
-};
-
-// Get stats into "r". Also get per-size-class counts if class_count != NULL
-static void ExtractStats(TCMallocStats* r, uint64_t* class_count) {
- r->central_bytes = 0;
- r->transfer_bytes = 0;
- for (int cl = 0; cl < kNumClasses; ++cl) {
- const int length = Static::central_cache()[cl].length();
- const int tc_length = Static::central_cache()[cl].tc_length();
-    const size_t size = Static::sizemap()->ByteSizeForClass(cl);
- r->central_bytes += (size * length);
- r->transfer_bytes += (size * tc_length);
- if (class_count) class_count[cl] = length + tc_length;
- }
-
- // Add stats from per-thread heaps
- r->thread_bytes = 0;
- { // scope
- SpinLockHolder h(Static::pageheap_lock());
- ThreadCache::GetThreadStats(&r->thread_bytes, class_count);
- }
-
-  { // scope
- SpinLockHolder h(Static::pageheap_lock());
- r->system_bytes = Static::pageheap()->SystemBytes();
- r->metadata_bytes = tcmalloc::metadata_system_bytes();
- r->pageheap_bytes = Static::pageheap()->FreeBytes();
- }
-}
-
-// WRITE stats to "out"
-static void DumpStats(TCMalloc_Printer* out, int level) {
- TCMallocStats stats;
- uint64_t class_count[kNumClasses];
- ExtractStats(&stats, (level >= 2 ? class_count : NULL));
-
- static const double MB = 1048576.0;
-
- if (level >= 2) {
- out->printf("------------------------------------------------\n");
- out->printf("Size class breakdown\n");
- out->printf("------------------------------------------------\n");
- uint64_t cumulative = 0;
- for (int cl = 0; cl < kNumClasses; ++cl) {
- if (class_count[cl] > 0) {
- uint64_t class_bytes =
- class_count[cl] * Static::sizemap()->ByteSizeForClass(cl);
- cumulative += class_bytes;
- out->printf("class %3d [ %8" PRIuS " bytes ] : "
- "%8" PRIu64 " objs; %5.1f MB; %5.1f cum MB\n",
- cl, Static::sizemap()->ByteSizeForClass(cl),
- class_count[cl],
- class_bytes / MB,
- cumulative / MB);
- }
- }
-
- SpinLockHolder h(Static::pageheap_lock());
- Static::pageheap()->Dump(out);
-
- out->printf("------------------------------------------------\n");
- DumpSystemAllocatorStats(out);
- }
-
- const uint64_t bytes_in_use = stats.system_bytes
- - stats.pageheap_bytes
- - stats.central_bytes
- - stats.transfer_bytes
- - stats.thread_bytes;
-
- out->printf("------------------------------------------------\n"
- "MALLOC: %12" PRIu64 " (%7.1f MB) Heap size\n"
- "MALLOC: %12" PRIu64 " (%7.1f MB) Bytes in use by application\n"
- "MALLOC: %12" PRIu64 " (%7.1f MB) Bytes free in page heap\n"
- "MALLOC: %12" PRIu64 " (%7.1f MB) Bytes free in central cache\n"
- "MALLOC: %12" PRIu64 " (%7.1f MB) Bytes free in transfer cache\n"
- "MALLOC: %12" PRIu64 " (%7.1f MB) Bytes free in thread caches\n"
- "MALLOC: %12" PRIu64 " Spans in use\n"
- "MALLOC: %12" PRIu64 " Thread heaps in use\n"
- "MALLOC: %12" PRIu64 " (%7.1f MB) Metadata allocated\n"
- "------------------------------------------------\n",
- stats.system_bytes, stats.system_bytes / MB,
- bytes_in_use, bytes_in_use / MB,
- stats.pageheap_bytes, stats.pageheap_bytes / MB,
- stats.central_bytes, stats.central_bytes / MB,
- stats.transfer_bytes, stats.transfer_bytes / MB,
- stats.thread_bytes, stats.thread_bytes / MB,
- uint64_t(Static::span_allocator()->inuse()),
- uint64_t(ThreadCache::HeapsInUse()),
- stats.metadata_bytes, stats.metadata_bytes / MB);
-}
-
-static void PrintStats(int level) {
- const int kBufferSize = 16 << 10;
- char* buffer = new char[kBufferSize];
- TCMalloc_Printer printer(buffer, kBufferSize);
- DumpStats(&printer, level);
- ssize_t size = write(STDERR_FILENO, buffer, strlen(buffer));
- // Use size so we don't get an unused variable warning.
- CHECK(size);
- delete[] buffer;
-}
-
-static void** DumpHeapGrowthStackTraces() {
- // Count how much space we need
- int needed_slots = 0;
- {
- SpinLockHolder h(Static::pageheap_lock());
- for (StackTrace* t = Static::growth_stacks();
- t != NULL;
- t = reinterpret_cast<StackTrace*>(
- t->stack[tcmalloc::kMaxStackDepth-1])) {
- needed_slots += 3 + t->depth;
- }
- needed_slots += 100; // Slop in case list grows
- needed_slots += needed_slots/8; // An extra 12.5% slop
- }
-
- void** result = new void*[needed_slots];
- if (result == NULL) {
- MESSAGE("tcmalloc: allocation failed for stack trace slots",
- needed_slots * sizeof(*result));
- return NULL;
- }
-
- SpinLockHolder h(Static::pageheap_lock());
- int used_slots = 0;
- for (StackTrace* t = Static::growth_stacks();
- t != NULL;
- t = reinterpret_cast<StackTrace*>(
- t->stack[tcmalloc::kMaxStackDepth-1])) {
- ASSERT(used_slots < needed_slots); // Need to leave room for terminator
- if (used_slots + 3 + t->depth >= needed_slots) {
- // No more room
- break;
- }
-
- result[used_slots+0] = reinterpret_cast<void*>(static_cast<uintptr_t>(1));
- result[used_slots+1] = reinterpret_cast<void*>(t->size);
- result[used_slots+2] = reinterpret_cast<void*>(t->depth);
- for (int d = 0; d < t->depth; d++) {
- result[used_slots+3+d] = t->stack[d];
- }
- used_slots += 3 + t->depth;
- }
- result[used_slots] = reinterpret_cast<void*>(static_cast<uintptr_t>(0));
- return result;
-}
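-
-// The buffer built above is a flat sequence of records, each laid out
-// as {1, size, depth, stack[0..depth-1]}, with a 0 slot as terminator.
-// A reader can walk it like this (sketch):
-//   for (void** r = result; *r != NULL;
-//        r += 3 + reinterpret_cast<uintptr_t>(r[2])) {
-//     const uintptr_t size  = reinterpret_cast<uintptr_t>(r[1]);
-//     const uintptr_t depth = reinterpret_cast<uintptr_t>(r[2]);
-//     // r[3] .. r[3 + depth - 1] are the stacked PC values.
-//   }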
-
-// TCMalloc's support for extra malloc interfaces
-class TCMallocImplementation : public MallocExtension {
- public:
- virtual void GetStats(char* buffer, int buffer_length) {
- ASSERT(buffer_length > 0);
- TCMalloc_Printer printer(buffer, buffer_length);
-
- // Print level one stats unless lots of space is available
- if (buffer_length < 10000) {
- DumpStats(&printer, 1);
- } else {
- DumpStats(&printer, 2);
- }
- }
-
- virtual void** ReadStackTraces(int* sample_period) {
- tcmalloc::StackTraceTable table;
- {
- SpinLockHolder h(Static::pageheap_lock());
- Span* sampled = Static::sampled_objects();
- for (Span* s = sampled->next; s != sampled; s = s->next) {
- table.AddTrace(*reinterpret_cast<StackTrace*>(s->objects));
- }
- }
- *sample_period = ThreadCache::GetCache()->GetSamplePeriod();
- return table.ReadStackTracesAndClear(); // grabs and releases pageheap_lock
- }
-
- virtual void** ReadHeapGrowthStackTraces() {
- return DumpHeapGrowthStackTraces();
- }
-
- virtual bool GetNumericProperty(const char* name, size_t* value) {
- ASSERT(name != NULL);
-
- if (strcmp(name, "generic.current_allocated_bytes") == 0) {
- TCMallocStats stats;
- ExtractStats(&stats, NULL);
- *value = stats.system_bytes
- - stats.thread_bytes
- - stats.central_bytes
- - stats.transfer_bytes
- - stats.pageheap_bytes;
- return true;
- }
-
- if (strcmp(name, "generic.heap_size") == 0) {
- TCMallocStats stats;
- ExtractStats(&stats, NULL);
- *value = stats.system_bytes;
- return true;
- }
-
- if (strcmp(name, "tcmalloc.slack_bytes") == 0) {
- // We assume that bytes in the page heap are not fragmented too
- // badly, and are therefore available for allocation.
- SpinLockHolder l(Static::pageheap_lock());
- *value = Static::pageheap()->FreeBytes();
- return true;
- }
-
- if (strcmp(name, "tcmalloc.max_total_thread_cache_bytes") == 0) {
- SpinLockHolder l(Static::pageheap_lock());
- *value = ThreadCache::overall_thread_cache_size();
- return true;
- }
-
- if (strcmp(name, "tcmalloc.current_total_thread_cache_bytes") == 0) {
- TCMallocStats stats;
- ExtractStats(&stats, NULL);
- *value = stats.thread_bytes;
- return true;
- }
-
- return false;
- }
-
- virtual bool SetNumericProperty(const char* name, size_t value) {
- ASSERT(name != NULL);
-
- if (strcmp(name, "tcmalloc.max_total_thread_cache_bytes") == 0) {
- SpinLockHolder l(Static::pageheap_lock());
- ThreadCache::set_overall_thread_cache_size(value);
- return true;
- }
-
- return false;
- }
-
- virtual void MarkThreadIdle() {
- ThreadCache::BecomeIdle();
- }
-
- virtual void MarkThreadBusy(); // Implemented below
-
- virtual void ReleaseFreeMemory() {
- SpinLockHolder h(Static::pageheap_lock());
- Static::pageheap()->ReleaseFreePages();
- }
-
- virtual void SetMemoryReleaseRate(double rate) {
- FLAGS_tcmalloc_release_rate = rate;
- }
-
- virtual double GetMemoryReleaseRate() {
- return FLAGS_tcmalloc_release_rate;
- }
- virtual size_t GetEstimatedAllocatedSize(size_t size) {
- if (size <= kMaxSize) {
- const size_t cl = Static::sizemap()->SizeClass(size);
- const size_t alloc_size = Static::sizemap()->ByteSizeForClass(cl);
- return alloc_size;
- } else {
- return tcmalloc::pages(size) << kPageShift;
- }
- }
-
- // This just calls GetSizeWithCallback, but because that's in an
- // unnamed namespace, we need to move the definition below it in the
- // file.
- virtual size_t GetAllocatedSize(void* ptr);
-};
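-
-// Example use of this extension interface (caller code, sketch):
-//   size_t heap_size = 0;
-//   if (MallocExtension::instance()->GetNumericProperty(
-//           "generic.heap_size", &heap_size)) {
-//     // heap_size now holds the total bytes obtained from the system.
-//   }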
-
-// The constructor allocates an object to ensure that initialization
-// runs before main(), and therefore we do not have a chance to become
-// multi-threaded before initialization. We also create the TSD key
-// here. Presumably by the time this constructor runs, glibc is in
-// good enough shape to handle pthread_key_create().
-//
-// The constructor also takes the opportunity to tell STL to use
-// tcmalloc. We want to do this early, before construct time, so
-// all user STL allocations go through tcmalloc (which works really
-// well for STL).
-//
-// The destructor prints stats when the program exits.
-static int tcmallocguard_refcount = 0; // no lock needed: runs before main()
-TCMallocGuard::TCMallocGuard() {
- if (tcmallocguard_refcount++ == 0) {
-#ifdef HAVE_TLS // this is true if the cc/ld/libc combo supports TLS
- // Check whether the kernel also supports TLS (needs to happen at runtime)
- tcmalloc::CheckIfKernelSupportsTLS();
-#endif
-#ifdef WIN32_DO_PATCHING
- // patch the windows VirtualAlloc, etc.
- PatchWindowsFunctions(); // defined in windows/patch_functions.cc
-#endif
- free(malloc(1));
- ThreadCache::InitTSD();
- free(malloc(1));
- MallocExtension::Register(new TCMallocImplementation);
- }
-}
-
-TCMallocGuard::~TCMallocGuard() {
- if (--tcmallocguard_refcount == 0) {
- const char* env = getenv("MALLOCSTATS");
- if (env != NULL) {
- int level = atoi(env);
- if (level < 1) level = 1;
- PrintStats(level);
- }
- }
-}
-#ifndef WIN32_OVERRIDE_ALLOCATORS
-static TCMallocGuard module_enter_exit_hook;
-#endif
-
-//-------------------------------------------------------------------
-// Helpers for the exported routines below
-//-------------------------------------------------------------------
-
-static Span* DoSampledAllocation(size_t size) {
- // Grab the stack trace outside the heap lock
- StackTrace tmp;
- tmp.depth = GetStackTrace(tmp.stack, tcmalloc::kMaxStackDepth, 1);
- tmp.size = size;
-
- SpinLockHolder h(Static::pageheap_lock());
- // Allocate span
- Span *span = Static::pageheap()->New(tcmalloc::pages(size == 0 ? 1 : size));
- if (span == NULL) {
- return NULL;
- }
-
- // Allocate stack trace
- StackTrace *stack = Static::stacktrace_allocator()->New();
- if (stack == NULL) {
- // Sampling failed because of lack of memory
- return span;
- }
-
- *stack = tmp;
- span->sample = 1;
- span->objects = stack;
- tcmalloc::DLL_Prepend(Static::sampled_objects(), span);
-
- return span;
-}
-
-static inline bool CheckCachedSizeClass(void *ptr) {
- PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift;
- size_t cached_value = Static::pageheap()->GetSizeClassIfCached(p);
- return cached_value == 0 ||
- cached_value == Static::pageheap()->GetDescriptor(p)->sizeclass;
-}
-
-static inline void* CheckedMallocResult(void* result) {
- ASSERT(result == 0 || CheckCachedSizeClass(result));
- return result;
-}
-
-static inline void* SpanToMallocResult(Span *span) {
- Static::pageheap()->CacheSizeClass(span->start, 0);
- return
- CheckedMallocResult(reinterpret_cast<void*>(span->start << kPageShift));
-}
-
-// Copy of FLAGS_tcmalloc_large_alloc_report_threshold with
-// automatic increases factored in.
-static int64_t large_alloc_threshold =
- (kPageSize > FLAGS_tcmalloc_large_alloc_report_threshold
- ? kPageSize : FLAGS_tcmalloc_large_alloc_report_threshold);
-
-static void ReportLargeAlloc(Length num_pages, void* result) {
- StackTrace stack;
- stack.depth = GetStackTrace(stack.stack, tcmalloc::kMaxStackDepth, 1);
-
- static const int N = 1000;
- char buffer[N];
- TCMalloc_Printer printer(buffer, N);
- printer.printf("tcmalloc: large alloc %llu bytes == %p @ ",
- static_cast<unsigned long long>(num_pages) << kPageShift,
- result);
- for (int i = 0; i < stack.depth; i++) {
- printer.printf(" %p", stack.stack[i]);
- }
- printer.printf("\n");
- write(STDERR_FILENO, buffer, strlen(buffer));
-}
-
-namespace {
-
-inline void* cpp_alloc(size_t size, bool nothrow);
-inline void* do_malloc(size_t size);
-
-inline void* cpp_or_malloc(size_t size, bool nothrow) {
- return tc_new_mode ? cpp_alloc(size, nothrow) : do_malloc(size);
-}
-
-inline void* cpp_memalign(size_t align, size_t size, bool nothrow);
-inline void* do_memalign(size_t align, size_t size);
-
-inline void* cpp_or_memalign(size_t align, size_t size, bool nothrow) {
- return tc_new_mode ? cpp_memalign(align, size, nothrow) :
- do_memalign(align, size);
-}
-
-// Helper for do_malloc().
-inline void* do_malloc_pages(Length num_pages) {
- Span *span;
- bool report_large = false;
- {
- SpinLockHolder h(Static::pageheap_lock());
- span = Static::pageheap()->New(num_pages);
- const int64 threshold = large_alloc_threshold;
- if (threshold > 0 && num_pages >= (threshold >> kPageShift)) {
- // Increase the threshold by 1/8 every time we generate a report.
- // We cap the threshold at 8GB to avoid overflow problems.
- large_alloc_threshold = (threshold + threshold/8 < 8ll<<30
- ? threshold + threshold/8 : 8ll<<30);
- report_large = true;
- }
- }
-
- void* result = (span == NULL ? NULL : SpanToMallocResult(span));
- if (report_large) {
- ReportLargeAlloc(num_pages, result);
- }
- return result;
-}
-
-inline void* do_malloc(size_t size) {
- void* ret = NULL;
-
- // The following call forces module initialization
- ThreadCache* heap = ThreadCache::GetCache();
- if ((FLAGS_tcmalloc_sample_parameter > 0) && heap->SampleAllocation(size)) {
- Span* span = DoSampledAllocation(size);
- if (span != NULL) {
- ret = SpanToMallocResult(span);
- }
- } else if (size <= kMaxSize) {
- // The common case, and also the simplest. This just pops the
- // size-appropriate freelist, after replenishing it if it's empty.
- ret = CheckedMallocResult(heap->Allocate(size));
- } else {
- ret = do_malloc_pages(tcmalloc::pages(size));
- }
- if (ret == NULL) errno = ENOMEM;
- return ret;
-}
-
-inline void* do_calloc(size_t n, size_t elem_size) {
- // Overflow check
- const size_t size = n * elem_size;
- if (elem_size != 0 && size / elem_size != n) return NULL;
-
- void* result = cpp_or_malloc(size, false);
- if (result != NULL) {
- memset(result, 0, size);
- }
- return result;
-}
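-
-// A worked instance of the overflow check in do_calloc, assuming a
-// 32-bit size_t: n = 0x10000 and elem_size = 0x10001 multiply to
-// 0x100010000, which truncates to 0x10000; since 0x10000 / 0x10001 is
-// 0 and not n, the call returns NULL instead of a too-short buffer.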
-
-static inline ThreadCache* GetCacheIfPresent() {
- void* const p = ThreadCache::GetCacheIfPresent();
- return reinterpret_cast<ThreadCache*>(p);
-}
-
-// This lets you call back to a given function pointer if ptr is invalid.
-// It is used primarily by windows code which wants a specialized callback.
-inline void do_free_with_callback(void* ptr, void (*invalid_free_fn)(void*)) {
- if (ptr == NULL) return;
- ASSERT(Static::pageheap() != NULL); // Should not call free() before malloc()
- const PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift;
- Span* span = NULL;
- size_t cl = Static::pageheap()->GetSizeClassIfCached(p);
-
- if (cl == 0) {
- span = Static::pageheap()->GetDescriptor(p);
- if (!span) {
- // span can be NULL because the pointer passed in is invalid
- // (not something returned by malloc or friends), or because the
- // pointer was allocated with some other allocator besides
- // tcmalloc. The latter can happen if tcmalloc is linked in via
- // a dynamic library, but is not listed last on the link line.
- // In that case, libraries after it on the link line will
- // allocate with libc malloc, but free with tcmalloc's free.
- (*invalid_free_fn)(ptr); // Decide how to handle the bad free request
- return;
- }
- cl = span->sizeclass;
- Static::pageheap()->CacheSizeClass(p, cl);
- }
- if (cl != 0) {
- ASSERT(!Static::pageheap()->GetDescriptor(p)->sample);
- ThreadCache* heap = GetCacheIfPresent();
- if (heap != NULL) {
- heap->Deallocate(ptr, cl);
- } else {
- // Delete directly into central cache
- tcmalloc::SLL_SetNext(ptr, NULL);
- Static::central_cache()[cl].InsertRange(ptr, ptr, 1);
- }
- } else {
- SpinLockHolder h(Static::pageheap_lock());
- ASSERT(reinterpret_cast<uintptr_t>(ptr) % kPageSize == 0);
- ASSERT(span != NULL && span->start == p);
- if (span->sample) {
- tcmalloc::DLL_Remove(span);
- Static::stacktrace_allocator()->Delete(
- reinterpret_cast<StackTrace*>(span->objects));
- span->objects = NULL;
- }
- Static::pageheap()->Delete(span);
- }
-}
-
-// The default "do_free" that uses the default callback.
-inline void do_free(void* ptr) {
- return do_free_with_callback(ptr, &InvalidFree);
-}
-
-inline size_t GetSizeWithCallback(void* ptr,
- size_t (*invalid_getsize_fn)(void*)) {
- if (ptr == NULL)
- return 0;
- const PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift;
- size_t cl = Static::pageheap()->GetSizeClassIfCached(p);
- if (cl != 0) {
- return Static::sizemap()->ByteSizeForClass(cl);
- } else {
- Span *span = Static::pageheap()->GetDescriptor(p);
- if (span == NULL) { // means we do not own this memory
- return (*invalid_getsize_fn)(ptr);
- } else if (span->sizeclass != 0) {
- Static::pageheap()->CacheSizeClass(p, span->sizeclass);
- return Static::sizemap()->ByteSizeForClass(span->sizeclass);
- } else {
- return span->length << kPageShift;
- }
- }
-}
-
-// This lets you call back to a given function pointer if ptr is invalid.
-// It is used primarily by windows code which wants a specialized callback.
-inline void* do_realloc_with_callback(
- void* old_ptr, size_t new_size,
- void (*invalid_free_fn)(void*),
- size_t (*invalid_get_size_fn)(void*)) {
- // Get the size of the old entry
- const size_t old_size = GetSizeWithCallback(old_ptr, invalid_get_size_fn);
-
- // Reallocate if the new size is larger than the old size,
- // or if the new size is significantly smaller than the old size.
- // We do hysteresis to avoid resizing ping-pongs:
- // . If we need to grow, grow to max(new_size, old_size * 1.X)
- // . Don't shrink unless new_size < old_size * 0.Y
- // X and Y trade-off time for wasted space. For now we do 1.25 and 0.5.
-  const size_t lower_bound_to_grow = old_size + old_size / 4;
-  const size_t upper_bound_to_shrink = old_size / 2;
- if ((new_size > old_size) || (new_size < upper_bound_to_shrink)) {
- // Need to reallocate.
- void* new_ptr = NULL;
-
- if (new_size > old_size && new_size < lower_bound_to_grow) {
- new_ptr = cpp_or_malloc(lower_bound_to_grow, false);
- }
- if (new_ptr == NULL) {
- // Either new_size is not a tiny increment, or last do_malloc failed.
- new_ptr = cpp_or_malloc(new_size, false);
- }
- if (new_ptr == NULL) {
- return NULL;
- }
- MallocHook::InvokeNewHook(new_ptr, new_size);
- memcpy(new_ptr, old_ptr, ((old_size < new_size) ? old_size : new_size));
- MallocHook::InvokeDeleteHook(old_ptr);
- // We could use a variant of do_free() that leverages the fact
- // that we already know the sizeclass of old_ptr. The benefit
- // would be small, so don't bother.
- do_free_with_callback(old_ptr, invalid_free_fn);
- return new_ptr;
- } else {
- // We still need to call hooks to report the updated size:
- MallocHook::InvokeDeleteHook(old_ptr);
- MallocHook::InvokeNewHook(old_ptr, new_size);
- return old_ptr;
- }
-}
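-
-// A worked example of the hysteresis above (hypothetical sizes): with
-// old_size == 1000, lower_bound_to_grow == 1250 and
-// upper_bound_to_shrink == 500. A realloc to any size in [500, 1000]
-// keeps the existing block, a small grow to 1100 allocates 1250 to
-// absorb further growth, and a shrink to 400 actually reallocates.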
-
-inline void* do_realloc(void* old_ptr, size_t new_size) {
- return do_realloc_with_callback(old_ptr, new_size,
- &InvalidFree, &InvalidGetSizeForRealloc);
-}
-
-// For use by exported routines below that want specific alignments
-//
-// Note: this code can be slow, and can significantly fragment memory.
-// The expectation is that memalign/posix_memalign/valloc/pvalloc will
-// not be invoked very often. This requirement simplifies our
-// implementation and allows us to tune for expected allocation
-// patterns.
-void* do_memalign(size_t align, size_t size) {
- ASSERT((align & (align - 1)) == 0);
- ASSERT(align > 0);
- if (size + align < size) return NULL; // Overflow
-
- if (Static::pageheap() == NULL) ThreadCache::InitModule();
-
- // Allocate at least one byte to avoid boundary conditions below
- if (size == 0) size = 1;
-
- if (size <= kMaxSize && align < kPageSize) {
- // Search through acceptable size classes looking for one with
- // enough alignment. This depends on the fact that
- // InitSizeClasses() currently produces several size classes that
- // are aligned at powers of two. We will waste time and space if
- // we miss in the size class array, but that is deemed acceptable
- // since memalign() should be used rarely.
- int cl = Static::sizemap()->SizeClass(size);
- while (cl < kNumClasses &&
- ((Static::sizemap()->class_to_size(cl) & (align - 1)) != 0)) {
- cl++;
- }
- if (cl < kNumClasses) {
- ThreadCache* heap = ThreadCache::GetCache();
- return CheckedMallocResult(heap->Allocate(
- Static::sizemap()->class_to_size(cl)));
- }
- }
-
- // We will allocate directly from the page heap
- SpinLockHolder h(Static::pageheap_lock());
-
- if (align <= kPageSize) {
- // Any page-level allocation will be fine
- // TODO: We could put the rest of this page in the appropriate
- // TODO: cache but it does not seem worth it.
- Span* span = Static::pageheap()->New(tcmalloc::pages(size));
- return span == NULL ? NULL : SpanToMallocResult(span);
- }
-
- // Allocate extra pages and carve off an aligned portion
- const Length alloc = tcmalloc::pages(size + align);
- Span* span = Static::pageheap()->New(alloc);
- if (span == NULL) return NULL;
-
- // Skip starting portion so that we end up aligned
- Length skip = 0;
- while ((((span->start+skip) << kPageShift) & (align - 1)) != 0) {
- skip++;
- }
- ASSERT(skip < alloc);
- if (skip > 0) {
- Span* rest = Static::pageheap()->Split(span, skip);
- Static::pageheap()->Delete(span);
- span = rest;
- }
-
- // Skip trailing portion that we do not need to return
- const Length needed = tcmalloc::pages(size);
- ASSERT(span->length >= needed);
- if (span->length > needed) {
- Span* trailer = Static::pageheap()->Split(span, needed);
- Static::pageheap()->Delete(trailer);
- }
- return SpanToMallocResult(span);
-}
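-
-// Example of the carve-off above, assuming 4KB pages: for a 16KB
-// alignment, pages(size + align) over-allocates by about four extra
-// pages; the skip loop then advances past at most
-// align/kPageSize - 1 == 3 leading pages until
-// (span->start << kPageShift) is 16KB-aligned, and the skipped prefix
-// and unused suffix both go back to the page heap.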
-
-// Helpers for use by exported routines below:
-
-inline void do_malloc_stats() {
- PrintStats(1);
-}
-
-inline int do_mallopt(int cmd, int value) {
- return 1; // Indicates error
-}
-
-#ifdef HAVE_STRUCT_MALLINFO // mallinfo isn't defined on freebsd, for instance
-inline struct mallinfo do_mallinfo() {
- TCMallocStats stats;
- ExtractStats(&stats, NULL);
-
- // Just some of the fields are filled in.
- struct mallinfo info;
- memset(&info, 0, sizeof(info));
-
-  // Unfortunately, the struct contains "int" fields, so some of the
- // size values will be truncated.
- info.arena = static_cast<int>(stats.system_bytes);
- info.fsmblks = static_cast<int>(stats.thread_bytes
- + stats.central_bytes
- + stats.transfer_bytes);
- info.fordblks = static_cast<int>(stats.pageheap_bytes);
- info.uordblks = static_cast<int>(stats.system_bytes
- - stats.thread_bytes
- - stats.central_bytes
- - stats.transfer_bytes
- - stats.pageheap_bytes);
-
- return info;
-}
-#endif // #ifdef HAVE_STRUCT_MALLINFO
-
-static SpinLock set_new_handler_lock(SpinLock::LINKER_INITIALIZED);
-
-inline void* cpp_alloc(size_t size, bool nothrow) {
- for (;;) {
- void* p = do_malloc(size);
-#ifdef PREANSINEW
- return p;
-#else
- if (p == NULL) { // allocation failed
- // Get the current new handler. NB: this function is not
- // thread-safe. We make a feeble stab at making it so here, but
- // this lock only protects against tcmalloc interfering with
- // itself, not with other libraries calling set_new_handler.
- std::new_handler nh;
- {
- SpinLockHolder h(&set_new_handler_lock);
- nh = std::set_new_handler(0);
- (void) std::set_new_handler(nh);
- }
-#if (defined(__GNUC__) && !defined(__EXCEPTIONS)) || (defined(_HAS_EXCEPTIONS) && !_HAS_EXCEPTIONS)
- if (nh) {
- // Since exceptions are disabled, we don't really know if new_handler
- // failed. Assume it will abort if it fails.
- (*nh)();
- continue;
- }
- return 0;
-#else
- // If no new_handler is established, the allocation failed.
- if (!nh) {
- if (nothrow) return 0;
- throw std::bad_alloc();
- }
- // Otherwise, try the new_handler. If it returns, retry the
- // allocation. If it throws std::bad_alloc, fail the allocation.
-      // If it throws something else, don't interfere.
- try {
- (*nh)();
- } catch (const std::bad_alloc&) {
- if (!nothrow) throw;
- return p;
- }
-#endif // (defined(__GNUC__) && !defined(__EXCEPTIONS)) || (defined(_HAS_EXCEPTIONS) && !_HAS_EXCEPTIONS)
- } else { // allocation success
- return p;
- }
-#endif // PREANSINEW
- }
-}
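-
-// The retry contract of cpp_alloc above (and cpp_memalign below),
-// from the caller's side (sketch; ReleaseCachesOrThrow is a
-// hypothetical handler): if the handler can release memory, the loop
-// eventually succeeds; if it throws std::bad_alloc, the nothrow
-// variant returns NULL instead of propagating.
-//   std::set_new_handler(ReleaseCachesOrThrow);
-//   void* p = tc_new_nothrow(n, std::nothrow);  // NULL if handler gives up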
-
-inline void* cpp_memalign(size_t align, size_t size, bool nothrow) {
- for (;;) {
- void* p = do_memalign(align, size);
-#ifdef PREANSINEW
- return p;
-#else
- if (p == NULL) { // allocation failed
- // Get the current new handler. NB: this function is not
- // thread-safe. We make a feeble stab at making it so here, but
- // this lock only protects against tcmalloc interfering with
- // itself, not with other libraries calling set_new_handler.
- std::new_handler nh;
- {
- SpinLockHolder h(&set_new_handler_lock);
- nh = std::set_new_handler(0);
- (void) std::set_new_handler(nh);
- }
-#if (defined(__GNUC__) && !defined(__EXCEPTIONS)) || (defined(_HAS_EXCEPTIONS) && !_HAS_EXCEPTIONS)
- if (nh) {
- // Since exceptions are disabled, we don't really know if new_handler
- // failed. Assume it will abort if it fails.
- (*nh)();
- continue;
- }
- return 0;
-#else
- // If no new_handler is established, the allocation failed.
- if (!nh) {
- if (nothrow) return 0;
- throw std::bad_alloc();
- }
- // Otherwise, try the new_handler. If it returns, retry the
- // allocation. If it throws std::bad_alloc, fail the allocation.
-      // If it throws something else, don't interfere.
- try {
- (*nh)();
- } catch (const std::bad_alloc&) {
- if (!nothrow) throw;
- return p;
- }
-#endif // (defined(__GNUC__) && !defined(__EXCEPTIONS)) || (defined(_HAS_EXCEPTIONS) && !_HAS_EXCEPTIONS)
- } else { // allocation success
- return p;
- }
-#endif // PREANSINEW
- }
-}
-
-} // end unnamed namespace
-
-// As promised, the definition of this function, declared above.
-size_t TCMallocImplementation::GetAllocatedSize(void* ptr) {
- return GetSizeWithCallback(ptr, &InvalidGetAllocatedSize);
-}
-
-void TCMallocImplementation::MarkThreadBusy() {
- // Allocate to force the creation of a thread cache, but avoid
- // invoking any hooks.
- do_free(do_malloc(0));
-}
-
-//-------------------------------------------------------------------
-// Exported routines
-//-------------------------------------------------------------------
-
-extern "C" PERFTOOLS_DLL_DECL const char* tc_version(
- int* major, int* minor, const char** patch) __THROW {
- if (major) *major = TC_VERSION_MAJOR;
- if (minor) *minor = TC_VERSION_MINOR;
- if (patch) *patch = TC_VERSION_PATCH;
- return TC_VERSION_STRING;
-}
-
-// CAVEAT: The code structure below ensures that MallocHook methods are always
-// called from the stack frame of the invoked allocation function.
-// heap-checker.cc depends on this to start a stack trace from
-// the call to the (de)allocation function.
-
-extern "C" PERFTOOLS_DLL_DECL void* tc_malloc(size_t size) __THROW {
- void* result = cpp_or_malloc(size, false);
- MallocHook::InvokeNewHook(result, size);
- return result;
-}
-
-extern "C" PERFTOOLS_DLL_DECL void tc_free(void* ptr) __THROW {
- MallocHook::InvokeDeleteHook(ptr);
- do_free(ptr);
-}
-
-extern "C" PERFTOOLS_DLL_DECL void* tc_calloc(size_t n,
- size_t elem_size) __THROW {
- void* result = do_calloc(n, elem_size);
- MallocHook::InvokeNewHook(result, n * elem_size);
- return result;
-}
-
-extern "C" PERFTOOLS_DLL_DECL void tc_cfree(void* ptr) __THROW {
- MallocHook::InvokeDeleteHook(ptr);
- do_free(ptr);
-}
-
-extern "C" PERFTOOLS_DLL_DECL void* tc_realloc(void* old_ptr,
- size_t new_size) __THROW {
- if (old_ptr == NULL) {
- void* result = cpp_or_malloc(new_size, false);
- MallocHook::InvokeNewHook(result, new_size);
- return result;
- }
- if (new_size == 0) {
- MallocHook::InvokeDeleteHook(old_ptr);
- do_free(old_ptr);
- return NULL;
- }
- return do_realloc(old_ptr, new_size);
-}
-
-extern "C" PERFTOOLS_DLL_DECL void* tc_new(size_t size) {
- void* p = cpp_alloc(size, false);
- // We keep this next instruction out of cpp_alloc for a reason: when
- // it's in, and new just calls cpp_alloc, the optimizer may fold the
- // new call into cpp_alloc, which messes up our whole section-based
- // stacktracing (see ATTRIBUTE_SECTION, above). This ensures cpp_alloc
- // isn't the last thing this fn calls, and prevents the folding.
- MallocHook::InvokeNewHook(p, size);
- return p;
-}
-
-extern "C" PERFTOOLS_DLL_DECL void* tc_new_nothrow(
- size_t size, const std::nothrow_t&) __THROW {
- void* p = cpp_alloc(size, true);
- MallocHook::InvokeNewHook(p, size);
- return p;
-}
-
-extern "C" PERFTOOLS_DLL_DECL void tc_delete(void* p) __THROW {
- MallocHook::InvokeDeleteHook(p);
- do_free(p);
-}
-
-// Compilers define and use this (via ::operator delete(ptr, nothrow)).
-// But it's really the same as normal delete, so we just do the same thing.
-extern "C" PERFTOOLS_DLL_DECL void tc_delete_nothrow(
- void* p, const std::nothrow_t&) __THROW {
- MallocHook::InvokeDeleteHook(p);
- do_free(p);
-}
-
-extern "C" PERFTOOLS_DLL_DECL void* tc_newarray(size_t size) {
- void* p = cpp_alloc(size, false);
- // We keep this next instruction out of cpp_alloc for a reason: when
- // it's in, and new just calls cpp_alloc, the optimizer may fold the
- // new call into cpp_alloc, which messes up our whole section-based
- // stacktracing (see ATTRIBUTE_SECTION, above). This ensures cpp_alloc
- // isn't the last thing this fn calls, and prevents the folding.
- MallocHook::InvokeNewHook(p, size);
- return p;
-}
-
-extern "C" PERFTOOLS_DLL_DECL void* tc_newarray_nothrow(
- size_t size, const std::nothrow_t&) __THROW {
- void* p = cpp_alloc(size, true);
- MallocHook::InvokeNewHook(p, size);
- return p;
-}
-
-extern "C" PERFTOOLS_DLL_DECL void tc_deletearray(void* p) __THROW {
- MallocHook::InvokeDeleteHook(p);
- do_free(p);
-}
-
-extern "C" PERFTOOLS_DLL_DECL void tc_deletearray_nothrow(
- void* p, const std::nothrow_t&) __THROW {
- MallocHook::InvokeDeleteHook(p);
- do_free(p);
-}
-
-extern "C" PERFTOOLS_DLL_DECL void* tc_memalign(size_t align,
- size_t size) __THROW {
- void* result = cpp_or_memalign(align, size, false);
- MallocHook::InvokeNewHook(result, size);
- return result;
-}
-
-extern "C" PERFTOOLS_DLL_DECL int tc_posix_memalign(
- void** result_ptr, size_t align, size_t size) __THROW {
- if (((align % sizeof(void*)) != 0) ||
- ((align & (align - 1)) != 0) ||
- (align == 0)) {
- return EINVAL;
- }
-
- void* result = cpp_or_memalign(align, size, false);
- MallocHook::InvokeNewHook(result, size);
- if (result == NULL) {
- return ENOMEM;
- } else {
- *result_ptr = result;
- return 0;
- }
-}
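-
-// The EINVAL checks above follow POSIX: align must be a power of two
-// and a multiple of sizeof(void*). For example, on a 64-bit system
-// align == 24 fails the power-of-two test (24 & 23 == 16), align == 4
-// fails the multiple-of-sizeof(void*) test, and align == 32 passes
-// both.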
-
-static size_t pagesize = 0;
-
-extern "C" PERFTOOLS_DLL_DECL void* tc_valloc(size_t size) __THROW {
- // Allocate page-aligned object of length >= size bytes
- if (pagesize == 0) pagesize = getpagesize();
- void* result = cpp_or_memalign(pagesize, size, false);
- MallocHook::InvokeNewHook(result, size);
- return result;
-}
-
-extern "C" PERFTOOLS_DLL_DECL void* tc_pvalloc(size_t size) __THROW {
- // Round up size to a multiple of pagesize
- if (pagesize == 0) pagesize = getpagesize();
- if (size == 0) { // pvalloc(0) should allocate one page, according to
- size = pagesize; // http://man.free4web.biz/man3/libmpatrol.3.html
- }
- size = (size + pagesize - 1) & ~(pagesize - 1);
- void* result = cpp_or_memalign(pagesize, size, false);
- MallocHook::InvokeNewHook(result, size);
- return result;
-}
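-
-// The mask arithmetic above is the standard round-up-to-a-multiple
-// idiom (valid because pagesize is a power of two): with 4096-byte
-// pages, size == 1 becomes (1 + 4095) & ~4095 == 4096, and a size
-// that is already a multiple of 4096 is left unchanged.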
-
-extern "C" PERFTOOLS_DLL_DECL void tc_malloc_stats(void) __THROW {
- do_malloc_stats();
-}
-
-extern "C" PERFTOOLS_DLL_DECL int tc_mallopt(int cmd, int value) __THROW {
- return do_mallopt(cmd, value);
-}
-
-#ifdef HAVE_STRUCT_MALLINFO
-extern "C" PERFTOOLS_DLL_DECL struct mallinfo tc_mallinfo(void) __THROW {
- return do_mallinfo();
-}
-#endif
-
-// This function behaves similarly to MSVC's _set_new_mode.
-// If flag is 0 (default), calls to malloc will behave normally.
-// If flag is 1, calls to malloc will behave like calls to new,
-// and the std::new_handler will be invoked on failure.
-// Returns the previous mode.
-extern "C" PERFTOOLS_DLL_DECL int tc_set_new_mode(int flag) __THROW {
- int old_mode = tc_new_mode;
- tc_new_mode = flag;
- return old_mode;
-}
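-
-// Example usage (caller code, sketch; MyReleaseMemoryHandler is a
-// hypothetical function):
-//   std::set_new_handler(MyReleaseMemoryHandler);
-//   tc_set_new_mode(1);
-//   void* p = malloc(huge_size);  // on failure, retries via the handler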
-
-
-// On Linux boxes, additionally override __libc_memalign in libc.
-// A libc bug causes them to (very rarely) allocate with
-// __libc_memalign() yet deallocate with free(), and the definitions
-// above don't catch that case.
-// This function is an exception to the rule of calling MallocHook
-// methods from the stack frame of the allocation function;
-// heap-checker handles this special case explicitly.
-#ifndef TCMALLOC_FOR_DEBUGALLOCATION
-static void *MemalignOverride(size_t align, size_t size, const void *caller)
- __THROW ATTRIBUTE_SECTION(google_malloc);
-
-static void *MemalignOverride(size_t align, size_t size, const void *caller)
- __THROW {
- void* result = do_memalign(align, size);
- MallocHook::InvokeNewHook(result, size);
- return result;
-}
-void *(*__memalign_hook)(size_t, size_t, const void *) = MemalignOverride;
-#endif // #ifndef TCMALLOC_FOR_DEBUGALLOCATION
diff --git a/third_party/tcmalloc/tcmalloc_unittests.cc b/third_party/tcmalloc/tcmalloc_unittests.cc
deleted file mode 100644
index 919cfab..0000000
--- a/third_party/tcmalloc/tcmalloc_unittests.cc
+++ /dev/null
@@ -1,490 +0,0 @@
-// Copyright (c) 2009 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <algorithm> // for min()
-#include "base/atomicops.h"
-#include "base/logging.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-// Number of bits in a size_t.
-static const int kSizeBits = 8 * sizeof(size_t);
-// The maximum size of a size_t.
-static const size_t kMaxSize = ~static_cast<size_t>(0);
-// Maximum positive size of a size_t if it were signed.
-static const size_t kMaxSignedSize = ((size_t(1) << (kSizeBits-1)) - 1);
-// An allocation size that is large but not unreasonably so.
-static const size_t kNotTooBig = 100000;
-// An allocation size which is just too big.
-static const size_t kTooBig = ~static_cast<size_t>(0);
-
-namespace {
-
-using std::min;
-
-// Fill a buffer of the specified size with a predetermined pattern
-static void Fill(unsigned char* buffer, int n) {
- for (int i = 0; i < n; i++) {
- buffer[i] = (i & 0xff);
- }
-}
-
-// Check that the specified buffer has the predetermined pattern
-// generated by Fill()
-static bool Valid(unsigned char* buffer, int n) {
- for (int i = 0; i < n; i++) {
- if (buffer[i] != (i & 0xff)) {
- return false;
- }
- }
- return true;
-}
-
-// Check that a buffer is completely zeroed.
-static bool IsZeroed(unsigned char* buffer, int n) {
- for (int i = 0; i < n; i++) {
- if (buffer[i] != 0) {
- return false;
- }
- }
- return true;
-}
-
-// Check alignment
-static void CheckAlignment(void* p, int align) {
- EXPECT_EQ(0, reinterpret_cast<uintptr_t>(p) & (align-1));
-}
-
-// Return the next interesting size/delta to check. Returns -1 if no more.
-static int NextSize(int size) {
- if (size < 100)
- return size+1;
-
- if (size < 100000) {
- // Find next power of two
- int power = 1;
- while (power < size)
- power <<= 1;
-
- // Yield (power-1, power, power+1)
- if (size < power-1)
- return power-1;
-
- if (size == power-1)
- return power;
-
- assert(size == power);
- return power+1;
- } else {
- return -1;
- }
-}
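-
-// NextSize() walks 1..100 densely, then visits each power-of-two
-// neighborhood: from 100 it yields 127, 128, 129, 255, 256, 257, ...
-// and returns -1 once the input size reaches 100000.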
-
-#define GG_ULONGLONG(x) static_cast<uint64>(x)
-
-template <class AtomicType>
-static void TestAtomicIncrement() {
- // For now, we just test single threaded execution
-
- // use a guard value to make sure the NoBarrier_AtomicIncrement doesn't go
- // outside the expected address bounds. This is in particular to
- // test that some future change to the asm code doesn't cause the
- // 32-bit NoBarrier_AtomicIncrement to do the wrong thing on 64-bit machines.
- struct {
- AtomicType prev_word;
- AtomicType count;
- AtomicType next_word;
- } s;
-
- AtomicType prev_word_value, next_word_value;
- memset(&prev_word_value, 0xFF, sizeof(AtomicType));
- memset(&next_word_value, 0xEE, sizeof(AtomicType));
-
- s.prev_word = prev_word_value;
- s.count = 0;
- s.next_word = next_word_value;
-
- EXPECT_EQ(base::subtle::NoBarrier_AtomicIncrement(&s.count, 1), 1);
- EXPECT_EQ(s.count, 1);
- EXPECT_EQ(s.prev_word, prev_word_value);
- EXPECT_EQ(s.next_word, next_word_value);
-
- EXPECT_EQ(base::subtle::NoBarrier_AtomicIncrement(&s.count, 2), 3);
- EXPECT_EQ(s.count, 3);
- EXPECT_EQ(s.prev_word, prev_word_value);
- EXPECT_EQ(s.next_word, next_word_value);
-
- EXPECT_EQ(base::subtle::NoBarrier_AtomicIncrement(&s.count, 3), 6);
- EXPECT_EQ(s.count, 6);
- EXPECT_EQ(s.prev_word, prev_word_value);
- EXPECT_EQ(s.next_word, next_word_value);
-
- EXPECT_EQ(base::subtle::NoBarrier_AtomicIncrement(&s.count, -3), 3);
- EXPECT_EQ(s.count, 3);
- EXPECT_EQ(s.prev_word, prev_word_value);
- EXPECT_EQ(s.next_word, next_word_value);
-
- EXPECT_EQ(base::subtle::NoBarrier_AtomicIncrement(&s.count, -2), 1);
- EXPECT_EQ(s.count, 1);
- EXPECT_EQ(s.prev_word, prev_word_value);
- EXPECT_EQ(s.next_word, next_word_value);
-
- EXPECT_EQ(base::subtle::NoBarrier_AtomicIncrement(&s.count, -1), 0);
- EXPECT_EQ(s.count, 0);
- EXPECT_EQ(s.prev_word, prev_word_value);
- EXPECT_EQ(s.next_word, next_word_value);
-
- EXPECT_EQ(base::subtle::NoBarrier_AtomicIncrement(&s.count, -1), -1);
- EXPECT_EQ(s.count, -1);
- EXPECT_EQ(s.prev_word, prev_word_value);
- EXPECT_EQ(s.next_word, next_word_value);
-
- EXPECT_EQ(base::subtle::NoBarrier_AtomicIncrement(&s.count, -4), -5);
- EXPECT_EQ(s.count, -5);
- EXPECT_EQ(s.prev_word, prev_word_value);
- EXPECT_EQ(s.next_word, next_word_value);
-
- EXPECT_EQ(base::subtle::NoBarrier_AtomicIncrement(&s.count, 5), 0);
- EXPECT_EQ(s.count, 0);
- EXPECT_EQ(s.prev_word, prev_word_value);
- EXPECT_EQ(s.next_word, next_word_value);
-}
-
-
-#define NUM_BITS(T) (sizeof(T) * 8)
-
-
-template <class AtomicType>
-static void TestCompareAndSwap() {
- AtomicType value = 0;
- AtomicType prev = base::subtle::NoBarrier_CompareAndSwap(&value, 0, 1);
- EXPECT_EQ(1, value);
- EXPECT_EQ(0, prev);
-
- // Use test value that has non-zero bits in both halves, more for testing
- // 64-bit implementation on 32-bit platforms.
- const AtomicType k_test_val = (GG_ULONGLONG(1) <<
- (NUM_BITS(AtomicType) - 2)) + 11;
- value = k_test_val;
- prev = base::subtle::NoBarrier_CompareAndSwap(&value, 0, 5);
- EXPECT_EQ(k_test_val, value);
- EXPECT_EQ(k_test_val, prev);
-
- value = k_test_val;
- prev = base::subtle::NoBarrier_CompareAndSwap(&value, k_test_val, 5);
- EXPECT_EQ(5, value);
- EXPECT_EQ(k_test_val, prev);
-}
-
-
-template <class AtomicType>
-static void TestAtomicExchange() {
- AtomicType value = 0;
- AtomicType new_value = base::subtle::NoBarrier_AtomicExchange(&value, 1);
- EXPECT_EQ(1, value);
- EXPECT_EQ(0, new_value);
-
- // Use test value that has non-zero bits in both halves, more for testing
- // 64-bit implementation on 32-bit platforms.
- const AtomicType k_test_val = (GG_ULONGLONG(1) <<
- (NUM_BITS(AtomicType) - 2)) + 11;
- value = k_test_val;
- new_value = base::subtle::NoBarrier_AtomicExchange(&value, k_test_val);
- EXPECT_EQ(k_test_val, value);
- EXPECT_EQ(k_test_val, new_value);
-
- value = k_test_val;
- new_value = base::subtle::NoBarrier_AtomicExchange(&value, 5);
- EXPECT_EQ(5, value);
- EXPECT_EQ(k_test_val, new_value);
-}
-
-
-template <class AtomicType>
-static void TestAtomicIncrementBounds() {
- // Test increment at the half-width boundary of the atomic type.
- // It is primarily for testing at the 32-bit boundary for 64-bit atomic type.
- AtomicType test_val = GG_ULONGLONG(1) << (NUM_BITS(AtomicType) / 2);
- AtomicType value = test_val - 1;
- AtomicType new_value = base::subtle::NoBarrier_AtomicIncrement(&value, 1);
- EXPECT_EQ(test_val, value);
- EXPECT_EQ(value, new_value);
-
- base::subtle::NoBarrier_AtomicIncrement(&value, -1);
- EXPECT_EQ(test_val - 1, value);
-}
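-
-// Worked example for the boundary above: with a 32-bit AtomicType, test_val
-// is 1 << 16; with a 64-bit AtomicType it is 1 << 32, so the increment from
-// 0xFFFFFFFF must carry into the high 32-bit word, exactly the case a
-// 32-bit emulation of 64-bit atomics is most likely to get wrong.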
-
-// This is a simple sanity check that the values are correct; it does not
-// test atomicity.
-template <class AtomicType>
-static void TestStore() {
- const AtomicType kVal1 = static_cast<AtomicType>(0xa5a5a5a5a5a5a5a5ULL);
- const AtomicType kVal2 = static_cast<AtomicType>(-1);
-
- AtomicType value;
-
- base::subtle::NoBarrier_Store(&value, kVal1);
- EXPECT_EQ(kVal1, value);
- base::subtle::NoBarrier_Store(&value, kVal2);
- EXPECT_EQ(kVal2, value);
-
- base::subtle::Acquire_Store(&value, kVal1);
- EXPECT_EQ(kVal1, value);
- base::subtle::Acquire_Store(&value, kVal2);
- EXPECT_EQ(kVal2, value);
-
- base::subtle::Release_Store(&value, kVal1);
- EXPECT_EQ(kVal1, value);
- base::subtle::Release_Store(&value, kVal2);
- EXPECT_EQ(kVal2, value);
-}
-
-// This is a simple sanity check that the values are correct; it does not
-// test atomicity.
-template <class AtomicType>
-static void TestLoad() {
- const AtomicType kVal1 = static_cast<AtomicType>(0xa5a5a5a5a5a5a5a5ULL);
- const AtomicType kVal2 = static_cast<AtomicType>(-1);
-
- AtomicType value;
-
- value = kVal1;
- EXPECT_EQ(kVal1, base::subtle::NoBarrier_Load(&value));
- value = kVal2;
- EXPECT_EQ(kVal2, base::subtle::NoBarrier_Load(&value));
-
- value = kVal1;
- EXPECT_EQ(kVal1, base::subtle::Acquire_Load(&value));
- value = kVal2;
- EXPECT_EQ(kVal2, base::subtle::Acquire_Load(&value));
-
- value = kVal1;
- EXPECT_EQ(kVal1, base::subtle::Release_Load(&value));
- value = kVal2;
- EXPECT_EQ(kVal2, base::subtle::Release_Load(&value));
-}
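-
-// Note: the Acquire_/Release_ variants differ from the NoBarrier_ forms only
-// in the memory fences they emit. Single-threaded checks like TestStore and
-// TestLoad can verify the values moved, but not the ordering guarantees;
-// that would require a multi-threaded test.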
-
-template <class AtomicType>
-static void TestAtomicOps() {
- TestCompareAndSwap<AtomicType>();
- TestAtomicExchange<AtomicType>();
- TestAtomicIncrementBounds<AtomicType>();
- TestStore<AtomicType>();
- TestLoad<AtomicType>();
-}
-
-static void TestCalloc(size_t n, size_t s, bool ok) {
- char* p = reinterpret_cast<char*>(calloc(n, s));
- if (!ok) {
- EXPECT_EQ(NULL, p) << "calloc(n, s) should not succeed";
- } else {
- EXPECT_NE(reinterpret_cast<void*>(NULL), p) <<
- "calloc(n, s) should succeed";
- for (size_t i = 0; i < n*s; i++) {
- EXPECT_EQ('\0', p[i]);
- }
- free(p);
- }
-}
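-
-// Illustrative sketch (hypothetical helper, not tcmalloc's implementation)
-// of the overflow guard the failing TestCalloc cases below depend on:
-// calloc must fail whenever n * s would wrap around size_t.
-static bool CallocWouldOverflowSketch(size_t n, size_t s) {
- // n * s overflows iff s != 0 and n exceeds SIZE_MAX / s.
- return s != 0 && n > (~static_cast<size_t>(0)) / s;
-}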
-
-
-// A global test counter for number of times the NewHandler is called.
-static int news_handled = 0;
-static void TestNewHandler() {
- ++news_handled;
- throw std::bad_alloc();
-}
-
-// Because we compile without exceptions, we expect these will not throw.
-static void TestOneNewWithoutExceptions(void* (*func)(size_t),
- bool should_throw) {
- // success test
- try {
- void* ptr = (*func)(kNotTooBig);
- EXPECT_NE(reinterpret_cast<void*>(NULL), ptr) <<
- "allocation should not have failed.";
- } catch(...) {
- EXPECT_EQ(0, 1) << "allocation threw unexpected exception.";
- }
-
- // failure test
- try {
- void* rv = (*func)(kTooBig);
- EXPECT_EQ(NULL, rv);
- EXPECT_EQ(false, should_throw) << "allocation should have thrown.";
- } catch(...) {
- EXPECT_EQ(true, should_throw) << "allocation threw unexpected exception.";
- }
-}
-
-static void TestNothrowNew(void* (*func)(size_t)) {
- news_handled = 0;
-
- // test without new_handler:
- std::new_handler saved_handler = std::set_new_handler(0);
- TestOneNewWithoutExceptions(func, false);
-
- // test with new_handler:
- std::set_new_handler(TestNewHandler);
- TestOneNewWithoutExceptions(func, true);
- EXPECT_EQ(news_handled, 1) << "nothrow new_handler was not called.";
- std::set_new_handler(saved_handler);
-}
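-
-// Sketch of the contract exercised above (attempt_alloc is hypothetical):
-// the nothrow operator new retries through the installed new_handler and
-// converts a thrown bad_alloc into a NULL return, which is why the throw in
-// TestNewHandler terminates the retry loop after one call. Roughly:
-//
-//   void* operator new(size_t size, const std::nothrow_t&) throw() {
-//     for (;;) {
-//       if (void* p = attempt_alloc(size))
-//         return p;
-//       std::new_handler h = std::set_new_handler(0); // read the handler...
-//       std::set_new_handler(h);                      // ...and restore it
-//       if (h == 0)
-//         return 0;
-//       try {
-//         h();
-//       } catch (const std::bad_alloc&) {
-//         return 0;
-//       }
-//     }
-//   }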
-
-} // namespace
-
-//-----------------------------------------------------------------------------
-
-TEST(Atomics, AtomicIncrementWord) {
- TestAtomicIncrement<AtomicWord>();
-}
-
-TEST(Atomics, AtomicIncrement32) {
- TestAtomicIncrement<Atomic32>();
-}
-
-TEST(Atomics, AtomicOpsWord) {
- TestAtomicOps<AtomicWord>();
-}
-
-TEST(Atomics, AtomicOps32) {
- TestAtomicOps<Atomic32>();
-}
-
-TEST(Allocators, Malloc) {
- // Try allocating a range of power-of-two sizes and make sure each block
- // is usable and at least 2-byte aligned.
- for (int size = 1; size < 1048576; size *= 2) {
- unsigned char* ptr = reinterpret_cast<unsigned char*>(malloc(size));
- CheckAlignment(ptr, 2); // Should be 2 byte aligned
- Fill(ptr, size);
- EXPECT_EQ(true, Valid(ptr, size));
- free(ptr);
- }
-}
-
-TEST(Allocators, Calloc) {
- TestCalloc(0, 0, true);
- TestCalloc(0, 1, true);
- TestCalloc(1, 1, true);
- TestCalloc(1<<10, 0, true);
- TestCalloc(1<<20, 0, true);
- TestCalloc(0, 1<<10, true);
- TestCalloc(0, 1<<20, true);
- TestCalloc(1<<20, 2, true);
- TestCalloc(2, 1<<20, true);
- TestCalloc(1000, 1000, true);
-
- TestCalloc(kMaxSize, 2, false);
- TestCalloc(2, kMaxSize, false);
- TestCalloc(kMaxSize, kMaxSize, false);
-
- TestCalloc(kMaxSignedSize, 3, false);
- TestCalloc(3, kMaxSignedSize, false);
- TestCalloc(kMaxSignedSize, kMaxSignedSize, false);
-}
-
-TEST(Allocators, New) {
- TestNothrowNew(&::operator new);
- TestNothrowNew(&::operator new[]);
-}
-
-// This makes sure that reallocing a small number of bytes in either
-// direction doesn't cause us to allocate new memory.
-TEST(Allocators, Realloc1) {
- int start_sizes[] = { 100, 1000, 10000, 100000 };
- int deltas[] = { 1, -2, 4, -8, 16, -32, 64, -128 };
-
- for (int s = 0; s < sizeof(start_sizes)/sizeof(*start_sizes); ++s) {
- void* p = malloc(start_sizes[s]);
- CHECK(p);
- // The larger the start-size, the larger the non-reallocing delta.
- for (int d = 0; d < s*2; ++d) {
- void* new_p = realloc(p, start_sizes[s] + deltas[d]);
- CHECK(p == new_p); // realloc should not allocate new memory
- }
- // Test again, but this time reallocing smaller first.
- for (int d = 0; d < s*2; ++d) {
- void* new_p = realloc(p, start_sizes[s] - deltas[d]);
- CHECK(p == new_p); // realloc should not allocate new memory
- }
- free(p);
- }
-}
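-
-// Why the CHECKs above can hold: tcmalloc rounds small requests up to
-// discrete size classes, so nudging a block by a few bytes usually stays in
-// the same class and realloc can hand back the original pointer. With
-// illustrative numbers (not the real class table):
-//   p = malloc(100); // served from, say, a 128-byte size class
-//   realloc(p, 104); // still fits that class -> returns the same p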
-
-TEST(Allocators, Realloc2) {
- // NextSize() eventually goes negative, which is what terminates these
- // loops.
- for (int src_size = 0; src_size >= 0; src_size = NextSize(src_size)) {
- for (int dst_size = 0; dst_size >= 0; dst_size = NextSize(dst_size)) {
- unsigned char* src = reinterpret_cast<unsigned char*>(malloc(src_size));
- Fill(src, src_size);
- unsigned char* dst =
- reinterpret_cast<unsigned char*>(realloc(src, dst_size));
- EXPECT_EQ(true, Valid(dst, min(src_size, dst_size)));
- Fill(dst, dst_size);
- EXPECT_EQ(true, Valid(dst, dst_size));
- if (dst != NULL) free(dst);
- }
- }
-
- // Now make sure realloc works correctly even when we overflow the
- // packed cache, so some entries are evicted from the cache.
- // The cache has 2^12 entries, keyed by page number.
- const int kNumEntries = 1 << 14;
- int** p = reinterpret_cast<int**>(malloc(sizeof(*p) * kNumEntries));
- int sum = 0;
- for (int i = 0; i < kNumEntries; i++) {
- // 8192 bytes should be at least one page on any platform we run on,
- // so each allocation touches distinct pages.
- p[i] = reinterpret_cast<int*>(malloc(8192));
- p[i][1000] = i; // use memory deep in the heart of p
- }
- for (int i = 0; i < kNumEntries; i++) {
- p[i] = reinterpret_cast<int*>(realloc(p[i], 9000));
- }
- for (int i = 0; i < kNumEntries; i++) {
- sum += p[i][1000];
- free(p[i]);
- }
- // sum should be 0 + 1 + ... + (kNumEntries - 1); the formula relies on
- // kNumEntries being even.
- EXPECT_EQ(kNumEntries/2 * (kNumEntries - 1), sum);
- free(p);
-}
-
-TEST(Allocators, ReallocZero) {
- // Test that realloc to zero does not return NULL.
- for (int size = 0; size >= 0; size = NextSize(size)) {
- char* ptr = reinterpret_cast<char*>(malloc(size));
- EXPECT_NE(static_cast<char*>(NULL), ptr);
- ptr = reinterpret_cast<char*>(realloc(ptr, 0));
- EXPECT_NE(static_cast<char*>(NULL), ptr);
- if (ptr)
- free(ptr);
- }
-}
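-
-// Note: the C standard allows realloc(ptr, 0) to return either NULL or a
-// unique non-NULL pointer; this test pins down the non-NULL choice,
-// presumably because callers treat a NULL return as failure.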
-
-#ifdef WIN32
-// Test recalloc
-TEST(Allocators, Recalloc) {
- for (int src_size = 0; src_size >= 0; src_size = NextSize(src_size)) {
- for (int dst_size = 0; dst_size >= 0; dst_size = NextSize(dst_size)) {
- unsigned char* src =
- reinterpret_cast<unsigned char*>(_recalloc(NULL, 1, src_size));
- EXPECT_EQ(true, IsZeroed(src, src_size));
- Fill(src, src_size);
- unsigned char* dst =
- reinterpret_cast<unsigned char*>(_recalloc(src, 1, dst_size));
- EXPECT_EQ(true, Valid(dst, min(src_size, dst_size)));
- Fill(dst, dst_size);
- EXPECT_EQ(true, Valid(dst, dst_size));
- if (dst != NULL)
- free(dst);
- }
- }
-}
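-
-// _recalloc must zero-fill any region beyond the old allocation, which is
-// what the IsZeroed/Valid checks above verify. An illustrative wrapper of
-// that shape (my_recalloc is hypothetical; overflow checking omitted):
-//
-//   void* my_recalloc(void* p, size_t n, size_t s) {
-//     size_t old_size = p ? _msize(p) : 0;
-//     void* q = realloc(p, n * s);
-//     if (q && n * s > old_size)
-//       memset(static_cast<char*>(q) + old_size, 0, n * s - old_size);
-//     return q;
-//   }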
-#endif
-
-
-int main(int argc, char** argv) {
- testing::InitGoogleTest(&argc, argv);
- return RUN_ALL_TESTS();
-}
-
diff --git a/third_party/tcmalloc/unittest_utils.cc b/third_party/tcmalloc/unittest_utils.cc
deleted file mode 100644
index fd8c263..0000000
--- a/third_party/tcmalloc/unittest_utils.cc
+++ /dev/null
@@ -1,18 +0,0 @@
-// Copyright (c) 2009 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// The unittests need this in order to link without pulling in tons of
-// other libraries.
-
-#include <config.h>
-
-#include <stdarg.h> // for va_list, va_start, va_end
-#include <stdio.h> // for _vsnprintf
-
-inline int snprintf(char* buffer, size_t count, const char* format, ...) {
- int result;
- va_list args;
- va_start(args, format);
- result = _vsnprintf(buffer, count, format, args);
- va_end(args);
- return result;
-}
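-
-// Caveat: _vsnprintf does not NUL-terminate the buffer on truncation and
-// returns a negative value, so a hardened shim would add something like:
-//   if (result < 0 || result >= static_cast<int>(count))
-//     buffer[count - 1] = '\0'; // assumes count > 0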
-
diff --git a/third_party/tcmalloc/win_allocator.cc b/third_party/tcmalloc/win_allocator.cc
deleted file mode 100644
index 8ae653a..0000000
--- a/third_party/tcmalloc/win_allocator.cc
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright (c) 2009 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// This is a simple allocator based on the Windows heap.
-
-#include <windows.h> // for HeapCreate, HeapAlloc, HeapFree, etc.
-
-extern "C" {
-
-HANDLE win_heap;
-
-bool win_heap_init(bool use_lfh) {
- win_heap = HeapCreate(0, 0, 0);
- if (win_heap == NULL)
- return false;
-
- if (use_lfh) {
- ULONG enable_lfh = 2;
- HeapSetInformation(win_heap, HeapCompatibilityInformation,
- &enable_lfh, sizeof(enable_lfh));
- // NOTE: Enabling the LFH may fail: Vista enables it by default, and a
- // process running under a debugger will not use it. So we ignore any
- // errors.
- }
-
- return true;
-}
-
-void* win_heap_malloc(size_t size) {
- return HeapAlloc(win_heap, 0, size);
-}
-
-void win_heap_free(void* ptr) {
- HeapFree(win_heap, 0, ptr);
-}
-
-void* win_heap_realloc(void* ptr, size_t size) {
- if (!ptr)
- return win_heap_malloc(size);
- if (!size) {
- win_heap_free(ptr);
- return NULL;
- }
- return HeapReAlloc(win_heap, 0, ptr, size);
-}
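-
-// Note: HeapReAlloc neither accepts a NULL block pointer nor frees on a
-// size of 0, so the two branches above are what give this wrapper standard
-// realloc semantics.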
-
-size_t win_heap_msize(void* ptr) {
- return HeapSize(win_heap, 0, ptr);
-}
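-
-// Minimal usage sketch (illustrative values; the production caller is the
-// allocator shim):
-//   if (win_heap_init(true)) { // true requests the low-fragmentation heap
-//     void* p = win_heap_malloc(128);
-//     p = win_heap_realloc(p, 256);
-//     size_t n = win_heap_msize(p); // >= 256
-//     win_heap_free(p);
-//   }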
-
-} // extern "C"