Diffstat (limited to 'base/allocator')
-rw-r--r--  base/allocator/README                  |  59
-rw-r--r--  base/allocator/allocator.gyp           | 410
-rw-r--r--  base/allocator/allocator_shim.cc       | 266
-rw-r--r--  base/allocator/allocator_unittests.cc  | 490
-rw-r--r--  base/allocator/generic_allocators.cc   | 137
-rw-r--r--  base/allocator/prep_libc.sh            |  39
-rw-r--r--  base/allocator/unittest_utils.cc       |  18
-rw-r--r--  base/allocator/win_allocator.cc        |  50
8 files changed, 1469 insertions(+), 0 deletions(-)
diff --git a/base/allocator/README b/base/allocator/README
new file mode 100644
index 0000000..ec8a707
--- /dev/null
+++ b/base/allocator/README
@@ -0,0 +1,59 @@
+Notes about the Chrome memory allocator.
+
+Background
+----------
+We use this library as a generic way to fork into any of several allocators.
+Currently we can, at runtime, switch between:
+ the default windows allocator
+ the windows low-fragmentation-heap
+ tcmalloc
+ jemalloc (the heap used most notably within Mozilla Firefox)
+
+The mechanism for hooking LIBCMT in windows is rather tricky. The core
+problem is that by default, the windows library does not declare malloc and
+free as weak symbols. Because of this, they cannot be overridden. To work
+around this, we start with LIBCMT.LIB and manually remove all allocator-
+related functions from it using the Visual Studio library tool (see
+prep_libc.sh below). Once removed, we can link against the stripped library
+and provide our own versions of the allocator-related functionality.
+
+
+Source code
+-----------
+This directory contains just the allocator (i.e. shim) layer that switches
+between the different underlying memory allocation implementations.
+
+The tcmalloc and jemalloc libraries originate outside of Chromium
+and exist in ../../third_party/tcmalloc and ../../third_party/jemalloc
+(currently, the actual locations are defined in the allocator.gyp file).
+The third party sources use a vendor-branch SCM pattern to track
+Chromium-specific changes independently from upstream changes.
+
+The general intent is to push local changes upstream so that over
+time we no longer need any forked files.
+
+
+Adding a new allocator
+----------------------
+Adding a new allocator requires definition of the following five functions:
+
+ extern "C" {
+ bool init();
+ void* malloc(size_t s);
+ void* realloc(void* p, size_t s);
+ void free(void* p);
+ size_t msize(void* p);
+ }
+
+All other allocation related functions (new/delete/calloc/etc) have been
+implemented generically to work across all allocators.
+
+
+Usage
+-----
+You can use the different allocators by setting the environment variable
+CHROME_ALLOCATOR to:
+ "tcmalloc" - TCMalloc (default)
+ "jemalloc" - jemalloc
+ "winheap" - Windows default heap
+ "winlfh" - Windows Low-Fragmentation heap
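
Note on the interface above: win_allocator.cc in this change is the smallest
real example of such a backend. Purely as an illustration, a backend can be as
simple as the following sketch (the toy_* names and the fixed-size bump arena
are hypothetical and not part of this change; a real backend must not call
back into malloc/free, since the shim owns those symbols):

    // Toy bump allocator over a static arena; it never reuses freed memory.
    #include <stddef.h>
    #include <string.h>

    extern "C" {

    static size_t arena[(1 << 20) / sizeof(size_t)];  // 1 MB, size_t-aligned.
    static size_t arena_used = 0;                      // Bytes handed out.

    bool toy_init() {
      arena_used = 0;
      return true;
    }

    void* toy_malloc(size_t s) {
      if (s > sizeof(arena))
        return NULL;
      // Keep a size header in front of each block so msize/realloc work,
      // and round the total up to preserve size_t alignment.
      size_t needed = (s + sizeof(size_t) + sizeof(size_t) - 1) &
                      ~(sizeof(size_t) - 1);
      if (arena_used + needed > sizeof(arena))
        return NULL;
      size_t* header = reinterpret_cast<size_t*>(
          reinterpret_cast<char*>(arena) + arena_used);
      arena_used += needed;
      *header = s;
      return header + 1;
    }

    size_t toy_msize(void* p) {
      return p ? *(reinterpret_cast<size_t*>(p) - 1) : 0;
    }

    void toy_free(void* /*p*/) {
      // A bump allocator never reclaims individual blocks.
    }

    void* toy_realloc(void* p, size_t s) {
      if (!p)
        return toy_malloc(s);
      void* q = toy_malloc(s);
      if (q) {
        size_t old_size = toy_msize(p);
        memcpy(q, p, s < old_size ? s : old_size);
      }
      return q;
    }

    }  // extern "C"

Wiring such a backend in would mean adding a value to the Allocator enum in
allocator_shim.cc and dispatching to these functions from malloc/free/realloc
and _msize, the same way the je_* and win_heap_* functions are dispatched.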
diff --git a/base/allocator/allocator.gyp b/base/allocator/allocator.gyp
new file mode 100644
index 0000000..fe1aae8
--- /dev/null
+++ b/base/allocator/allocator.gyp
@@ -0,0 +1,410 @@
+# Copyright (c) 2009 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'variables': {
+ 'jemalloc_dir': '../../third_party/jemalloc/chromium',
+ 'tcmalloc_dir': '../../third_party/tcmalloc/chromium',
+ },
+ 'targets': [
+ {
+ 'target_name': 'allocator',
+ 'type': '<(library)',
+ 'msvs_guid': 'C564F145-9172-42C3-BFCB-60FDEA124321',
+ 'include_dirs': [
+ '.',
+ '<(tcmalloc_dir)/src/base',
+ '<(tcmalloc_dir)/src',
+ '../..',
+ ],
+ 'direct_dependent_settings': {
+ 'configurations': {
+ 'Common_Base': {
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'IgnoreDefaultLibraryNames': ['libcmtd.lib', 'libcmt.lib'],
+ 'AdditionalDependencies': [
+ '<(SHARED_INTERMEDIATE_DIR)/allocator/libcmt.lib'
+ ],
+ },
+ },
+ },
+ },
+ 'conditions': [
+ ['OS=="win"', {
+ 'defines': [
+ ['PERFTOOLS_DLL_DECL', '']
+ ],
+ }],
+ ],
+ },
+ 'sources': [
+ # Generated for our configuration from tcmalloc's build
+ # and checked in.
+ '<(tcmalloc_dir)/src/config.h',
+ '<(tcmalloc_dir)/src/config_linux.h',
+ '<(tcmalloc_dir)/src/config_win.h',
+
+ # all tcmalloc native and forked files
+ '<(tcmalloc_dir)/src/addressmap-inl.h',
+ '<(tcmalloc_dir)/src/base/atomicops-internals-linuxppc.h',
+ '<(tcmalloc_dir)/src/base/atomicops-internals-macosx.h',
+ '<(tcmalloc_dir)/src/base/atomicops-internals-x86-msvc.h',
+ '<(tcmalloc_dir)/src/base/atomicops-internals-x86.cc',
+ '<(tcmalloc_dir)/src/base/atomicops-internals-x86.h',
+ '<(tcmalloc_dir)/src/base/atomicops.h',
+ '<(tcmalloc_dir)/src/base/basictypes.h',
+ '<(tcmalloc_dir)/src/base/commandlineflags.h',
+ '<(tcmalloc_dir)/src/base/cycleclock.h',
+ '<(tcmalloc_dir)/src/base/dynamic_annotations.cc',
+ '<(tcmalloc_dir)/src/base/dynamic_annotations.h',
+ '<(tcmalloc_dir)/src/base/elfcore.h',
+ '<(tcmalloc_dir)/src/base/googleinit.h',
+ '<(tcmalloc_dir)/src/base/linux_syscall_support.h',
+ '<(tcmalloc_dir)/src/base/linuxthreads.cc',
+ '<(tcmalloc_dir)/src/base/linuxthreads.h',
+ '<(tcmalloc_dir)/src/base/logging.cc',
+ '<(tcmalloc_dir)/src/base/logging.h',
+ '<(tcmalloc_dir)/src/base/low_level_alloc.cc',
+ '<(tcmalloc_dir)/src/base/low_level_alloc.h',
+ '<(tcmalloc_dir)/src/base/simple_mutex.h',
+ '<(tcmalloc_dir)/src/base/spinlock.cc',
+ '<(tcmalloc_dir)/src/base/spinlock.h',
+ '<(tcmalloc_dir)/src/base/spinlock_linux-inl.h',
+ '<(tcmalloc_dir)/src/base/spinlock_posix-inl.h',
+ '<(tcmalloc_dir)/src/base/spinlock_win32-inl.h',
+ '<(tcmalloc_dir)/src/base/stl_allocator.h',
+ '<(tcmalloc_dir)/src/base/sysinfo.cc',
+ '<(tcmalloc_dir)/src/base/sysinfo.h',
+ '<(tcmalloc_dir)/src/base/thread_annotations.h',
+ '<(tcmalloc_dir)/src/base/thread_lister.c',
+ '<(tcmalloc_dir)/src/base/thread_lister.h',
+ '<(tcmalloc_dir)/src/base/vdso_support.cc',
+ '<(tcmalloc_dir)/src/base/vdso_support.h',
+ '<(tcmalloc_dir)/src/central_freelist.cc',
+ '<(tcmalloc_dir)/src/central_freelist.h',
+ '<(tcmalloc_dir)/src/common.cc',
+ '<(tcmalloc_dir)/src/common.h',
+ '<(tcmalloc_dir)/src/debugallocation.cc',
+ '<(tcmalloc_dir)/src/getpc.h',
+ '<(tcmalloc_dir)/src/google/heap-checker.h',
+ '<(tcmalloc_dir)/src/google/heap-profiler.h',
+ '<(tcmalloc_dir)/src/google/malloc_extension.h',
+ '<(tcmalloc_dir)/src/google/malloc_extension_c.h',
+ '<(tcmalloc_dir)/src/google/malloc_hook.h',
+ '<(tcmalloc_dir)/src/google/malloc_hook_c.h',
+ '<(tcmalloc_dir)/src/google/profiler.h',
+ '<(tcmalloc_dir)/src/google/stacktrace.h',
+ '<(tcmalloc_dir)/src/google/tcmalloc.h',
+ '<(tcmalloc_dir)/src/heap-checker-bcad.cc',
+ '<(tcmalloc_dir)/src/heap-checker.cc',
+ '<(tcmalloc_dir)/src/heap-profile-table.cc',
+ '<(tcmalloc_dir)/src/heap-profile-table.h',
+ '<(tcmalloc_dir)/src/heap-profiler.cc',
+ '<(tcmalloc_dir)/src/internal_logging.cc',
+ '<(tcmalloc_dir)/src/internal_logging.h',
+ '<(tcmalloc_dir)/src/linked_list.h',
+ '<(tcmalloc_dir)/src/malloc_extension.cc',
+ '<(tcmalloc_dir)/src/malloc_hook-inl.h',
+ '<(tcmalloc_dir)/src/malloc_hook.cc',
+ '<(tcmalloc_dir)/src/maybe_threads.cc',
+ '<(tcmalloc_dir)/src/maybe_threads.h',
+ '<(tcmalloc_dir)/src/memfs_malloc.cc',
+ '<(tcmalloc_dir)/src/memory_region_map.cc',
+ '<(tcmalloc_dir)/src/memory_region_map.h',
+ '<(tcmalloc_dir)/src/packed-cache-inl.h',
+ '<(tcmalloc_dir)/src/page_heap.cc',
+ '<(tcmalloc_dir)/src/page_heap.h',
+ '<(tcmalloc_dir)/src/page_heap_allocator.h',
+ '<(tcmalloc_dir)/src/pagemap.h',
+ '<(tcmalloc_dir)/src/profile-handler.cc',
+ '<(tcmalloc_dir)/src/profile-handler.h',
+ '<(tcmalloc_dir)/src/profiledata.cc',
+ '<(tcmalloc_dir)/src/profiledata.h',
+ '<(tcmalloc_dir)/src/profiler.cc',
+ '<(tcmalloc_dir)/src/raw_printer.cc',
+ '<(tcmalloc_dir)/src/raw_printer.h',
+ '<(tcmalloc_dir)/src/sampler.cc',
+ '<(tcmalloc_dir)/src/sampler.h',
+ '<(tcmalloc_dir)/src/span.cc',
+ '<(tcmalloc_dir)/src/span.h',
+ '<(tcmalloc_dir)/src/stack_trace_table.cc',
+ '<(tcmalloc_dir)/src/stack_trace_table.h',
+ '<(tcmalloc_dir)/src/stacktrace.cc',
+ '<(tcmalloc_dir)/src/stacktrace.h',
+ '<(tcmalloc_dir)/src/stacktrace_config.h',
+ '<(tcmalloc_dir)/src/stacktrace_generic-inl.h',
+ '<(tcmalloc_dir)/src/stacktrace_libunwind-inl.h',
+ '<(tcmalloc_dir)/src/stacktrace_powerpc-inl.h',
+ '<(tcmalloc_dir)/src/stacktrace_win32-inl.h',
+ '<(tcmalloc_dir)/src/stacktrace_with_context.cc',
+ '<(tcmalloc_dir)/src/stacktrace_x86-inl.h',
+ '<(tcmalloc_dir)/src/stacktrace_x86_64-inl.h',
+ '<(tcmalloc_dir)/src/static_vars.cc',
+ '<(tcmalloc_dir)/src/static_vars.h',
+ '<(tcmalloc_dir)/src/symbolize.cc',
+ '<(tcmalloc_dir)/src/symbolize.h',
+ '<(tcmalloc_dir)/src/symbolize_linux.cc',
+ '<(tcmalloc_dir)/src/system-alloc.cc',
+ '<(tcmalloc_dir)/src/system-alloc.h',
+ '<(tcmalloc_dir)/src/tcmalloc.cc',
+ '<(tcmalloc_dir)/src/tcmalloc_guard.h',
+ '<(tcmalloc_dir)/src/tcmalloc_linux.cc',
+ '<(tcmalloc_dir)/src/thread_cache.cc',
+ '<(tcmalloc_dir)/src/thread_cache.h',
+ '<(tcmalloc_dir)/src/windows/config.h',
+ '<(tcmalloc_dir)/src/windows/get_mangled_names.cc',
+ '<(tcmalloc_dir)/src/windows/google/tcmalloc.h',
+ '<(tcmalloc_dir)/src/windows/ia32_modrm_map.cc',
+ '<(tcmalloc_dir)/src/windows/ia32_opcode_map.cc',
+ '<(tcmalloc_dir)/src/windows/mingw.h',
+ '<(tcmalloc_dir)/src/windows/mini_disassembler.cc',
+ '<(tcmalloc_dir)/src/windows/mini_disassembler.h',
+ '<(tcmalloc_dir)/src/windows/mini_disassembler_types.h',
+ '<(tcmalloc_dir)/src/windows/override_functions.cc',
+ '<(tcmalloc_dir)/src/windows/patch_functions.cc',
+ '<(tcmalloc_dir)/src/windows/port.cc',
+ '<(tcmalloc_dir)/src/windows/port.h',
+ '<(tcmalloc_dir)/src/windows/preamble_patcher.cc',
+ '<(tcmalloc_dir)/src/windows/preamble_patcher.h',
+ '<(tcmalloc_dir)/src/windows/preamble_patcher_with_stub.cc',
+
+ # jemalloc files
+ '<(jemalloc_dir)/jemalloc.c',
+ '<(jemalloc_dir)/jemalloc.h',
+ '<(jemalloc_dir)/ql.h',
+ '<(jemalloc_dir)/qr.h',
+ '<(jemalloc_dir)/rb.h',
+
+ 'allocator_shim.cc',
+ 'generic_allocators.cc',
+ 'win_allocator.cc',
+ ],
+ # sources! means that these are not compiled directly.
+ 'sources!': [
+ # Included by allocator_shim.cc for maximal inlining.
+ 'generic_allocators.cc',
+ 'win_allocator.cc',
+ '<(tcmalloc_dir)/src/tcmalloc.cc',
+
+ # Unneeded on Windows, symbolize_linux.cc used there instead.
+ '<(tcmalloc_dir)/src/symbolize.cc',
+
+ # We simply don't use these, but list them above so that IDE
+ # users can view the full available source for reference, etc.
+ '<(tcmalloc_dir)/src/addressmap-inl.h',
+ '<(tcmalloc_dir)/src/base/atomicops-internals-linuxppc.h',
+ '<(tcmalloc_dir)/src/base/atomicops-internals-macosx.h',
+ '<(tcmalloc_dir)/src/base/atomicops-internals-x86-msvc.h',
+ '<(tcmalloc_dir)/src/base/atomicops-internals-x86.cc',
+ '<(tcmalloc_dir)/src/base/atomicops-internals-x86.h',
+ '<(tcmalloc_dir)/src/base/atomicops.h',
+ '<(tcmalloc_dir)/src/base/basictypes.h',
+ '<(tcmalloc_dir)/src/base/commandlineflags.h',
+ '<(tcmalloc_dir)/src/base/cycleclock.h',
+ '<(tcmalloc_dir)/src/base/elfcore.h',
+ '<(tcmalloc_dir)/src/base/googleinit.h',
+ '<(tcmalloc_dir)/src/base/linux_syscall_support.h',
+ '<(tcmalloc_dir)/src/base/simple_mutex.h',
+ '<(tcmalloc_dir)/src/base/spinlock_linux-inl.h',
+ '<(tcmalloc_dir)/src/base/spinlock_posix-inl.h',
+ '<(tcmalloc_dir)/src/base/spinlock_win32-inl.h',
+ '<(tcmalloc_dir)/src/base/stl_allocator.h',
+ '<(tcmalloc_dir)/src/base/thread_annotations.h',
+ '<(tcmalloc_dir)/src/debugallocation.cc',
+ '<(tcmalloc_dir)/src/getpc.h',
+ '<(tcmalloc_dir)/src/google/heap-checker.h',
+ '<(tcmalloc_dir)/src/google/heap-profiler.h',
+ '<(tcmalloc_dir)/src/google/malloc_extension_c.h',
+ '<(tcmalloc_dir)/src/google/malloc_hook.h',
+ '<(tcmalloc_dir)/src/google/malloc_hook_c.h',
+ '<(tcmalloc_dir)/src/google/profiler.h',
+ '<(tcmalloc_dir)/src/google/stacktrace.h',
+ '<(tcmalloc_dir)/src/memfs_malloc.cc',
+ '<(tcmalloc_dir)/src/packed-cache-inl.h',
+ '<(tcmalloc_dir)/src/page_heap_allocator.h',
+ '<(tcmalloc_dir)/src/pagemap.h',
+ '<(tcmalloc_dir)/src/stacktrace_config.h',
+ '<(tcmalloc_dir)/src/stacktrace_generic-inl.h',
+ '<(tcmalloc_dir)/src/stacktrace_libunwind-inl.h',
+ '<(tcmalloc_dir)/src/stacktrace_powerpc-inl.h',
+ '<(tcmalloc_dir)/src/stacktrace_win32-inl.h',
+ '<(tcmalloc_dir)/src/stacktrace_with_context.cc',
+ '<(tcmalloc_dir)/src/stacktrace_x86-inl.h',
+ '<(tcmalloc_dir)/src/stacktrace_x86_64-inl.h',
+ '<(tcmalloc_dir)/src/tcmalloc_guard.h',
+ '<(tcmalloc_dir)/src/windows/config.h',
+ '<(tcmalloc_dir)/src/windows/google/tcmalloc.h',
+ '<(tcmalloc_dir)/src/windows/get_mangled_names.cc',
+ '<(tcmalloc_dir)/src/windows/ia32_modrm_map.cc',
+ '<(tcmalloc_dir)/src/windows/ia32_opcode_map.cc',
+ '<(tcmalloc_dir)/src/windows/mingw.h',
+ '<(tcmalloc_dir)/src/windows/mini_disassembler.cc',
+ '<(tcmalloc_dir)/src/windows/mini_disassembler.h',
+ '<(tcmalloc_dir)/src/windows/mini_disassembler_types.h',
+ '<(tcmalloc_dir)/src/windows/override_functions.cc',
+ '<(tcmalloc_dir)/src/windows/patch_functions.cc',
+ '<(tcmalloc_dir)/src/windows/preamble_patcher.cc',
+ '<(tcmalloc_dir)/src/windows/preamble_patcher.h',
+ '<(tcmalloc_dir)/src/windows/preamble_patcher_with_stub.cc',
+ ],
+ 'msvs_settings': {
+ # TODO(sgk): merge this with build/common.gypi settings
+ 'VCLibrarianTool=': {
+ 'AdditionalOptions': ['/ignore:4006,4221'],
+ 'AdditionalLibraryDirectories':
+ ['<(DEPTH)/third_party/platformsdk_win2008_6_1/files/Lib'],
+ },
+ 'VCLinkerTool': {
+ 'AdditionalOptions': ['/ignore:4006'],
+ },
+ },
+ 'configurations': {
+ 'Debug_Base': {
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'RuntimeLibrary': '0',
+ },
+ },
+ },
+ },
+ 'conditions': [
+ ['OS=="win"', {
+ 'defines': [
+ ['PERFTOOLS_DLL_DECL', '']
+ ],
+ 'dependencies': [
+ 'libcmt',
+ ],
+ 'include_dirs': [
+ '<(tcmalloc_dir)/src/windows',
+ ],
+ 'sources!': [
+ '<(tcmalloc_dir)/src/base/linuxthreads.cc',
+ '<(tcmalloc_dir)/src/base/linuxthreads.h',
+ '<(tcmalloc_dir)/src/base/vdso_support.cc',
+ '<(tcmalloc_dir)/src/base/vdso_support.h',
+ '<(tcmalloc_dir)/src/maybe_threads.cc',
+ '<(tcmalloc_dir)/src/maybe_threads.h',
+ '<(tcmalloc_dir)/src/symbolize.h',
+ '<(tcmalloc_dir)/src/system-alloc.cc',
+ '<(tcmalloc_dir)/src/system-alloc.h',
+
+ # don't use linux forked version
+ '<(tcmalloc_dir)/src/tcmalloc_linux.cc',
+ '<(tcmalloc_dir)/src/symbolize_linux.cc',
+
+ # heap-profiler/checker/cpuprofiler
+ '<(tcmalloc_dir)/src/base/thread_lister.c',
+ '<(tcmalloc_dir)/src/base/thread_lister.h',
+ '<(tcmalloc_dir)/src/heap-checker-bcad.cc',
+ '<(tcmalloc_dir)/src/heap-checker.cc',
+ '<(tcmalloc_dir)/src/heap-profiler.cc',
+ '<(tcmalloc_dir)/src/memory_region_map.cc',
+ '<(tcmalloc_dir)/src/memory_region_map.h',
+ '<(tcmalloc_dir)/src/profiledata.cc',
+ '<(tcmalloc_dir)/src/profiledata.h',
+ '<(tcmalloc_dir)/src/profile-handler.cc',
+ '<(tcmalloc_dir)/src/profile-handler.h',
+ '<(tcmalloc_dir)/src/profiler.cc',
+ ],
+ }],
+ ['OS=="linux" or OS=="freebsd" or OS=="openbsd"', {
+ 'sources!': [
+ '<(tcmalloc_dir)/src/system-alloc.h',
+ '<(tcmalloc_dir)/src/windows/port.cc',
+ '<(tcmalloc_dir)/src/windows/port.h',
+
+ # TODO(willchan): Support allocator shim later on.
+ 'allocator_shim.cc',
+
+ # TODO(willchan): support jemalloc on other platforms
+ # jemalloc files
+ '<(jemalloc_dir)/jemalloc.c',
+ '<(jemalloc_dir)/jemalloc.h',
+ '<(jemalloc_dir)/ql.h',
+ '<(jemalloc_dir)/qr.h',
+ '<(jemalloc_dir)/rb.h',
+
+ # TODO(willchan): Return to using this when page_heap_linux.cc
+ # becomes unnecessary.
+ '<(tcmalloc_dir)/src/page_heap.cc',
+ ],
+ # TODO(willchan): This is actually just a branched copy of the
+ # vanilla upstream page_heap.cc. The current forked copy of
+ # page_heap.cc has Windows-specific code in it so Linux can't
+ # use it. These need to be refactored so we can track changes
+ # to the upstream page_heap.cc without duplication.
+ 'sources': [
+ '<(tcmalloc_dir)/src/page_heap_linux.cc',
+ ],
+ 'cflags!': [
+ '-fvisibility=hidden',
+ ],
+ 'link_settings': {
+ 'ldflags': [
+ # Don't let linker rip this symbol out, otherwise the heap&cpu
+ # profilers will not initialize properly on startup.
+ '-Wl,-uIsHeapProfilerRunning,-uProfilerStart',
+ # Do the same for heap leak checker.
+ '-Wl,-u_Z21InitialMallocHook_NewPKvj,-u_Z22InitialMallocHook_MMapPKvS0_jiiix,-u_Z22InitialMallocHook_SbrkPKvi',
+ '-Wl,-u_Z21InitialMallocHook_NewPKvm,-u_Z22InitialMallocHook_MMapPKvS0_miiil,-u_Z22InitialMallocHook_SbrkPKvl',
+ ],
+ },
+ }],
+ ],
+ },
+ {
+ 'target_name': 'allocator_unittests',
+ 'type': 'executable',
+ 'dependencies': [
+ 'allocator',
+ '../../testing/gtest.gyp:gtest',
+ ],
+ 'include_dirs': [
+ '.',
+ '<(tcmalloc_dir)/src/base',
+ '<(tcmalloc_dir)/src',
+ '../..',
+ ],
+ 'msvs_guid': 'E99DA267-BE90-4F45-1294-6919DB2C9999',
+ 'sources': [
+ 'unittest_utils.cc',
+ 'allocator_unittests.cc',
+ ],
+ },
+ ],
+ 'conditions': [
+ ['OS=="win"', {
+ 'targets': [
+ {
+ 'target_name': 'libcmt',
+ 'type': 'none',
+ 'actions': [
+ {
+ 'action_name': 'libcmt',
+ 'inputs': [
+ 'prep_libc.sh',
+ ],
+ 'outputs': [
+ '<(SHARED_INTERMEDIATE_DIR)/allocator/libcmt.lib',
+ ],
+ 'action': [
+ './prep_libc.sh',
+ '$(VCInstallDir)lib',
+ '<(SHARED_INTERMEDIATE_DIR)/allocator',
+ ],
+ },
+ ],
+ },
+ ],
+ }],
+ ],
+}
+
+# Local Variables:
+# tab-width:2
+# indent-tabs-mode:nil
+# End:
+# vim: set expandtab tabstop=2 shiftwidth=2:
diff --git a/base/allocator/allocator_shim.cc b/base/allocator/allocator_shim.cc
new file mode 100644
index 0000000..39161d4
--- /dev/null
+++ b/base/allocator/allocator_shim.cc
@@ -0,0 +1,266 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <config.h>
+
+// When defined, different heap allocators can be used via an environment
+// variable set before running the program. This may reduce the amount
+// of inlining that we get with malloc/free/etc. Disabling makes it
+// so that only tcmalloc can be used.
+#define ENABLE_DYNAMIC_ALLOCATOR_SWITCHING
+
+// TODO(mbelshe): Ensure that all calls to tcmalloc have the proper call depth
+// from the "user code" so that debugging tools (HeapChecker) can work.
+
+// __THROW is defined in glibc systems. It means, counter-intuitively,
+// "This function will never throw an exception." It's an optional
+// optimization tool, but we may need to use it to match glibc prototypes.
+#ifndef __THROW // I guess we're not on a glibc system
+# define __THROW // __THROW is just an optimization, so ok to make it ""
+#endif
+
+// new_mode behaves similarly to MSVC's _set_new_mode.
+// If flag is 0 (default), calls to malloc will behave normally.
+// If flag is 1, calls to malloc will behave like calls to new,
+// and the std_new_handler will be invoked on failure.
+// Can be set by calling _set_new_mode().
+static int new_mode = 0;
+
+typedef enum {
+ TCMALLOC, // TCMalloc is the default allocator.
+ JEMALLOC, // JEMalloc
+ WINDEFAULT, // Windows Heap
+ WINLFH, // Windows LFH Heap
+} Allocator;
+
+// This is the default allocator.
+static Allocator allocator = TCMALLOC;
+
+// We include tcmalloc and the win_allocator to get as much inlining as
+// possible.
+#include "tcmalloc.cc"
+#include "win_allocator.cc"
+
+// Forward declarations from jemalloc.
+extern "C" {
+void* je_malloc(size_t s);
+void* je_realloc(void* p, size_t s);
+void je_free(void* s);
+size_t je_msize(void* p);
+bool je_malloc_init_hard();
+}
+
+extern "C" {
+
+// Call the new handler, if one has been set.
+// Returns true on successfully calling the handler, false otherwise.
+inline bool call_new_handler(bool nothrow) {
+ // Get the current new handler. NB: this function is not
+ // thread-safe. We make a feeble stab at making it so here, but
+ // this lock only protects against tcmalloc interfering with
+ // itself, not with other libraries calling set_new_handler.
+ std::new_handler nh;
+ {
+ SpinLockHolder h(&set_new_handler_lock);
+ nh = std::set_new_handler(0);
+ (void) std::set_new_handler(nh);
+ }
+#if (defined(__GNUC__) && !defined(__EXCEPTIONS)) || (defined(_HAS_EXCEPTIONS) && !_HAS_EXCEPTIONS)
+ if (!nh)
+ return false;
+ // Since exceptions are disabled, we don't really know if new_handler
+ // failed. Assume it will abort if it fails.
+ (*nh)();
+ return false; // break out of the retry loop.
+#else
+ // If no new_handler is established, the allocation failed.
+  if (!nh) {
+    if (nothrow)
+      return false;
+    throw std::bad_alloc();
+  }
+  // Otherwise, try the new_handler. If it returns, retry the
+  // allocation. If it throws std::bad_alloc, fail the allocation.
+  // If it throws something else, don't interfere.
+  try {
+    (*nh)();
+  } catch (const std::bad_alloc&) {
+    if (!nothrow)
+      throw;
+    return false;  // Fail this allocation; the caller will return NULL.
+  }
+  return true;  // The handler ran and may have freed memory, so retry.
+#endif // (defined(__GNUC__) && !defined(__EXCEPTIONS)) || (defined(_HAS_EXCEPTIONS) && !_HAS_EXCEPTIONS)
+}
+
+void* malloc(size_t size) __THROW {
+ void* ptr;
+ for (;;) {
+#ifdef ENABLE_DYNAMIC_ALLOCATOR_SWITCHING
+ switch (allocator) {
+ case JEMALLOC:
+ ptr = je_malloc(size);
+ break;
+ case WINDEFAULT:
+ case WINLFH:
+ ptr = win_heap_malloc(size);
+ break;
+ case TCMALLOC:
+ default:
+ ptr = do_malloc(size);
+ break;
+ }
+#else
+ // TCMalloc case.
+ ptr = do_malloc(size);
+#endif
+ if (ptr)
+ return ptr;
+
+ if (!new_mode || !call_new_handler(true))
+ break;
+ }
+ return ptr;
+}
+
+void free(void* p) __THROW {
+#ifdef ENABLE_DYNAMIC_ALLOCATOR_SWITCHING
+ switch (allocator) {
+ case JEMALLOC:
+ je_free(p);
+ return;
+ case WINDEFAULT:
+ case WINLFH:
+ win_heap_free(p);
+ return;
+ }
+#endif
+ // TCMalloc case.
+ do_free(p);
+}
+
+void* realloc(void* ptr, size_t size) __THROW {
+ // Webkit is brittle for allocators that return NULL for malloc(0). The
+ // realloc(0, 0) code path does not guarantee a non-NULL return, so be sure
+ // to call malloc for this case.
+ if (!ptr)
+ return malloc(size);
+
+ void* new_ptr;
+ for (;;) {
+#ifdef ENABLE_DYNAMIC_ALLOCATOR_SWITCHING
+ switch (allocator) {
+ case JEMALLOC:
+ new_ptr = je_realloc(ptr, size);
+ break;
+ case WINDEFAULT:
+ case WINLFH:
+ new_ptr = win_heap_realloc(ptr, size);
+ break;
+ case TCMALLOC:
+ default:
+ new_ptr = do_realloc(ptr, size);
+ break;
+ }
+#else
+ // TCMalloc case.
+ new_ptr = do_realloc(ptr, size);
+#endif
+
+    // Subtle warning: NULL return does not always indicate out-of-memory. If
+ // the requested new size is zero, realloc should free the ptr and return
+ // NULL.
+ if (new_ptr || !size)
+ return new_ptr;
+ if (!new_mode || !call_new_handler(true))
+ break;
+ }
+ return new_ptr;
+}
+
+// TODO(mbelshe): Implement this for other allocators.
+void malloc_stats(void) __THROW {
+#ifdef ENABLE_DYNAMIC_ALLOCATOR_SWITCHING
+ switch (allocator) {
+ case JEMALLOC:
+ // No stats.
+ return;
+ case WINDEFAULT:
+ case WINLFH:
+ // No stats.
+ return;
+ }
+#endif
+ tc_malloc_stats();
+}
+
+#ifdef WIN32
+
+extern "C" size_t _msize(void* p) {
+#ifdef ENABLE_DYNAMIC_ALLOCATOR_SWITCHING
+ switch (allocator) {
+ case JEMALLOC:
+ return je_msize(p);
+ case WINDEFAULT:
+ case WINLFH:
+ return win_heap_msize(p);
+ }
+#endif
+ return MallocExtension::instance()->GetAllocatedSize(p);
+}
+
+// This is included to resolve references from libcmt.
+extern "C" intptr_t _get_heap_handle() {
+ return 0;
+}
+
+// The CRT heap initialization stub.
+extern "C" int _heap_init() {
+#ifdef ENABLE_DYNAMIC_ALLOCATOR_SWITCHING
+ const char* override = GetenvBeforeMain("CHROME_ALLOCATOR");
+ if (override) {
+ if (!stricmp(override, "jemalloc"))
+ allocator = JEMALLOC;
+ else if (!stricmp(override, "winheap"))
+ allocator = WINDEFAULT;
+ else if (!stricmp(override, "winlfh"))
+ allocator = WINLFH;
+ else if (!stricmp(override, "tcmalloc"))
+ allocator = TCMALLOC;
+ }
+
+ switch (allocator) {
+ case JEMALLOC:
+ return je_malloc_init_hard() ? 0 : 1;
+ case WINDEFAULT:
+ return win_heap_init(false) ? 1 : 0;
+ case WINLFH:
+ return win_heap_init(true) ? 1 : 0;
+ case TCMALLOC:
+ default:
+ // fall through
+ break;
+ }
+#endif
+ // Initializing tcmalloc.
+ // We intentionally leak this object. It lasts for the process
+ // lifetime. Trying to teardown at _heap_term() is so late that
+ // you can't do anything useful anyway.
+ new TCMallocGuard();
+ return 1;
+}
+
+// The CRT heap cleanup stub.
+extern "C" void _heap_term() {}
+
+// We set this to 1 because part of the CRT uses a check of _crtheap != 0
+// to test whether the CRT has been initialized. Once we've ripped out
+// the allocators from libcmt, we need to provide this definition so that
+// the rest of the CRT is still usable.
+extern "C" void* _crtheap = reinterpret_cast<void*>(1);
+
+#endif // WIN32
+
+#include "generic_allocators.cc"
+
+} // extern C
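
For reference, the new_mode flag above matches the semantics of MSVC's
_set_new_mode(): once it is set to 1, a failed malloc() takes the same
new-handler retry path as operator new. A minimal usage sketch, assuming a
Windows build where this shim supplies malloc and _set_new_mode (the
OnOutOfMemory handler and the oversized request are made up for illustration):

    #include <new>      // std::set_new_handler
    #include <new.h>    // _set_new_mode (MSVC)
    #include <stdio.h>
    #include <stdlib.h>

    static void OnOutOfMemory() {
      // With new_mode == 1, the shim calls this when malloc() fails,
      // exactly as a failing operator new would.
      fprintf(stderr, "out of memory\n");
      abort();
    }

    int main() {
      std::set_new_handler(OnOutOfMemory);
      _set_new_mode(1);  // Route malloc() failures through the new handler.

      // An intentionally absurd request: instead of returning NULL, this
      // now ends up in OnOutOfMemory().
      void* p = malloc(static_cast<size_t>(-1));
      (void)p;
      return 0;
    }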
diff --git a/base/allocator/allocator_unittests.cc b/base/allocator/allocator_unittests.cc
new file mode 100644
index 0000000..919cfab
--- /dev/null
+++ b/base/allocator/allocator_unittests.cc
@@ -0,0 +1,490 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <algorithm> // for min()
+#include "base/atomicops.h"
+#include "base/logging.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+// Number of bits in a size_t.
+static const int kSizeBits = 8 * sizeof(size_t);
+// The maximum size of a size_t.
+static const size_t kMaxSize = ~static_cast<size_t>(0);
+// Maximum positive size of a size_t if it were signed.
+static const size_t kMaxSignedSize = ((size_t(1) << (kSizeBits-1)) - 1);
+// An allocation size which is not too big to be reasonable.
+static const size_t kNotTooBig = 100000;
+// An allocation size which is just too big.
+static const size_t kTooBig = ~static_cast<size_t>(0);
+
+namespace {
+
+using std::min;
+
+// Fill a buffer of the specified size with a predetermined pattern
+static void Fill(unsigned char* buffer, int n) {
+ for (int i = 0; i < n; i++) {
+ buffer[i] = (i & 0xff);
+ }
+}
+
+// Check that the specified buffer has the predetermined pattern
+// generated by Fill()
+static bool Valid(unsigned char* buffer, int n) {
+ for (int i = 0; i < n; i++) {
+ if (buffer[i] != (i & 0xff)) {
+ return false;
+ }
+ }
+ return true;
+}
+
+// Check that a buffer is completely zeroed.
+static bool IsZeroed(unsigned char* buffer, int n) {
+ for (int i = 0; i < n; i++) {
+ if (buffer[i] != 0) {
+ return false;
+ }
+ }
+ return true;
+}
+
+// Check alignment
+static void CheckAlignment(void* p, int align) {
+ EXPECT_EQ(0, reinterpret_cast<uintptr_t>(p) & (align-1));
+}
+
+// Return the next interesting size/delta to check. Returns -1 if no more.
+static int NextSize(int size) {
+ if (size < 100)
+ return size+1;
+
+ if (size < 100000) {
+ // Find next power of two
+ int power = 1;
+ while (power < size)
+ power <<= 1;
+
+ // Yield (power-1, power, power+1)
+ if (size < power-1)
+ return power-1;
+
+ if (size == power-1)
+ return power;
+
+ assert(size == power);
+ return power+1;
+ } else {
+ return -1;
+ }
+}
+
+#define GG_ULONGLONG(x) static_cast<uint64>(x)
+
+template <class AtomicType>
+static void TestAtomicIncrement() {
+ // For now, we just test single threaded execution
+
+ // use a guard value to make sure the NoBarrier_AtomicIncrement doesn't go
+ // outside the expected address bounds. This is in particular to
+ // test that some future change to the asm code doesn't cause the
+ // 32-bit NoBarrier_AtomicIncrement to do the wrong thing on 64-bit machines.
+ struct {
+ AtomicType prev_word;
+ AtomicType count;
+ AtomicType next_word;
+ } s;
+
+ AtomicType prev_word_value, next_word_value;
+ memset(&prev_word_value, 0xFF, sizeof(AtomicType));
+ memset(&next_word_value, 0xEE, sizeof(AtomicType));
+
+ s.prev_word = prev_word_value;
+ s.count = 0;
+ s.next_word = next_word_value;
+
+ EXPECT_EQ(base::subtle::NoBarrier_AtomicIncrement(&s.count, 1), 1);
+ EXPECT_EQ(s.count, 1);
+ EXPECT_EQ(s.prev_word, prev_word_value);
+ EXPECT_EQ(s.next_word, next_word_value);
+
+ EXPECT_EQ(base::subtle::NoBarrier_AtomicIncrement(&s.count, 2), 3);
+ EXPECT_EQ(s.count, 3);
+ EXPECT_EQ(s.prev_word, prev_word_value);
+ EXPECT_EQ(s.next_word, next_word_value);
+
+ EXPECT_EQ(base::subtle::NoBarrier_AtomicIncrement(&s.count, 3), 6);
+ EXPECT_EQ(s.count, 6);
+ EXPECT_EQ(s.prev_word, prev_word_value);
+ EXPECT_EQ(s.next_word, next_word_value);
+
+ EXPECT_EQ(base::subtle::NoBarrier_AtomicIncrement(&s.count, -3), 3);
+ EXPECT_EQ(s.count, 3);
+ EXPECT_EQ(s.prev_word, prev_word_value);
+ EXPECT_EQ(s.next_word, next_word_value);
+
+ EXPECT_EQ(base::subtle::NoBarrier_AtomicIncrement(&s.count, -2), 1);
+ EXPECT_EQ(s.count, 1);
+ EXPECT_EQ(s.prev_word, prev_word_value);
+ EXPECT_EQ(s.next_word, next_word_value);
+
+ EXPECT_EQ(base::subtle::NoBarrier_AtomicIncrement(&s.count, -1), 0);
+ EXPECT_EQ(s.count, 0);
+ EXPECT_EQ(s.prev_word, prev_word_value);
+ EXPECT_EQ(s.next_word, next_word_value);
+
+ EXPECT_EQ(base::subtle::NoBarrier_AtomicIncrement(&s.count, -1), -1);
+ EXPECT_EQ(s.count, -1);
+ EXPECT_EQ(s.prev_word, prev_word_value);
+ EXPECT_EQ(s.next_word, next_word_value);
+
+ EXPECT_EQ(base::subtle::NoBarrier_AtomicIncrement(&s.count, -4), -5);
+ EXPECT_EQ(s.count, -5);
+ EXPECT_EQ(s.prev_word, prev_word_value);
+ EXPECT_EQ(s.next_word, next_word_value);
+
+ EXPECT_EQ(base::subtle::NoBarrier_AtomicIncrement(&s.count, 5), 0);
+ EXPECT_EQ(s.count, 0);
+ EXPECT_EQ(s.prev_word, prev_word_value);
+ EXPECT_EQ(s.next_word, next_word_value);
+}
+
+
+#define NUM_BITS(T) (sizeof(T) * 8)
+
+
+template <class AtomicType>
+static void TestCompareAndSwap() {
+ AtomicType value = 0;
+ AtomicType prev = base::subtle::NoBarrier_CompareAndSwap(&value, 0, 1);
+ EXPECT_EQ(1, value);
+ EXPECT_EQ(0, prev);
+
+ // Use test value that has non-zero bits in both halves, more for testing
+ // 64-bit implementation on 32-bit platforms.
+ const AtomicType k_test_val = (GG_ULONGLONG(1) <<
+ (NUM_BITS(AtomicType) - 2)) + 11;
+ value = k_test_val;
+ prev = base::subtle::NoBarrier_CompareAndSwap(&value, 0, 5);
+ EXPECT_EQ(k_test_val, value);
+ EXPECT_EQ(k_test_val, prev);
+
+ value = k_test_val;
+ prev = base::subtle::NoBarrier_CompareAndSwap(&value, k_test_val, 5);
+ EXPECT_EQ(5, value);
+ EXPECT_EQ(k_test_val, prev);
+}
+
+
+template <class AtomicType>
+static void TestAtomicExchange() {
+ AtomicType value = 0;
+ AtomicType new_value = base::subtle::NoBarrier_AtomicExchange(&value, 1);
+ EXPECT_EQ(1, value);
+ EXPECT_EQ(0, new_value);
+
+ // Use test value that has non-zero bits in both halves, more for testing
+ // 64-bit implementation on 32-bit platforms.
+ const AtomicType k_test_val = (GG_ULONGLONG(1) <<
+ (NUM_BITS(AtomicType) - 2)) + 11;
+ value = k_test_val;
+ new_value = base::subtle::NoBarrier_AtomicExchange(&value, k_test_val);
+ EXPECT_EQ(k_test_val, value);
+ EXPECT_EQ(k_test_val, new_value);
+
+ value = k_test_val;
+ new_value = base::subtle::NoBarrier_AtomicExchange(&value, 5);
+ EXPECT_EQ(5, value);
+ EXPECT_EQ(k_test_val, new_value);
+}
+
+
+template <class AtomicType>
+static void TestAtomicIncrementBounds() {
+ // Test increment at the half-width boundary of the atomic type.
+ // It is primarily for testing at the 32-bit boundary for 64-bit atomic type.
+ AtomicType test_val = GG_ULONGLONG(1) << (NUM_BITS(AtomicType) / 2);
+ AtomicType value = test_val - 1;
+ AtomicType new_value = base::subtle::NoBarrier_AtomicIncrement(&value, 1);
+ EXPECT_EQ(test_val, value);
+ EXPECT_EQ(value, new_value);
+
+ base::subtle::NoBarrier_AtomicIncrement(&value, -1);
+ EXPECT_EQ(test_val - 1, value);
+}
+
+// This is a simple sanity check that values are correct. Not testing
+// atomicity
+template <class AtomicType>
+static void TestStore() {
+ const AtomicType kVal1 = static_cast<AtomicType>(0xa5a5a5a5a5a5a5a5LL);
+ const AtomicType kVal2 = static_cast<AtomicType>(-1);
+
+ AtomicType value;
+
+ base::subtle::NoBarrier_Store(&value, kVal1);
+ EXPECT_EQ(kVal1, value);
+ base::subtle::NoBarrier_Store(&value, kVal2);
+ EXPECT_EQ(kVal2, value);
+
+ base::subtle::Acquire_Store(&value, kVal1);
+ EXPECT_EQ(kVal1, value);
+ base::subtle::Acquire_Store(&value, kVal2);
+ EXPECT_EQ(kVal2, value);
+
+ base::subtle::Release_Store(&value, kVal1);
+ EXPECT_EQ(kVal1, value);
+ base::subtle::Release_Store(&value, kVal2);
+ EXPECT_EQ(kVal2, value);
+}
+
+// This is a simple sanity check that values are correct. Not testing
+// atomicity
+template <class AtomicType>
+static void TestLoad() {
+ const AtomicType kVal1 = static_cast<AtomicType>(0xa5a5a5a5a5a5a5a5LL);
+ const AtomicType kVal2 = static_cast<AtomicType>(-1);
+
+ AtomicType value;
+
+ value = kVal1;
+ EXPECT_EQ(kVal1, base::subtle::NoBarrier_Load(&value));
+ value = kVal2;
+ EXPECT_EQ(kVal2, base::subtle::NoBarrier_Load(&value));
+
+ value = kVal1;
+ EXPECT_EQ(kVal1, base::subtle::Acquire_Load(&value));
+ value = kVal2;
+ EXPECT_EQ(kVal2, base::subtle::Acquire_Load(&value));
+
+ value = kVal1;
+ EXPECT_EQ(kVal1, base::subtle::Release_Load(&value));
+ value = kVal2;
+ EXPECT_EQ(kVal2, base::subtle::Release_Load(&value));
+}
+
+template <class AtomicType>
+static void TestAtomicOps() {
+ TestCompareAndSwap<AtomicType>();
+ TestAtomicExchange<AtomicType>();
+ TestAtomicIncrementBounds<AtomicType>();
+ TestStore<AtomicType>();
+ TestLoad<AtomicType>();
+}
+
+static void TestCalloc(size_t n, size_t s, bool ok) {
+ char* p = reinterpret_cast<char*>(calloc(n, s));
+ if (!ok) {
+ EXPECT_EQ(NULL, p) << "calloc(n, s) should not succeed";
+ } else {
+ EXPECT_NE(reinterpret_cast<void*>(NULL), p) <<
+ "calloc(n, s) should succeed";
+ for (int i = 0; i < n*s; i++) {
+ EXPECT_EQ('\0', p[i]);
+ }
+ free(p);
+ }
+}
+
+
+// A global test counter for number of times the NewHandler is called.
+static int news_handled = 0;
+static void TestNewHandler() {
+ ++news_handled;
+ throw std::bad_alloc();
+}
+
+// Because we compile without exceptions, we expect these will not throw.
+static void TestOneNewWithoutExceptions(void* (*func)(size_t),
+ bool should_throw) {
+ // success test
+ try {
+ void* ptr = (*func)(kNotTooBig);
+ EXPECT_NE(reinterpret_cast<void*>(NULL), ptr) <<
+ "allocation should not have failed.";
+ } catch(...) {
+ EXPECT_EQ(0, 1) << "allocation threw unexpected exception.";
+ }
+
+ // failure test
+ try {
+ void* rv = (*func)(kTooBig);
+ EXPECT_EQ(NULL, rv);
+ EXPECT_EQ(false, should_throw) << "allocation should have thrown.";
+ } catch(...) {
+ EXPECT_EQ(true, should_throw) << "allocation threw unexpected exception.";
+ }
+}
+
+static void TestNothrowNew(void* (*func)(size_t)) {
+ news_handled = 0;
+
+ // test without new_handler:
+ std::new_handler saved_handler = std::set_new_handler(0);
+ TestOneNewWithoutExceptions(func, false);
+
+ // test with new_handler:
+ std::set_new_handler(TestNewHandler);
+ TestOneNewWithoutExceptions(func, true);
+ EXPECT_EQ(news_handled, 1) << "nothrow new_handler was not called.";
+ std::set_new_handler(saved_handler);
+}
+
+} // namespace
+
+//-----------------------------------------------------------------------------
+
+TEST(Atomics, AtomicIncrementWord) {
+ TestAtomicIncrement<AtomicWord>();
+}
+
+TEST(Atomics, AtomicIncrement32) {
+ TestAtomicIncrement<Atomic32>();
+}
+
+TEST(Atomics, AtomicOpsWord) {
+  TestAtomicOps<AtomicWord>();
+}
+
+TEST(Atomics, AtomicOps32) {
+  TestAtomicOps<Atomic32>();
+}
+
+TEST(Allocators, Malloc) {
+ // Try allocating data with a bunch of alignments and sizes
+ for (int size = 1; size < 1048576; size *= 2) {
+ unsigned char* ptr = reinterpret_cast<unsigned char*>(malloc(size));
+ CheckAlignment(ptr, 2); // Should be 2 byte aligned
+ Fill(ptr, size);
+ EXPECT_EQ(true, Valid(ptr, size));
+ free(ptr);
+ }
+}
+
+TEST(Allocators, Calloc) {
+ TestCalloc(0, 0, true);
+ TestCalloc(0, 1, true);
+ TestCalloc(1, 1, true);
+ TestCalloc(1<<10, 0, true);
+ TestCalloc(1<<20, 0, true);
+ TestCalloc(0, 1<<10, true);
+ TestCalloc(0, 1<<20, true);
+ TestCalloc(1<<20, 2, true);
+ TestCalloc(2, 1<<20, true);
+ TestCalloc(1000, 1000, true);
+
+ TestCalloc(kMaxSize, 2, false);
+ TestCalloc(2, kMaxSize, false);
+ TestCalloc(kMaxSize, kMaxSize, false);
+
+ TestCalloc(kMaxSignedSize, 3, false);
+ TestCalloc(3, kMaxSignedSize, false);
+ TestCalloc(kMaxSignedSize, kMaxSignedSize, false);
+}
+
+TEST(Allocators, New) {
+ TestNothrowNew(&::operator new);
+ TestNothrowNew(&::operator new[]);
+}
+
+// This makes sure that reallocing a small number of bytes in either
+// direction doesn't cause us to allocate new memory.
+TEST(Allocators, Realloc1) {
+ int start_sizes[] = { 100, 1000, 10000, 100000 };
+ int deltas[] = { 1, -2, 4, -8, 16, -32, 64, -128 };
+
+ for (int s = 0; s < sizeof(start_sizes)/sizeof(*start_sizes); ++s) {
+ void* p = malloc(start_sizes[s]);
+ CHECK(p);
+ // The larger the start-size, the larger the non-reallocing delta.
+ for (int d = 0; d < s*2; ++d) {
+ void* new_p = realloc(p, start_sizes[s] + deltas[d]);
+ CHECK(p == new_p); // realloc should not allocate new memory
+ }
+ // Test again, but this time reallocing smaller first.
+ for (int d = 0; d < s*2; ++d) {
+ void* new_p = realloc(p, start_sizes[s] - deltas[d]);
+ CHECK(p == new_p); // realloc should not allocate new memory
+ }
+ free(p);
+ }
+}
+
+TEST(Allocators, Realloc2) {
+ for (int src_size = 0; src_size >= 0; src_size = NextSize(src_size)) {
+ for (int dst_size = 0; dst_size >= 0; dst_size = NextSize(dst_size)) {
+ unsigned char* src = reinterpret_cast<unsigned char*>(malloc(src_size));
+ Fill(src, src_size);
+ unsigned char* dst =
+ reinterpret_cast<unsigned char*>(realloc(src, dst_size));
+ EXPECT_EQ(true, Valid(dst, min(src_size, dst_size)));
+ Fill(dst, dst_size);
+ EXPECT_EQ(true, Valid(dst, dst_size));
+ if (dst != NULL) free(dst);
+ }
+ }
+
+ // Now make sure realloc works correctly even when we overflow the
+ // packed cache, so some entries are evicted from the cache.
+ // The cache has 2^12 entries, keyed by page number.
+ const int kNumEntries = 1 << 14;
+ int** p = reinterpret_cast<int**>(malloc(sizeof(*p) * kNumEntries));
+ int sum = 0;
+ for (int i = 0; i < kNumEntries; i++) {
+    // No page size is likely to be bigger than 8192.
+ p[i] = reinterpret_cast<int*>(malloc(8192));
+ p[i][1000] = i; // use memory deep in the heart of p
+ }
+ for (int i = 0; i < kNumEntries; i++) {
+ p[i] = reinterpret_cast<int*>(realloc(p[i], 9000));
+ }
+ for (int i = 0; i < kNumEntries; i++) {
+ sum += p[i][1000];
+ free(p[i]);
+ }
+ EXPECT_EQ(kNumEntries/2 * (kNumEntries - 1), sum); // assume kNE is even
+ free(p);
+}
+
+TEST(Allocators, ReallocZero) {
+ // Test that realloc to zero does not return NULL.
+ for (int size = 0; size >= 0; size = NextSize(size)) {
+ char* ptr = reinterpret_cast<char*>(malloc(size));
+ EXPECT_NE(static_cast<char*>(NULL), ptr);
+ ptr = reinterpret_cast<char*>(realloc(ptr, 0));
+ EXPECT_NE(static_cast<char*>(NULL), ptr);
+ if (ptr)
+ free(ptr);
+ }
+}
+
+#ifdef WIN32
+// Test recalloc
+TEST(Allocators, Recalloc) {
+ for (int src_size = 0; src_size >= 0; src_size = NextSize(src_size)) {
+ for (int dst_size = 0; dst_size >= 0; dst_size = NextSize(dst_size)) {
+ unsigned char* src =
+ reinterpret_cast<unsigned char*>(_recalloc(NULL, 1, src_size));
+ EXPECT_EQ(true, IsZeroed(src, src_size));
+ Fill(src, src_size);
+ unsigned char* dst =
+ reinterpret_cast<unsigned char*>(_recalloc(src, 1, dst_size));
+ EXPECT_EQ(true, Valid(dst, min(src_size, dst_size)));
+ Fill(dst, dst_size);
+ EXPECT_EQ(true, Valid(dst, dst_size));
+ if (dst != NULL)
+ free(dst);
+ }
+ }
+}
+#endif
+
+
+int main(int argc, char** argv) {
+ testing::InitGoogleTest(&argc, argv);
+ return RUN_ALL_TESTS();
+}
+
diff --git a/base/allocator/generic_allocators.cc b/base/allocator/generic_allocators.cc
new file mode 100644
index 0000000..6ea36ec
--- /dev/null
+++ b/base/allocator/generic_allocators.cc
@@ -0,0 +1,137 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// When possible, we implement allocator functions on top of the basic
+// low-level functions malloc() and free(). This way, including a new
+// allocator is as simple as providing just a small interface.
+//
+// As such, this file should not contain any allocator-specific code.
+
+// Implement a C++ style allocation, which always calls the new_handler
+// on failure.
+inline void* generic_cpp_alloc(size_t size, bool nothrow) {
+ void* ptr;
+ for (;;) {
+ ptr = malloc(size);
+ if (ptr)
+ return ptr;
+ if (!call_new_handler(nothrow))
+ break;
+ }
+ return ptr;
+}
+
+extern "C++" {
+
+void* __cdecl operator new(size_t size) {
+ return generic_cpp_alloc(size, false);
+}
+
+void operator delete(void* p) __THROW {
+ free(p);
+}
+
+void* operator new[](size_t size) {
+ return generic_cpp_alloc(size, false);
+}
+
+void operator delete[](void* p) __THROW {
+ free(p);
+}
+
+void* operator new(size_t size, const std::nothrow_t& nt) __THROW {
+ return generic_cpp_alloc(size, true);
+}
+
+void* operator new[](size_t size, const std::nothrow_t& nt) __THROW {
+ return generic_cpp_alloc(size, true);
+}
+
+// This function behaves similarly to MSVC's _set_new_mode.
+// If flag is 0 (default), calls to malloc will behave normally.
+// If flag is 1, calls to malloc will behave like calls to new,
+// and the std_new_handler will be invoked on failure.
+// Returns the previous mode.
+int _set_new_mode(int flag) __THROW {
+ int old_mode = new_mode;
+ new_mode = flag;
+ return old_mode;
+}
+
+} // extern "C++"
+
+extern "C" {
+
+void* calloc(size_t n, size_t elem_size) __THROW {
+ // Overflow check
+ const size_t size = n * elem_size;
+ if (elem_size != 0 && size / elem_size != n) return NULL;
+
+ void* result = malloc(size);
+ if (result != NULL) {
+ memset(result, 0, size);
+ }
+ return result;
+}
+
+void cfree(void* p) __THROW {
+ free(p);
+}
+
+#ifdef WIN32
+
+void* _recalloc(void* p, size_t n, size_t elem_size) {
+ if (!p)
+ return calloc(n, elem_size);
+
+ // This API is a bit odd.
+ // Note: recalloc only guarantees zeroed memory when p is NULL.
+ // Generally, calls to malloc() have padding. So a request
+ // to malloc N bytes actually malloc's N+x bytes. Later, if
+ // that buffer is passed to recalloc, we don't know what N
+ // was anymore. We only know what N+x is. As such, there is
+ // no way to know what to zero out.
+ const size_t size = n * elem_size;
+ if (elem_size != 0 && size / elem_size != n) return NULL;
+ return realloc(p, size);
+}
+
+void* _calloc_impl(size_t n, size_t size) {
+ return calloc(n, size);
+}
+
+#ifndef NDEBUG
+#undef malloc
+#undef free
+#undef calloc
+int _CrtDbgReport(int, const char*, int, const char*, const char*, ...) {
+ return 0;
+}
+
+int _CrtDbgReportW(int, const wchar_t*, int, const wchar_t*,
+ const wchar_t*, ...) {
+ return 0;
+}
+
+int _CrtSetReportMode(int, int) {
+ return 0;
+}
+
+void* _malloc_dbg(size_t size, int , const char*, int) {
+ return malloc(size);
+}
+
+void _free_dbg(void* ptr, int) {
+ free(ptr);
+}
+
+void* _calloc_dbg(size_t n, size_t size, int, const char*, int) {
+ return calloc(n, size);
+}
+#endif // NDEBUG
+
+#endif // WIN32
+
+} // extern C
+
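The overflow check at the top of calloc() above is what keeps n * elem_size
from silently wrapping around in a size_t. A standalone sketch of the condition
it rejects (not tied to this shim; the helper name is made up):

    #include <stddef.h>
    #include <stdio.h>

    // True when n * elem_size cannot be represented in a size_t -- the same
    // test calloc() performs before calling malloc().
    static bool MultiplyWouldOverflow(size_t n, size_t elem_size) {
      const size_t total = n * elem_size;
      return elem_size != 0 && total / elem_size != n;
    }

    int main() {
      const size_t huge = static_cast<size_t>(-1);  // SIZE_MAX
      // SIZE_MAX * 2 wraps, so total / 2 no longer equals SIZE_MAX.
      printf("%d\n", MultiplyWouldOverflow(huge, 2));     // prints 1
      printf("%d\n", MultiplyWouldOverflow(1000, 1000));  // prints 0
      return 0;
    }

Without the check, a wrapped size would make calloc() return a buffer far
smaller than the caller expects, which the TestCalloc(kMaxSize, 2, false)
cases in allocator_unittests.cc exist to catch.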
diff --git a/base/allocator/prep_libc.sh b/base/allocator/prep_libc.sh
new file mode 100644
index 0000000..5edd2fe
--- /dev/null
+++ b/base/allocator/prep_libc.sh
@@ -0,0 +1,39 @@
+#!/bin/sh
+# Copyright (c) 2009 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+#
+# This script takes libcmt.lib for VS2005 and removes the allocation related
+# functions from it.
+#
+# Usage: prep_libc.sh <VCLibDir> <OutputDir>
+#
+# VCLibDir is the VC lib directory, typically:
+#    C:\Program Files\Microsoft Visual Studio 8\VC\lib
+#
+# OutputDir is the directory where the modified libcmt.lib should be stored.
+#
+
+LIBCMT="${1}\\libcmt.lib"
+LIBCMTPDB="${1}\\libcmt.pdb"
+OUTDIR=$2
+OUTCMT="${2}\\libcmt.lib"
+
+mkdir -p $OUTDIR
+cp "$LIBCMT" "$OUTDIR"
+cp "$LIBCMTPDB" "$OUTDIR"
+
+
+# We'll remove the symbols based on paths found in either the VS2005 or VS2008
+# libcmt.lib files.
+LIBCMTSRCPATHVS2005="build\\intel\\mt_obj\\"
+LIBCMTSRCPATHVS2008="f:\\dd\\vctools\\crt_bld\\SELF_X86\\crt\\src\\build\\INTEL\\mt_obj\\"
+
+OBJFILES="malloc.obj free.obj realloc.obj new.obj delete.obj new2.obj delete2.obj align.obj msize.obj heapinit.obj expand.obj heapchk.obj heapwalk.obj heapmin.obj sbheap.obj calloc.obj recalloc.obj calloc_impl.obj new_mode.obj newopnt.obj"
+
+for FILE in $OBJFILES
+do
+ echo ${FILE}
+ LIB /NOLOGO /IGNORE:4006,4014,4221 /REMOVE:${LIBCMTSRCPATHVS2005}${FILE} $OUTCMT
+ LIB /NOLOGO /IGNORE:4006,4014,4221 /REMOVE:${LIBCMTSRCPATHVS2008}${FILE} $OUTCMT
+done
diff --git a/base/allocator/unittest_utils.cc b/base/allocator/unittest_utils.cc
new file mode 100644
index 0000000..130ba15
--- /dev/null
+++ b/base/allocator/unittest_utils.cc
@@ -0,0 +1,18 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// The unittests need this in order to link without pulling in tons
+// of other libraries.
+
+#include <config.h>
+
+#include <stdarg.h>  // va_list, va_start, va_end for the snprintf shim below.
+
+inline int snprintf(char* buffer, size_t count, const char* format, ...) {
+ int result;
+ va_list args;
+ va_start(args, format);
+ result = _vsnprintf(buffer, count, format, args);
+ va_end(args);
+ return result;
+}
+
diff --git a/base/allocator/win_allocator.cc b/base/allocator/win_allocator.cc
new file mode 100644
index 0000000..8ae653a
--- /dev/null
+++ b/base/allocator/win_allocator.cc
@@ -0,0 +1,50 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This is a simple allocator based on the windows heap.
+
+extern "C" {
+
+HANDLE win_heap;
+
+bool win_heap_init(bool use_lfh) {
+ win_heap = HeapCreate(0, 0, 0);
+ if (win_heap == NULL)
+ return false;
+
+ if (use_lfh) {
+ ULONG enable_lfh = 2;
+ HeapSetInformation(win_heap, HeapCompatibilityInformation,
+ &enable_lfh, sizeof(enable_lfh));
+ // NOTE: Setting LFH may fail. Vista already has it enabled.
+ // And under the debugger, it won't use LFH. So we
+ // ignore any errors.
+ }
+
+ return true;
+}
+
+void* win_heap_malloc(size_t size) {
+ return HeapAlloc(win_heap, 0, size);
+}
+
+void win_heap_free(void* ptr) {
+  HeapFree(win_heap, 0, ptr);
+}
+
+void* win_heap_realloc(void* ptr, size_t size) {
+ if (!ptr)
+ return win_heap_malloc(size);
+ if (!size) {
+ win_heap_free(ptr);
+ return NULL;
+ }
+ return HeapReAlloc(win_heap, 0, ptr, size);
+}
+
+size_t win_heap_msize(void* ptr) {
+ return HeapSize(win_heap, 0, ptr);
+}
+
+} // extern "C"
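
Taken in isolation, the win_heap wrapper can be exercised directly; in the
real build it is only ever #included from allocator_shim.cc rather than
compiled on its own. A small sketch (the inclusion path and the sizes are
illustrative only):

    #include <windows.h>
    #include <assert.h>
    #include <string.h>

    // Pull the wrapper in the same way the shim does: by inclusion.
    #include "base/allocator/win_allocator.cc"

    int main() {
      // false selects the plain Windows heap; true asks for the LFH.
      if (!win_heap_init(false))
        return 1;

      char* p = static_cast<char*>(win_heap_malloc(16));
      assert(p != NULL && win_heap_msize(p) >= 16);
      memset(p, 0xAB, 16);

      p = static_cast<char*>(win_heap_realloc(p, 64));
      assert(p != NULL && win_heap_msize(p) >= 64);

      win_heap_free(p);
      return 0;
    }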