author     rsesek@chromium.org <rsesek@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2013-06-28 17:40:24 +0000
committer  rsesek@chromium.org <rsesek@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2013-06-28 17:40:24 +0000
commit     bac984109266ad0c8ce11a6930a81303de528455 (patch)
tree       c1a21d15b0c73fadb16671f7d47bd3e57a350d69
parent     ea1a2c532ad5ccf3d51165e06165d76a93a0806a (diff)
Split memory-related routines out of base/process_util.h into base/process/memory.h.
BUG=242290
R=brettw@chromium.org

Review URL: https://codereview.chromium.org/17910003

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@209172 0039d316-1c4b-4281-b951-d872f2087c98
-rw-r--r--  base/base.gyp                                           8
-rw-r--r--  base/base.gypi                                          12
-rw-r--r--  base/process/memory.h                                   71
-rw-r--r--  base/process/memory_linux.cc                            183
-rw-r--r--  base/process/memory_mac.mm                              704
-rw-r--r--  base/process/memory_stubs.cc                            19
-rw-r--r--  base/process/memory_unittest.cc                         379
-rw-r--r--  base/process/memory_unittest_mac.h (renamed from base/process_util_unittest_mac.h)    6
-rw-r--r--  base/process/memory_unittest_mac.mm (renamed from base/process_util_unittest_mac.mm)  2
-rw-r--r--  base/process/memory_win.cc                              85
-rw-r--r--  base/process_util.h                                     52
-rw-r--r--  base/process_util_freebsd.cc                            13
-rw-r--r--  base/process_util_ios.mm                                8
-rw-r--r--  base/process_util_linux.cc                              166
-rw-r--r--  base/process_util_mac.mm                                681
-rw-r--r--  base/process_util_openbsd.cc                            6
-rw-r--r--  base/process_util_unittest.cc                           350
-rw-r--r--  base/process_util_win.cc                                67
18 files changed, 1463 insertions, 1349 deletions
diff --git a/base/base.gyp b/base/base.gyp
index 17d31c9..9010bcc 100644
--- a/base/base.gyp
+++ b/base/base.gyp
@@ -561,8 +561,9 @@
'prefs/pref_value_store_unittest.cc',
'process_util_unittest.cc',
'process_util_unittest_ios.cc',
- 'process_util_unittest_mac.h',
- 'process_util_unittest_mac.mm',
+ 'process/memory_unittest.cc',
+ 'process/memory_unittest_mac.h',
+ 'process/memory_unittest_mac.mm',
'profiler/tracked_time_unittest.cc',
'rand_util_unittest.cc',
'safe_numerics_unittest.cc',
@@ -690,7 +691,8 @@
['OS == "ios" and _toolset != "host"', {
'sources/': [
# Only test the iOS-meaningful portion of process_utils.
- ['exclude', '^process_util_unittest'],
+ ['exclude', '^process_util_unittest\\.cc$'],
+ ['exclude', '^process/memory_unittest'],
['include', '^process_util_unittest_ios\\.cc$'],
# Requires spawning processes.
['exclude', '^metrics/stats_table_unittest\\.cc$'],
diff --git a/base/base.gypi b/base/base.gypi
index 1a36f27..4e8d7a7 100644
--- a/base/base.gypi
+++ b/base/base.gypi
@@ -395,6 +395,10 @@
'process_win.cc',
'process/internal_linux.cc',
'process/internal_linux.h',
+ 'process/memory.h',
+ 'process/memory_linux.cc',
+ 'process/memory_mac.mm',
+ 'process/memory_win.cc',
'process/process_iterator.cc',
'process/process_iterator.h',
'process/process_iterator_freebsd.cc',
@@ -714,6 +718,7 @@
'sources/': [
['include', '^files/file_path_watcher_linux\\.cc$'],
['include', '^process_util_linux\\.cc$'],
+ ['include', '^process/memory_linux\\.cc$'],
['include', '^process/internal_linux\\.cc$'],
['include', '^process/process_iterator\\.cc$'],
['include', '^process/process_iterator_linux\\.cc$'],
@@ -755,6 +760,10 @@
# needed on iOS (mostly for unit tests).
['exclude', '^process_util'],
['include', '^process_util_ios\\.mm$'],
+ ['exclude', '^process/memory_mac\\.mm$'],
+ ],
+ 'sources': [
+ 'process/memory_stubs.cc',
],
'sources!': [
'message_loop/message_pump_libevent.cc'
@@ -829,6 +838,9 @@
],
}],
['<(os_bsd)==1 and >(nacl_untrusted_build)==0', {
+ 'sources': [
+ 'process/memory_stubs.cc',
+ ],
'sources/': [
['exclude', '^files/file_path_watcher_linux\\.cc$'],
['exclude', '^files/file_path_watcher_stub\\.cc$'],
diff --git a/base/process/memory.h b/base/process/memory.h
new file mode 100644
index 0000000..d115a81
--- /dev/null
+++ b/base/process/memory.h
@@ -0,0 +1,71 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_PROCESS_MEMORY_H_
+#define BASE_PROCESS_MEMORY_H_
+
+#include "base/basictypes.h"
+#include "base/base_export.h"
+#include "base/process.h"
+#include "build/build_config.h"
+
+#if defined(OS_WIN)
+#include <windows.h>
+#endif
+
+namespace base {
+
+// Enables low fragmentation heap (LFH) for every heap of this process. This
+// won't have any effect on heaps created after this function call. It will not
+// modify data allocated in the heaps before calling this function. So it is
+// better to call this function early in initialization and again before
+// entering the main loop.
+// Note: Returns true on Windows 2000 without doing anything.
+BASE_EXPORT bool EnableLowFragmentationHeap();
+
+// Enables 'terminate on heap corruption' flag. Helps protect against heap
+// overflow. Has no effect if the OS doesn't provide the necessary facility.
+BASE_EXPORT void EnableTerminationOnHeapCorruption();
+
+// Turns on process termination if memory runs out.
+BASE_EXPORT void EnableTerminationOnOutOfMemory();
+
+#if defined(USE_LINUX_BREAKPAD)
+BASE_EXPORT extern size_t g_oom_size;
+#endif
+
+#if defined(OS_WIN)
+// Returns the module handle to which an address belongs. The reference count
+// of the module is not incremented.
+BASE_EXPORT HMODULE GetModuleFromAddress(void* address);
+#endif
+
+#if defined(OS_LINUX) || defined(OS_ANDROID)
+// The maximum allowed value for the OOM score.
+const int kMaxOomScore = 1000;
+
+// This adjusts /proc/<pid>/oom_score_adj so the Linux OOM killer will
+// prefer to kill certain process types over others. The range for the
+// adjustment is [-1000, 1000], with [0, 1000] being user accessible.
+// If the Linux system doesn't support the newer oom_score_adj range
+// of [0, 1000], then we revert to using the older oom_adj, and
+// translate the given value into [0, 15]. Some aliasing of values
+// may occur in that case, of course.
+BASE_EXPORT bool AdjustOOMScore(ProcessId process, int score);
+#endif
+
+#if defined(OS_MACOSX)
+// Very large images or svg canvases can cause huge mallocs. Skia
+// does tricks on tcmalloc-based systems to allow malloc to fail with
+// a NULL rather than hit the oom crasher. This replicates that for
+// OSX.
+//
+// IF YOU USE THIS WITHOUT CONSULTING YOUR FRIENDLY OSX DEVELOPER,
+// YOUR CODE IS LIKELY TO BE REVERTED. THANK YOU.
+BASE_EXPORT void* UncheckedMalloc(size_t size);
+#endif // defined(OS_MACOSX)
+
+} // namespace base
+
+#endif // BASE_PROCESS_MEMORY_H_
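For orientation, a minimal sketch of how a caller might consume the new base/process/memory.h API during early startup. The base:: functions and kMaxOomScore come from the header above; the main() wiring and the example pid/score values are hypothetical.

// Hypothetical startup wiring; only the base:: calls are from memory.h.
#include "base/process/memory.h"

int main(int argc, char** argv) {
  // Make allocation failure and heap corruption fatal as early as possible.
  base::EnableTerminationOnOutOfMemory();
  base::EnableTerminationOnHeapCorruption();
#if defined(OS_LINUX) || defined(OS_ANDROID)
  // Example: make a (hypothetical) background child more attractive to the
  // Linux OOM killer; [0, kMaxOomScore] is the user-accessible range.
  base::ProcessId background_child = 1234;  // pid obtained elsewhere
  base::AdjustOOMScore(background_child, 900);
#endif
  return 0;
}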
diff --git a/base/process/memory_linux.cc b/base/process/memory_linux.cc
new file mode 100644
index 0000000..81cf20c
--- /dev/null
+++ b/base/process/memory_linux.cc
@@ -0,0 +1,183 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/process/memory.h"
+
+#include <new>
+
+#include "base/file_util.h"
+#include "base/files/file_path.h"
+#include "base/logging.h"
+#include "base/process/internal_linux.h"
+#include "base/strings/string_number_conversions.h"
+
+namespace base {
+
+namespace {
+
+void OnNoMemorySize(size_t size) {
+#if defined(USE_LINUX_BREAKPAD)
+ g_oom_size = size;
+#endif
+
+ if (size != 0)
+ LOG(FATAL) << "Out of memory, size = " << size;
+ LOG(FATAL) << "Out of memory.";
+}
+
+void OnNoMemory() {
+ OnNoMemorySize(0);
+}
+
+} // namespace
+
+#if !defined(ADDRESS_SANITIZER) && !defined(MEMORY_SANITIZER) && \
+ !defined(THREAD_SANITIZER) && !defined(LEAK_SANITIZER)
+
+#if defined(LIBC_GLIBC) && !defined(USE_TCMALLOC)
+
+extern "C" {
+void* __libc_malloc(size_t size);
+void* __libc_realloc(void* ptr, size_t size);
+void* __libc_calloc(size_t nmemb, size_t size);
+void* __libc_valloc(size_t size);
+void* __libc_pvalloc(size_t size);
+void* __libc_memalign(size_t alignment, size_t size);
+
+// Overriding the system memory allocation functions:
+//
+// For security reasons, we want malloc failures to be fatal. Too much code
+// doesn't check for a NULL return value from malloc and unconditionally uses
+// the resulting pointer. If the first offset that they try to access is
+// attacker controlled, then the attacker can direct the code to access any
+// part of memory.
+//
+// Thus, we define all the standard malloc functions here and mark them as
+// visibility 'default'. This means that they replace the malloc functions for
+// all Chromium code and also for all code in shared libraries. There are tests
+// for this in process_util_unittest.cc.
+//
+// If we are using tcmalloc, then the problem is moot since tcmalloc handles
+// this for us. Thus this code is in a !defined(USE_TCMALLOC) block.
+//
+// If we are testing the binary with AddressSanitizer, we should not
+// redefine malloc and let AddressSanitizer do it instead.
+//
+// We call the real libc functions in this code by using __libc_malloc etc.
+// Previously we tried using dlsym(RTLD_NEXT, ...) but that failed depending on
+// the link order. Since ld.so needs calloc during symbol resolution, it
+// defines its own versions of several of these functions in dl-minimal.c.
+// Depending on the runtime library order, dlsym ended up giving us those
+// functions and bad things happened. See crbug.com/31809
+//
+// This means that any code which calls __libc_* gets the raw libc versions of
+// these functions.
+
+#define DIE_ON_OOM_1(function_name) \
+ void* function_name(size_t) __attribute__ ((visibility("default"))); \
+ \
+ void* function_name(size_t size) { \
+ void* ret = __libc_##function_name(size); \
+ if (ret == NULL && size != 0) \
+ OnNoMemorySize(size); \
+ return ret; \
+ }
+
+#define DIE_ON_OOM_2(function_name, arg1_type) \
+ void* function_name(arg1_type, size_t) \
+ __attribute__ ((visibility("default"))); \
+ \
+ void* function_name(arg1_type arg1, size_t size) { \
+ void* ret = __libc_##function_name(arg1, size); \
+ if (ret == NULL && size != 0) \
+ OnNoMemorySize(size); \
+ return ret; \
+ }
+
+DIE_ON_OOM_1(malloc)
+DIE_ON_OOM_1(valloc)
+DIE_ON_OOM_1(pvalloc)
+
+DIE_ON_OOM_2(calloc, size_t)
+DIE_ON_OOM_2(realloc, void*)
+DIE_ON_OOM_2(memalign, size_t)
+
+// posix_memalign has a unique signature and doesn't have a __libc_ variant.
+int posix_memalign(void** ptr, size_t alignment, size_t size)
+ __attribute__ ((visibility("default")));
+
+int posix_memalign(void** ptr, size_t alignment, size_t size) {
+ // This will use the safe version of memalign, above.
+ *ptr = memalign(alignment, size);
+ return 0;
+}
+
+} // extern C
+
+#else
+
+// TODO(mostynb@opera.com): dlsym dance
+
+#endif // LIBC_GLIBC && !USE_TCMALLOC
+
+#endif // !*_SANITIZER
+
+void EnableTerminationOnHeapCorruption() {
+  // On Linux, there is nothing to do AFAIK.
+}
+
+void EnableTerminationOnOutOfMemory() {
+#if defined(OS_ANDROID)
+ // Android doesn't support setting a new handler.
+ DLOG(WARNING) << "Not feasible.";
+#else
+  // Set the out-of-memory handler for operator new.
+ std::set_new_handler(&OnNoMemory);
+ // If we're using glibc's allocator, the above functions will override
+ // malloc and friends and make them die on out of memory.
+#endif
+}
+
+// NOTE: This is not the only version of this function in the source:
+// the setuid sandbox (in process_util_linux.c, in the sandbox source)
+// also has its own C version.
+bool AdjustOOMScore(ProcessId process, int score) {
+ if (score < 0 || score > kMaxOomScore)
+ return false;
+
+ FilePath oom_path(internal::GetProcPidDir(process));
+
+ // Attempt to write the newer oom_score_adj file first.
+ FilePath oom_file = oom_path.AppendASCII("oom_score_adj");
+ if (file_util::PathExists(oom_file)) {
+ std::string score_str = IntToString(score);
+ DVLOG(1) << "Adjusting oom_score_adj of " << process << " to "
+ << score_str;
+ int score_len = static_cast<int>(score_str.length());
+ return (score_len == file_util::WriteFile(oom_file,
+ score_str.c_str(),
+ score_len));
+ }
+
+ // If the oom_score_adj file doesn't exist, then we write the old
+ // style file and translate the oom_adj score to the range 0-15.
+ oom_file = oom_path.AppendASCII("oom_adj");
+ if (file_util::PathExists(oom_file)) {
+ // Max score for the old oom_adj range. Used for conversion of new
+ // values to old values.
+ const int kMaxOldOomScore = 15;
+
+ int converted_score = score * kMaxOldOomScore / kMaxOomScore;
+ std::string score_str = IntToString(converted_score);
+ DVLOG(1) << "Adjusting oom_adj of " << process << " to " << score_str;
+ int score_len = static_cast<int>(score_str.length());
+ return (score_len == file_util::WriteFile(oom_file,
+ score_str.c_str(),
+ score_len));
+ }
+
+ return false;
+}
+
+} // namespace base
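To make the macro mechanics above concrete, this is approximately what DIE_ON_OOM_1(malloc) expands to once the preprocessor has run (a sketch derived directly from the macro definition in memory_linux.cc above, with whitespace added for readability):

// The wrapper is exported with default visibility, so it overrides malloc()
// for all Chromium code and for shared libraries, and forwards to the raw
// glibc implementation via __libc_malloc().
void* malloc(size_t) __attribute__ ((visibility("default")));

void* malloc(size_t size) {
  void* ret = __libc_malloc(size);
  if (ret == NULL && size != 0)
    OnNoMemorySize(size);  // LOG(FATAL)s, so the failed allocation is fatal.
  return ret;
}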
diff --git a/base/process/memory_mac.mm b/base/process/memory_mac.mm
new file mode 100644
index 0000000..dd30e70
--- /dev/null
+++ b/base/process/memory_mac.mm
@@ -0,0 +1,704 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/process/memory.h"
+
+#include <CoreFoundation/CoreFoundation.h>
+#include <errno.h>
+#include <mach/mach.h>
+#include <mach/mach_vm.h>
+#include <malloc/malloc.h>
+#import <objc/runtime.h>
+
+#include <new>
+
+#include "base/lazy_instance.h"
+#include "base/logging.h"
+#include "base/mac/mac_util.h"
+#include "base/scoped_clear_errno.h"
+#include "third_party/apple_apsl/CFBase.h"
+#include "third_party/apple_apsl/malloc.h"
+
+#if ARCH_CPU_32_BITS
+#include <dlfcn.h>
+#include <mach-o/nlist.h>
+
+#include "base/threading/thread_local.h"
+#include "third_party/mach_override/mach_override.h"
+#endif // ARCH_CPU_32_BITS
+
+namespace base {
+
+// These are helpers for EnableTerminationOnHeapCorruption, which is a no-op
+// on 64 bit Macs.
+#if ARCH_CPU_32_BITS
+namespace {
+
+// Finds the library path for malloc() and thus the libC part of libSystem,
+// which in Lion is in a separate image.
+const char* LookUpLibCPath() {
+ const void* addr = reinterpret_cast<void*>(&malloc);
+
+ Dl_info info;
+ if (dladdr(addr, &info))
+ return info.dli_fname;
+
+ DLOG(WARNING) << "Could not find image path for malloc()";
+ return NULL;
+}
+
+typedef void(*malloc_error_break_t)(void);
+malloc_error_break_t g_original_malloc_error_break = NULL;
+
+// Returns the function pointer for malloc_error_break. This symbol is declared
+// as __private_extern__ and cannot be dlsym()ed. Instead, use nlist() to
+// get it.
+malloc_error_break_t LookUpMallocErrorBreak() {
+ const char* lib_c_path = LookUpLibCPath();
+ if (!lib_c_path)
+ return NULL;
+
+ // Only need to look up two symbols, but nlist() requires a NULL-terminated
+ // array and takes no count.
+ struct nlist nl[3];
+ bzero(&nl, sizeof(nl));
+
+ // The symbol to find.
+ nl[0].n_un.n_name = const_cast<char*>("_malloc_error_break");
+
+ // A reference symbol by which the address of the desired symbol will be
+ // calculated.
+ nl[1].n_un.n_name = const_cast<char*>("_malloc");
+
+ int rv = nlist(lib_c_path, nl);
+ if (rv != 0 || nl[0].n_type == N_UNDF || nl[1].n_type == N_UNDF) {
+ return NULL;
+ }
+
+ // nlist() returns addresses as offsets in the image, not the instruction
+ // pointer in memory. Use the known in-memory address of malloc()
+ // to compute the offset for malloc_error_break().
+ uintptr_t reference_addr = reinterpret_cast<uintptr_t>(&malloc);
+ reference_addr -= nl[1].n_value;
+ reference_addr += nl[0].n_value;
+
+ return reinterpret_cast<malloc_error_break_t>(reference_addr);
+}
+
+// Combines ThreadLocalBoolean with AutoReset. It would be convenient
+// to compose ThreadLocalPointer<bool> with base::AutoReset<bool>, but that
+// would require allocating some storage for the bool.
+class ThreadLocalBooleanAutoReset {
+ public:
+ ThreadLocalBooleanAutoReset(ThreadLocalBoolean* tlb, bool new_value)
+ : scoped_tlb_(tlb),
+ original_value_(tlb->Get()) {
+ scoped_tlb_->Set(new_value);
+ }
+ ~ThreadLocalBooleanAutoReset() {
+ scoped_tlb_->Set(original_value_);
+ }
+
+ private:
+ ThreadLocalBoolean* scoped_tlb_;
+ bool original_value_;
+
+ DISALLOW_COPY_AND_ASSIGN(ThreadLocalBooleanAutoReset);
+};
+
+base::LazyInstance<ThreadLocalBoolean>::Leaky
+ g_unchecked_malloc = LAZY_INSTANCE_INITIALIZER;
+
+// NOTE(shess): This is called when the malloc library noticed that the heap
+// is fubar. Avoid calls which will re-enter the malloc library.
+void CrMallocErrorBreak() {
+ g_original_malloc_error_break();
+
+ // Out of memory is certainly not heap corruption, and not necessarily
+ // something for which the process should be terminated. Leave that decision
+ // to the OOM killer. The EBADF case comes up because the malloc library
+ // attempts to log to ASL (syslog) before calling this code, which fails
+ // accessing a Unix-domain socket because of sandboxing.
+ if (errno == ENOMEM || (errno == EBADF && g_unchecked_malloc.Get().Get()))
+ return;
+
+ // A unit test checks this error message, so it needs to be in release builds.
+ char buf[1024] =
+ "Terminating process due to a potential for future heap corruption: "
+ "errno=";
+ char errnobuf[] = {
+ '0' + ((errno / 100) % 10),
+ '0' + ((errno / 10) % 10),
+ '0' + (errno % 10),
+ '\000'
+ };
+ COMPILE_ASSERT(ELAST <= 999, errno_too_large_to_encode);
+ strlcat(buf, errnobuf, sizeof(buf));
+ RAW_LOG(ERROR, buf);
+
+ // Crash by writing to NULL+errno to allow analyzing errno from
+ // crash dump info (setting a breakpad key would re-enter the malloc
+ // library). Max documented errno in intro(2) is actually 102, but
+ // it really just needs to be "small" to stay on the right vm page.
+ const int kMaxErrno = 256;
+ char* volatile death_ptr = NULL;
+ death_ptr += std::min(errno, kMaxErrno);
+ *death_ptr = '!';
+}
+
+} // namespace
+#endif // ARCH_CPU_32_BITS
+
+void EnableTerminationOnHeapCorruption() {
+#if defined(ADDRESS_SANITIZER) || ARCH_CPU_64_BITS
+ // AddressSanitizer handles heap corruption, and on 64 bit Macs, the malloc
+ // system automatically abort()s on heap corruption.
+ return;
+#else
+ // Only override once, otherwise CrMallocErrorBreak() will recurse
+ // to itself.
+ if (g_original_malloc_error_break)
+ return;
+
+ malloc_error_break_t malloc_error_break = LookUpMallocErrorBreak();
+ if (!malloc_error_break) {
+ DLOG(WARNING) << "Could not find malloc_error_break";
+ return;
+ }
+
+ mach_error_t err = mach_override_ptr(
+ (void*)malloc_error_break,
+ (void*)&CrMallocErrorBreak,
+ (void**)&g_original_malloc_error_break);
+
+ if (err != err_none)
+ DLOG(WARNING) << "Could not override malloc_error_break; error = " << err;
+#endif // defined(ADDRESS_SANITIZER) || ARCH_CPU_64_BITS
+}
+
+// ------------------------------------------------------------------------
+
+namespace {
+
+bool g_oom_killer_enabled;
+
+// Starting with Mac OS X 10.7, the zone allocators set up by the system are
+// read-only, to prevent them from being overwritten in an attack. However,
+// blindly unprotecting and reprotecting the zone allocators fails with
+// GuardMalloc because GuardMalloc sets up its zone allocator using a block of
+// memory in its bss. Explicit saving/restoring of the protection is required.
+//
+// This function takes a pointer to a malloc zone, de-protects it if necessary,
+// and returns (in the out parameters) a region of memory (if any) to be
+// re-protected when modifications are complete. This approach assumes that
+// there is no contention for the protection of this memory.
+void DeprotectMallocZone(ChromeMallocZone* default_zone,
+ mach_vm_address_t* reprotection_start,
+ mach_vm_size_t* reprotection_length,
+ vm_prot_t* reprotection_value) {
+ mach_port_t unused;
+ *reprotection_start = reinterpret_cast<mach_vm_address_t>(default_zone);
+ struct vm_region_basic_info_64 info;
+ mach_msg_type_number_t count = VM_REGION_BASIC_INFO_COUNT_64;
+ kern_return_t result =
+ mach_vm_region(mach_task_self(),
+ reprotection_start,
+ reprotection_length,
+ VM_REGION_BASIC_INFO_64,
+ reinterpret_cast<vm_region_info_t>(&info),
+ &count,
+ &unused);
+ CHECK(result == KERN_SUCCESS);
+
+ result = mach_port_deallocate(mach_task_self(), unused);
+ CHECK(result == KERN_SUCCESS);
+
+ // Does the region fully enclose the zone pointers? Possibly unwarranted
+ // simplification used: using the size of a full version 8 malloc zone rather
+ // than the actual smaller size if the passed-in zone is not version 8.
+ CHECK(*reprotection_start <=
+ reinterpret_cast<mach_vm_address_t>(default_zone));
+ mach_vm_size_t zone_offset = reinterpret_cast<mach_vm_size_t>(default_zone) -
+ reinterpret_cast<mach_vm_size_t>(*reprotection_start);
+ CHECK(zone_offset + sizeof(ChromeMallocZone) <= *reprotection_length);
+
+ if (info.protection & VM_PROT_WRITE) {
+ // No change needed; the zone is already writable.
+ *reprotection_start = 0;
+ *reprotection_length = 0;
+ *reprotection_value = VM_PROT_NONE;
+ } else {
+ *reprotection_value = info.protection;
+ result = mach_vm_protect(mach_task_self(),
+ *reprotection_start,
+ *reprotection_length,
+ false,
+ info.protection | VM_PROT_WRITE);
+ CHECK(result == KERN_SUCCESS);
+ }
+}
+
+// === C malloc/calloc/valloc/realloc/posix_memalign ===
+
+typedef void* (*malloc_type)(struct _malloc_zone_t* zone,
+ size_t size);
+typedef void* (*calloc_type)(struct _malloc_zone_t* zone,
+ size_t num_items,
+ size_t size);
+typedef void* (*valloc_type)(struct _malloc_zone_t* zone,
+ size_t size);
+typedef void (*free_type)(struct _malloc_zone_t* zone,
+ void* ptr);
+typedef void* (*realloc_type)(struct _malloc_zone_t* zone,
+ void* ptr,
+ size_t size);
+typedef void* (*memalign_type)(struct _malloc_zone_t* zone,
+ size_t alignment,
+ size_t size);
+
+malloc_type g_old_malloc;
+calloc_type g_old_calloc;
+valloc_type g_old_valloc;
+free_type g_old_free;
+realloc_type g_old_realloc;
+memalign_type g_old_memalign;
+
+malloc_type g_old_malloc_purgeable;
+calloc_type g_old_calloc_purgeable;
+valloc_type g_old_valloc_purgeable;
+free_type g_old_free_purgeable;
+realloc_type g_old_realloc_purgeable;
+memalign_type g_old_memalign_purgeable;
+
+void* oom_killer_malloc(struct _malloc_zone_t* zone,
+ size_t size) {
+#if ARCH_CPU_32_BITS
+ ScopedClearErrno clear_errno;
+#endif // ARCH_CPU_32_BITS
+ void* result = g_old_malloc(zone, size);
+ if (!result && size)
+ debug::BreakDebugger();
+ return result;
+}
+
+void* oom_killer_calloc(struct _malloc_zone_t* zone,
+ size_t num_items,
+ size_t size) {
+#if ARCH_CPU_32_BITS
+ ScopedClearErrno clear_errno;
+#endif // ARCH_CPU_32_BITS
+ void* result = g_old_calloc(zone, num_items, size);
+ if (!result && num_items && size)
+ debug::BreakDebugger();
+ return result;
+}
+
+void* oom_killer_valloc(struct _malloc_zone_t* zone,
+ size_t size) {
+#if ARCH_CPU_32_BITS
+ ScopedClearErrno clear_errno;
+#endif // ARCH_CPU_32_BITS
+ void* result = g_old_valloc(zone, size);
+ if (!result && size)
+ debug::BreakDebugger();
+ return result;
+}
+
+void oom_killer_free(struct _malloc_zone_t* zone,
+ void* ptr) {
+#if ARCH_CPU_32_BITS
+ ScopedClearErrno clear_errno;
+#endif // ARCH_CPU_32_BITS
+ g_old_free(zone, ptr);
+}
+
+void* oom_killer_realloc(struct _malloc_zone_t* zone,
+ void* ptr,
+ size_t size) {
+#if ARCH_CPU_32_BITS
+ ScopedClearErrno clear_errno;
+#endif // ARCH_CPU_32_BITS
+ void* result = g_old_realloc(zone, ptr, size);
+ if (!result && size)
+ debug::BreakDebugger();
+ return result;
+}
+
+void* oom_killer_memalign(struct _malloc_zone_t* zone,
+ size_t alignment,
+ size_t size) {
+#if ARCH_CPU_32_BITS
+ ScopedClearErrno clear_errno;
+#endif // ARCH_CPU_32_BITS
+ void* result = g_old_memalign(zone, alignment, size);
+ // Only die if posix_memalign would have returned ENOMEM, since there are
+ // other reasons why NULL might be returned (see
+ // http://opensource.apple.com/source/Libc/Libc-583/gen/malloc.c ).
+ if (!result && size && alignment >= sizeof(void*)
+ && (alignment & (alignment - 1)) == 0) {
+ debug::BreakDebugger();
+ }
+ return result;
+}
+
+void* oom_killer_malloc_purgeable(struct _malloc_zone_t* zone,
+ size_t size) {
+#if ARCH_CPU_32_BITS
+ ScopedClearErrno clear_errno;
+#endif // ARCH_CPU_32_BITS
+ void* result = g_old_malloc_purgeable(zone, size);
+ if (!result && size)
+ debug::BreakDebugger();
+ return result;
+}
+
+void* oom_killer_calloc_purgeable(struct _malloc_zone_t* zone,
+ size_t num_items,
+ size_t size) {
+#if ARCH_CPU_32_BITS
+ ScopedClearErrno clear_errno;
+#endif // ARCH_CPU_32_BITS
+ void* result = g_old_calloc_purgeable(zone, num_items, size);
+ if (!result && num_items && size)
+ debug::BreakDebugger();
+ return result;
+}
+
+void* oom_killer_valloc_purgeable(struct _malloc_zone_t* zone,
+ size_t size) {
+#if ARCH_CPU_32_BITS
+ ScopedClearErrno clear_errno;
+#endif // ARCH_CPU_32_BITS
+ void* result = g_old_valloc_purgeable(zone, size);
+ if (!result && size)
+ debug::BreakDebugger();
+ return result;
+}
+
+void oom_killer_free_purgeable(struct _malloc_zone_t* zone,
+ void* ptr) {
+#if ARCH_CPU_32_BITS
+ ScopedClearErrno clear_errno;
+#endif // ARCH_CPU_32_BITS
+ g_old_free_purgeable(zone, ptr);
+}
+
+void* oom_killer_realloc_purgeable(struct _malloc_zone_t* zone,
+ void* ptr,
+ size_t size) {
+#if ARCH_CPU_32_BITS
+ ScopedClearErrno clear_errno;
+#endif // ARCH_CPU_32_BITS
+ void* result = g_old_realloc_purgeable(zone, ptr, size);
+ if (!result && size)
+ debug::BreakDebugger();
+ return result;
+}
+
+void* oom_killer_memalign_purgeable(struct _malloc_zone_t* zone,
+ size_t alignment,
+ size_t size) {
+#if ARCH_CPU_32_BITS
+ ScopedClearErrno clear_errno;
+#endif // ARCH_CPU_32_BITS
+ void* result = g_old_memalign_purgeable(zone, alignment, size);
+ // Only die if posix_memalign would have returned ENOMEM, since there are
+ // other reasons why NULL might be returned (see
+ // http://opensource.apple.com/source/Libc/Libc-583/gen/malloc.c ).
+ if (!result && size && alignment >= sizeof(void*)
+ && (alignment & (alignment - 1)) == 0) {
+ debug::BreakDebugger();
+ }
+ return result;
+}
+
+// === C++ operator new ===
+
+void oom_killer_new() {
+ debug::BreakDebugger();
+}
+
+// === Core Foundation CFAllocators ===
+
+bool CanGetContextForCFAllocator() {
+ return !base::mac::IsOSLaterThanMountainLion_DontCallThis();
+}
+
+CFAllocatorContext* ContextForCFAllocator(CFAllocatorRef allocator) {
+ if (base::mac::IsOSSnowLeopard()) {
+ ChromeCFAllocatorLeopards* our_allocator =
+ const_cast<ChromeCFAllocatorLeopards*>(
+ reinterpret_cast<const ChromeCFAllocatorLeopards*>(allocator));
+ return &our_allocator->_context;
+ } else if (base::mac::IsOSLion() || base::mac::IsOSMountainLion()) {
+ ChromeCFAllocatorLions* our_allocator =
+ const_cast<ChromeCFAllocatorLions*>(
+ reinterpret_cast<const ChromeCFAllocatorLions*>(allocator));
+ return &our_allocator->_context;
+ } else {
+ return NULL;
+ }
+}
+
+CFAllocatorAllocateCallBack g_old_cfallocator_system_default;
+CFAllocatorAllocateCallBack g_old_cfallocator_malloc;
+CFAllocatorAllocateCallBack g_old_cfallocator_malloc_zone;
+
+void* oom_killer_cfallocator_system_default(CFIndex alloc_size,
+ CFOptionFlags hint,
+ void* info) {
+ void* result = g_old_cfallocator_system_default(alloc_size, hint, info);
+ if (!result)
+ debug::BreakDebugger();
+ return result;
+}
+
+void* oom_killer_cfallocator_malloc(CFIndex alloc_size,
+ CFOptionFlags hint,
+ void* info) {
+ void* result = g_old_cfallocator_malloc(alloc_size, hint, info);
+ if (!result)
+ debug::BreakDebugger();
+ return result;
+}
+
+void* oom_killer_cfallocator_malloc_zone(CFIndex alloc_size,
+ CFOptionFlags hint,
+ void* info) {
+ void* result = g_old_cfallocator_malloc_zone(alloc_size, hint, info);
+ if (!result)
+ debug::BreakDebugger();
+ return result;
+}
+
+// === Cocoa NSObject allocation ===
+
+typedef id (*allocWithZone_t)(id, SEL, NSZone*);
+allocWithZone_t g_old_allocWithZone;
+
+id oom_killer_allocWithZone(id self, SEL _cmd, NSZone* zone)
+{
+ id result = g_old_allocWithZone(self, _cmd, zone);
+ if (!result)
+ debug::BreakDebugger();
+ return result;
+}
+
+} // namespace
+
+void* UncheckedMalloc(size_t size) {
+ if (g_old_malloc) {
+#if ARCH_CPU_32_BITS
+ ScopedClearErrno clear_errno;
+ ThreadLocalBooleanAutoReset flag(g_unchecked_malloc.Pointer(), true);
+#endif // ARCH_CPU_32_BITS
+ return g_old_malloc(malloc_default_zone(), size);
+ }
+ return malloc(size);
+}
+
+void EnableTerminationOnOutOfMemory() {
+ if (g_oom_killer_enabled)
+ return;
+
+ g_oom_killer_enabled = true;
+
+ // === C malloc/calloc/valloc/realloc/posix_memalign ===
+
+ // This approach is not perfect, as requests for amounts of memory larger than
+ // MALLOC_ABSOLUTE_MAX_SIZE (currently SIZE_T_MAX - (2 * PAGE_SIZE)) will
+ // still fail with a NULL rather than dying (see
+ // http://opensource.apple.com/source/Libc/Libc-583/gen/malloc.c for details).
+ // Unfortunately, it's the best we can do. Also note that this does not affect
+ // allocations from non-default zones.
+
+ CHECK(!g_old_malloc && !g_old_calloc && !g_old_valloc && !g_old_realloc &&
+ !g_old_memalign) << "Old allocators unexpectedly non-null";
+
+ CHECK(!g_old_malloc_purgeable && !g_old_calloc_purgeable &&
+ !g_old_valloc_purgeable && !g_old_realloc_purgeable &&
+ !g_old_memalign_purgeable) << "Old allocators unexpectedly non-null";
+
+#if !defined(ADDRESS_SANITIZER)
+ // Don't do anything special on OOM for the malloc zones replaced by
+ // AddressSanitizer, as modifying or protecting them may not work correctly.
+
+ ChromeMallocZone* default_zone =
+ reinterpret_cast<ChromeMallocZone*>(malloc_default_zone());
+ ChromeMallocZone* purgeable_zone =
+ reinterpret_cast<ChromeMallocZone*>(malloc_default_purgeable_zone());
+
+ mach_vm_address_t default_reprotection_start = 0;
+ mach_vm_size_t default_reprotection_length = 0;
+ vm_prot_t default_reprotection_value = VM_PROT_NONE;
+ DeprotectMallocZone(default_zone,
+ &default_reprotection_start,
+ &default_reprotection_length,
+ &default_reprotection_value);
+
+ mach_vm_address_t purgeable_reprotection_start = 0;
+ mach_vm_size_t purgeable_reprotection_length = 0;
+ vm_prot_t purgeable_reprotection_value = VM_PROT_NONE;
+ if (purgeable_zone) {
+ DeprotectMallocZone(purgeable_zone,
+ &purgeable_reprotection_start,
+ &purgeable_reprotection_length,
+ &purgeable_reprotection_value);
+ }
+
+ // Default zone
+
+ g_old_malloc = default_zone->malloc;
+ g_old_calloc = default_zone->calloc;
+ g_old_valloc = default_zone->valloc;
+ g_old_free = default_zone->free;
+ g_old_realloc = default_zone->realloc;
+ CHECK(g_old_malloc && g_old_calloc && g_old_valloc && g_old_free &&
+ g_old_realloc)
+ << "Failed to get system allocation functions.";
+
+ default_zone->malloc = oom_killer_malloc;
+ default_zone->calloc = oom_killer_calloc;
+ default_zone->valloc = oom_killer_valloc;
+ default_zone->free = oom_killer_free;
+ default_zone->realloc = oom_killer_realloc;
+
+ if (default_zone->version >= 5) {
+ g_old_memalign = default_zone->memalign;
+ if (g_old_memalign)
+ default_zone->memalign = oom_killer_memalign;
+ }
+
+ // Purgeable zone (if it exists)
+
+ if (purgeable_zone) {
+ g_old_malloc_purgeable = purgeable_zone->malloc;
+ g_old_calloc_purgeable = purgeable_zone->calloc;
+ g_old_valloc_purgeable = purgeable_zone->valloc;
+ g_old_free_purgeable = purgeable_zone->free;
+ g_old_realloc_purgeable = purgeable_zone->realloc;
+ CHECK(g_old_malloc_purgeable && g_old_calloc_purgeable &&
+ g_old_valloc_purgeable && g_old_free_purgeable &&
+ g_old_realloc_purgeable)
+ << "Failed to get system allocation functions.";
+
+ purgeable_zone->malloc = oom_killer_malloc_purgeable;
+ purgeable_zone->calloc = oom_killer_calloc_purgeable;
+ purgeable_zone->valloc = oom_killer_valloc_purgeable;
+ purgeable_zone->free = oom_killer_free_purgeable;
+ purgeable_zone->realloc = oom_killer_realloc_purgeable;
+
+ if (purgeable_zone->version >= 5) {
+ g_old_memalign_purgeable = purgeable_zone->memalign;
+ if (g_old_memalign_purgeable)
+ purgeable_zone->memalign = oom_killer_memalign_purgeable;
+ }
+ }
+
+ // Restore protection if it was active.
+
+ if (default_reprotection_start) {
+ kern_return_t result = mach_vm_protect(mach_task_self(),
+ default_reprotection_start,
+ default_reprotection_length,
+ false,
+ default_reprotection_value);
+ CHECK(result == KERN_SUCCESS);
+ }
+
+ if (purgeable_reprotection_start) {
+ kern_return_t result = mach_vm_protect(mach_task_self(),
+ purgeable_reprotection_start,
+ purgeable_reprotection_length,
+ false,
+ purgeable_reprotection_value);
+ CHECK(result == KERN_SUCCESS);
+ }
+#endif
+
+ // === C malloc_zone_batch_malloc ===
+
+ // batch_malloc is omitted because the default malloc zone's implementation
+ // only supports batch_malloc for "tiny" allocations from the free list. It
+ // will fail for allocations larger than "tiny", and will only allocate as
+ // many blocks as it's able to from the free list. These factors mean that it
+ // can return less than the requested memory even in a non-out-of-memory
+ // situation. There's no good way to detect whether a batch_malloc failure is
+ // due to these other factors, or due to genuine memory or address space
+ // exhaustion. The fact that it only allocates space from the "tiny" free list
+ // means that it's likely that a failure will not be due to memory exhaustion.
+ // Similarly, these constraints on batch_malloc mean that callers must always
+ // be expecting to receive less memory than was requested, even in situations
+ // where memory pressure is not a concern. Finally, the only public interface
+ // to batch_malloc is malloc_zone_batch_malloc, which is specific to the
+ // system's malloc implementation. It's unlikely that anyone's even heard of
+ // it.
+
+ // === C++ operator new ===
+
+ // Yes, operator new does call through to malloc, but this will catch failures
+ // that our imperfect handling of malloc cannot.
+
+ std::set_new_handler(oom_killer_new);
+
+#ifndef ADDRESS_SANITIZER
+ // === Core Foundation CFAllocators ===
+
+ // This will not catch allocation done by custom allocators, but will catch
+ // all allocation done by system-provided ones.
+
+ CHECK(!g_old_cfallocator_system_default && !g_old_cfallocator_malloc &&
+ !g_old_cfallocator_malloc_zone)
+ << "Old allocators unexpectedly non-null";
+
+ bool cf_allocator_internals_known = CanGetContextForCFAllocator();
+
+ if (cf_allocator_internals_known) {
+ CFAllocatorContext* context =
+ ContextForCFAllocator(kCFAllocatorSystemDefault);
+ CHECK(context) << "Failed to get context for kCFAllocatorSystemDefault.";
+ g_old_cfallocator_system_default = context->allocate;
+ CHECK(g_old_cfallocator_system_default)
+ << "Failed to get kCFAllocatorSystemDefault allocation function.";
+ context->allocate = oom_killer_cfallocator_system_default;
+
+ context = ContextForCFAllocator(kCFAllocatorMalloc);
+ CHECK(context) << "Failed to get context for kCFAllocatorMalloc.";
+ g_old_cfallocator_malloc = context->allocate;
+ CHECK(g_old_cfallocator_malloc)
+ << "Failed to get kCFAllocatorMalloc allocation function.";
+ context->allocate = oom_killer_cfallocator_malloc;
+
+ context = ContextForCFAllocator(kCFAllocatorMallocZone);
+ CHECK(context) << "Failed to get context for kCFAllocatorMallocZone.";
+ g_old_cfallocator_malloc_zone = context->allocate;
+ CHECK(g_old_cfallocator_malloc_zone)
+ << "Failed to get kCFAllocatorMallocZone allocation function.";
+ context->allocate = oom_killer_cfallocator_malloc_zone;
+ } else {
+ NSLog(@"Internals of CFAllocator not known; out-of-memory failures via "
+ "CFAllocator will not result in termination. http://crbug.com/45650");
+ }
+#endif
+
+ // === Cocoa NSObject allocation ===
+
+ // Note that both +[NSObject new] and +[NSObject alloc] call through to
+ // +[NSObject allocWithZone:].
+
+ CHECK(!g_old_allocWithZone)
+ << "Old allocator unexpectedly non-null";
+
+ Class nsobject_class = [NSObject class];
+ Method orig_method = class_getClassMethod(nsobject_class,
+ @selector(allocWithZone:));
+ g_old_allocWithZone = reinterpret_cast<allocWithZone_t>(
+ method_getImplementation(orig_method));
+ CHECK(g_old_allocWithZone)
+ << "Failed to get allocWithZone allocation function.";
+ method_setImplementation(orig_method,
+ reinterpret_cast<IMP>(oom_killer_allocWithZone));
+}
+
+} // namespace base
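As a usage sketch for UncheckedMalloc() above: a Mac caller that expects very large, failable allocations can test for NULL instead of letting the OOM handler terminate the process. The helper name, size, and fallback policy here are illustrative; only UncheckedMalloc() and free() are real.

#include "base/process/memory.h"

// Hypothetical caller: attempt a huge allocation without triggering the OOM
// handler installed by EnableTerminationOnOutOfMemory().
bool TryAllocateHugeBuffer(size_t bytes, void** out) {
  void* buffer = base::UncheckedMalloc(bytes);
  if (!buffer)
    return false;  // Degrade gracefully (e.g. draw a smaller canvas).
  *out = buffer;
  return true;  // Ordinary malloc memory; release it with free().
}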
diff --git a/base/process/memory_stubs.cc b/base/process/memory_stubs.cc
new file mode 100644
index 0000000..b06c7d5
--- /dev/null
+++ b/base/process/memory_stubs.cc
@@ -0,0 +1,19 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/process/memory.h"
+
+namespace base {
+
+void EnableTerminationOnOutOfMemory() {
+}
+
+void EnableTerminationOnHeapCorruption() {
+}
+
+bool AdjustOOMScore(ProcessId process, int score) {
+ return false;
+}
+
+} // namespace base
diff --git a/base/process/memory_unittest.cc b/base/process/memory_unittest.cc
new file mode 100644
index 0000000..8022301
--- /dev/null
+++ b/base/process/memory_unittest.cc
@@ -0,0 +1,379 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#define _CRT_SECURE_NO_WARNINGS
+
+#include "base/process/memory.h"
+
+#include <limits>
+
+#include "base/compiler_specific.h"
+#include "base/debug/alias.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+#if defined(OS_WIN)
+#include <windows.h>
+#endif
+#if defined(OS_POSIX)
+#include <errno.h>
+#endif
+#if defined(OS_MACOSX)
+#include <malloc/malloc.h>
+#include "base/process/memory_unittest_mac.h"
+#endif
+#if defined(OS_LINUX)
+#include <glib.h>
+#include <malloc.h>
+#endif
+
+#if defined(OS_WIN)
+// HeapQueryInformation function pointer.
+typedef BOOL (WINAPI* HeapQueryFn) \
+ (HANDLE, HEAP_INFORMATION_CLASS, PVOID, SIZE_T, PSIZE_T);
+
+const int kConstantInModule = 42;
+
+TEST(ProcessMemoryTest, GetModuleFromAddress) {
+ // Since the unit tests are their own EXE, this should be
+ // equivalent to the EXE's HINSTANCE.
+ //
+ // kConstantInModule is a constant in this file and
+ // therefore within the unit test EXE.
+ EXPECT_EQ(::GetModuleHandle(NULL),
+ base::GetModuleFromAddress(
+ const_cast<int*>(&kConstantInModule)));
+
+ // Any address within the kernel32 module should return
+ // kernel32's HMODULE. Our only assumption here is that
+ // kernel32 is larger than 4 bytes.
+ HMODULE kernel32 = ::GetModuleHandle(L"kernel32.dll");
+ HMODULE kernel32_from_address =
+ base::GetModuleFromAddress(reinterpret_cast<DWORD*>(kernel32) + 1);
+ EXPECT_EQ(kernel32, kernel32_from_address);
+}
+
+TEST(ProcessMemoryTest, EnableLFH) {
+ ASSERT_TRUE(base::EnableLowFragmentationHeap());
+ if (IsDebuggerPresent()) {
+    // Under these conditions, LFH can't be enabled, so there's no point in
+    // testing anything.
+ const char* no_debug_env = getenv("_NO_DEBUG_HEAP");
+ if (!no_debug_env || strcmp(no_debug_env, "1"))
+ return;
+ }
+ HMODULE kernel32 = GetModuleHandle(L"kernel32.dll");
+ ASSERT_TRUE(kernel32 != NULL);
+ HeapQueryFn heap_query = reinterpret_cast<HeapQueryFn>(GetProcAddress(
+ kernel32,
+ "HeapQueryInformation"));
+
+ // On Windows 2000, the function is not exported. This is not a reason to
+  // fail, but we won't be able to retrieve information about the heap, so we
+ // should stop here.
+ if (heap_query == NULL)
+ return;
+
+ HANDLE heaps[1024] = { 0 };
+ unsigned number_heaps = GetProcessHeaps(1024, heaps);
+ EXPECT_GT(number_heaps, 0u);
+ for (unsigned i = 0; i < number_heaps; ++i) {
+ ULONG flag = 0;
+ SIZE_T length;
+ ASSERT_NE(0, heap_query(heaps[i],
+ HeapCompatibilityInformation,
+ &flag,
+ sizeof(flag),
+ &length));
+ // If flag is 0, the heap is a standard heap that does not support
+ // look-asides. If flag is 1, the heap supports look-asides. If flag is 2,
+ // the heap is a low-fragmentation heap (LFH). Note that look-asides are not
+ // supported on the LFH.
+
+ // We don't have any documented way of querying the HEAP_NO_SERIALIZE flag.
+ EXPECT_LE(flag, 2u);
+ EXPECT_NE(flag, 1u);
+ }
+}
+#endif // defined(OS_WIN)
+
+#if defined(OS_MACOSX)
+
+// For the following Mac tests:
+// Note that base::EnableTerminationOnHeapCorruption() is called as part of
+// test suite setup and does not need to be done again, else mach_override
+// will fail.
+
+#if !defined(ADDRESS_SANITIZER)
+// The following code tests the system implementation of malloc(), so there is
+// no need to test it under AddressSanitizer.
+TEST(ProcessMemoryTest, MacMallocFailureDoesNotTerminate) {
+ // Test that ENOMEM doesn't crash via CrMallocErrorBreak two ways: the exit
+ // code and lack of the error string. The number of bytes is one less than
+ // MALLOC_ABSOLUTE_MAX_SIZE, more than which the system early-returns NULL and
+ // does not call through malloc_error_break(). See the comment at
+ // EnableTerminationOnOutOfMemory() for more information.
+ void* buf = NULL;
+ ASSERT_EXIT(
+ {
+ base::EnableTerminationOnOutOfMemory();
+
+ buf = malloc(std::numeric_limits<size_t>::max() - (2 * PAGE_SIZE) - 1);
+ },
+ testing::KilledBySignal(SIGTRAP),
+ "\\*\\*\\* error: can't allocate region.*"
+ "(Terminating process due to a potential for future heap "
+ "corruption){0}");
+
+ base::debug::Alias(buf);
+}
+#endif // !defined(ADDRESS_SANITIZER)
+
+TEST(ProcessMemoryTest, MacTerminateOnHeapCorruption) {
+ // Assert that freeing an unallocated pointer will crash the process.
+ char buf[3];
+ asm("" : "=r" (buf)); // Prevent clang from being too smart.
+#if ARCH_CPU_64_BITS
+ // On 64 bit Macs, the malloc system automatically abort()s on heap corruption
+ // but does not output anything.
+ ASSERT_DEATH(free(buf), "");
+#elif defined(ADDRESS_SANITIZER)
+ // AddressSanitizer replaces malloc() and prints a different error message on
+ // heap corruption.
+ ASSERT_DEATH(free(buf), "attempting free on address which "
+ "was not malloc\\(\\)-ed");
+#else
+ ASSERT_DEATH(free(buf), "being freed.*"
+ "\\*\\*\\* set a breakpoint in malloc_error_break to debug.*"
+ "Terminating process due to a potential for future heap corruption");
+#endif // ARCH_CPU_64_BITS || defined(ADDRESS_SANITIZER)
+}
+
+#endif // defined(OS_MACOSX)
+
+// Android doesn't implement set_new_handler, so we can't use the
+// OutOfMemoryTest cases.
+// OpenBSD does not support these tests either.
+// AddressSanitizer and ThreadSanitizer define the malloc()/free()/etc.
+// functions so that they don't crash if the program is out of memory, so the
+// OOM tests aren't supposed to work.
+// TODO(vandebo) make this work on Windows too.
+#if !defined(OS_ANDROID) && !defined(OS_OPENBSD) && \
+ !defined(OS_WIN) && \
+ !defined(ADDRESS_SANITIZER) && !defined(THREAD_SANITIZER)
+
+#if defined(USE_TCMALLOC)
+extern "C" {
+int tc_set_new_mode(int mode);
+}
+#endif // defined(USE_TCMALLOC)
+
+class OutOfMemoryDeathTest : public testing::Test {
+ public:
+ OutOfMemoryDeathTest()
+ : value_(NULL),
+ // Make test size as large as possible minus a few pages so
+ // that alignment or other rounding doesn't make it wrap.
+ test_size_(std::numeric_limits<std::size_t>::max() - 12 * 1024),
+ signed_test_size_(std::numeric_limits<ssize_t>::max()) {
+ }
+
+#if defined(USE_TCMALLOC)
+ virtual void SetUp() OVERRIDE {
+ tc_set_new_mode(1);
+ }
+
+ virtual void TearDown() OVERRIDE {
+ tc_set_new_mode(0);
+ }
+#endif // defined(USE_TCMALLOC)
+
+ void SetUpInDeathAssert() {
+ // Must call EnableTerminationOnOutOfMemory() because that is called from
+ // chrome's main function and therefore hasn't been called yet.
+ // Since this call may result in another thread being created and death
+ // tests shouldn't be started in a multithread environment, this call
+ // should be done inside of the ASSERT_DEATH.
+ base::EnableTerminationOnOutOfMemory();
+ }
+
+ void* value_;
+ size_t test_size_;
+ ssize_t signed_test_size_;
+};
+
+TEST_F(OutOfMemoryDeathTest, New) {
+ ASSERT_DEATH({
+ SetUpInDeathAssert();
+ value_ = operator new(test_size_);
+ }, "");
+}
+
+TEST_F(OutOfMemoryDeathTest, NewArray) {
+ ASSERT_DEATH({
+ SetUpInDeathAssert();
+ value_ = new char[test_size_];
+ }, "");
+}
+
+TEST_F(OutOfMemoryDeathTest, Malloc) {
+ ASSERT_DEATH({
+ SetUpInDeathAssert();
+ value_ = malloc(test_size_);
+ }, "");
+}
+
+TEST_F(OutOfMemoryDeathTest, Realloc) {
+ ASSERT_DEATH({
+ SetUpInDeathAssert();
+ value_ = realloc(NULL, test_size_);
+ }, "");
+}
+
+TEST_F(OutOfMemoryDeathTest, Calloc) {
+ ASSERT_DEATH({
+ SetUpInDeathAssert();
+ value_ = calloc(1024, test_size_ / 1024L);
+ }, "");
+}
+
+TEST_F(OutOfMemoryDeathTest, Valloc) {
+ ASSERT_DEATH({
+ SetUpInDeathAssert();
+ value_ = valloc(test_size_);
+ }, "");
+}
+
+#if defined(OS_LINUX)
+TEST_F(OutOfMemoryDeathTest, Pvalloc) {
+ ASSERT_DEATH({
+ SetUpInDeathAssert();
+ value_ = pvalloc(test_size_);
+ }, "");
+}
+
+TEST_F(OutOfMemoryDeathTest, Memalign) {
+ ASSERT_DEATH({
+ SetUpInDeathAssert();
+ value_ = memalign(4, test_size_);
+ }, "");
+}
+
+TEST_F(OutOfMemoryDeathTest, ViaSharedLibraries) {
+ // g_try_malloc is documented to return NULL on failure. (g_malloc is the
+ // 'safe' default that crashes if allocation fails). However, since we have
+ // hopefully overridden malloc, even g_try_malloc should fail. This tests
+ // that the run-time symbol resolution is overriding malloc for shared
+ // libraries as well as for our code.
+ ASSERT_DEATH({
+ SetUpInDeathAssert();
+ value_ = g_try_malloc(test_size_);
+ }, "");
+}
+#endif // OS_LINUX
+
+// Android doesn't implement posix_memalign().
+#if defined(OS_POSIX) && !defined(OS_ANDROID)
+TEST_F(OutOfMemoryDeathTest, Posix_memalign) {
+ // Grab the return value of posix_memalign to silence a compiler warning
+ // about unused return values. We don't actually care about the return
+ // value, since we're asserting death.
+ ASSERT_DEATH({
+ SetUpInDeathAssert();
+ EXPECT_EQ(ENOMEM, posix_memalign(&value_, 8, test_size_));
+ }, "");
+}
+#endif // defined(OS_POSIX) && !defined(OS_ANDROID)
+
+#if defined(OS_MACOSX)
+
+// Purgeable zone tests
+
+TEST_F(OutOfMemoryDeathTest, MallocPurgeable) {
+ malloc_zone_t* zone = malloc_default_purgeable_zone();
+ ASSERT_DEATH({
+ SetUpInDeathAssert();
+ value_ = malloc_zone_malloc(zone, test_size_);
+ }, "");
+}
+
+TEST_F(OutOfMemoryDeathTest, ReallocPurgeable) {
+ malloc_zone_t* zone = malloc_default_purgeable_zone();
+ ASSERT_DEATH({
+ SetUpInDeathAssert();
+ value_ = malloc_zone_realloc(zone, NULL, test_size_);
+ }, "");
+}
+
+TEST_F(OutOfMemoryDeathTest, CallocPurgeable) {
+ malloc_zone_t* zone = malloc_default_purgeable_zone();
+ ASSERT_DEATH({
+ SetUpInDeathAssert();
+ value_ = malloc_zone_calloc(zone, 1024, test_size_ / 1024L);
+ }, "");
+}
+
+TEST_F(OutOfMemoryDeathTest, VallocPurgeable) {
+ malloc_zone_t* zone = malloc_default_purgeable_zone();
+ ASSERT_DEATH({
+ SetUpInDeathAssert();
+ value_ = malloc_zone_valloc(zone, test_size_);
+ }, "");
+}
+
+TEST_F(OutOfMemoryDeathTest, PosixMemalignPurgeable) {
+ malloc_zone_t* zone = malloc_default_purgeable_zone();
+ ASSERT_DEATH({
+ SetUpInDeathAssert();
+ value_ = malloc_zone_memalign(zone, 8, test_size_);
+ }, "");
+}
+
+// Since these allocation functions take a signed size, it's possible that
+// calling them just once won't be enough to exhaust memory. In the 32-bit
+// environment, it's likely that these allocation attempts will fail because
+// not enough contiguous address space is available. In the 64-bit environment,
+// it's likely that they'll fail because they would require a preposterous
+// amount of (virtual) memory.
+
+TEST_F(OutOfMemoryDeathTest, CFAllocatorSystemDefault) {
+ ASSERT_DEATH({
+ SetUpInDeathAssert();
+ while ((value_ =
+ base::AllocateViaCFAllocatorSystemDefault(signed_test_size_))) {}
+ }, "");
+}
+
+TEST_F(OutOfMemoryDeathTest, CFAllocatorMalloc) {
+ ASSERT_DEATH({
+ SetUpInDeathAssert();
+ while ((value_ =
+ base::AllocateViaCFAllocatorMalloc(signed_test_size_))) {}
+ }, "");
+}
+
+TEST_F(OutOfMemoryDeathTest, CFAllocatorMallocZone) {
+ ASSERT_DEATH({
+ SetUpInDeathAssert();
+ while ((value_ =
+ base::AllocateViaCFAllocatorMallocZone(signed_test_size_))) {}
+ }, "");
+}
+
+#if !defined(ARCH_CPU_64_BITS)
+
+// See process_util_unittest_mac.mm for an explanation of why this test isn't
+// run in the 64-bit environment.
+
+TEST_F(OutOfMemoryDeathTest, PsychoticallyBigObjCObject) {
+ ASSERT_DEATH({
+ SetUpInDeathAssert();
+ while ((value_ = base::AllocatePsychoticallyBigObjCObject())) {}
+ }, "");
+}
+
+#endif // !ARCH_CPU_64_BITS
+#endif // OS_MACOSX
+
+#endif // !defined(OS_ANDROID) && !defined(OS_OPENBSD) &&
+ // !defined(OS_WIN) && !defined(ADDRESS_SANITIZER)
diff --git a/base/process_util_unittest_mac.h b/base/process/memory_unittest_mac.h
index 7b4fe1c..472d2c5 100644
--- a/base/process_util_unittest_mac.h
+++ b/base/process/memory_unittest_mac.h
@@ -5,8 +5,8 @@
// This file contains helpers for the process_util_unittest to allow it to fully
// test the Mac code.
-#ifndef BASE_PROCESS_UTIL_UNITTEST_MAC_H_
-#define BASE_PROCESS_UTIL_UNITTEST_MAC_H_
+#ifndef BASE_PROCESS_MEMORY_UNITTEST_MAC_H_
+#define BASE_PROCESS_MEMORY_UNITTEST_MAC_H_
#include "base/basictypes.h"
@@ -29,4 +29,4 @@ void* AllocatePsychoticallyBigObjCObject();
} // namespace base
-#endif // BASE_PROCESS_UTIL_UNITTEST_MAC_H_
+#endif // BASE_PROCESS_MEMORY_UNITTEST_MAC_H_
diff --git a/base/process_util_unittest_mac.mm b/base/process/memory_unittest_mac.mm
index 2ef1868..bc4bf65 100644
--- a/base/process_util_unittest_mac.mm
+++ b/base/process/memory_unittest_mac.mm
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "base/process_util_unittest_mac.h"
+#include "base/process/memory_unittest_mac.h"
#import <Foundation/Foundation.h>
#include <CoreFoundation/CoreFoundation.h>
diff --git a/base/process/memory_win.cc b/base/process/memory_win.cc
new file mode 100644
index 0000000..c53a1be
--- /dev/null
+++ b/base/process/memory_win.cc
@@ -0,0 +1,85 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/process/memory.h"
+
+#include <psapi.h>
+
+#include "base/logging.h"
+#include "base/memory/scoped_ptr.h"
+
+namespace base {
+
+namespace {
+
+void OnNoMemory() {
+ // Kill the process. This is important for security, since WebKit doesn't
+ // NULL-check many memory allocations. If a malloc fails, returns NULL, and
+ // the buffer is then used, it provides a handy mapping of memory starting at
+ // address 0 for an attacker to utilize.
+ __debugbreak();
+ _exit(1);
+}
+
+// HeapSetInformation function pointer.
+typedef BOOL (WINAPI* HeapSetFn)(HANDLE, HEAP_INFORMATION_CLASS, PVOID, SIZE_T);
+
+} // namespace
+
+bool EnableLowFragmentationHeap() {
+ HMODULE kernel32 = GetModuleHandle(L"kernel32.dll");
+ HeapSetFn heap_set = reinterpret_cast<HeapSetFn>(GetProcAddress(
+ kernel32,
+ "HeapSetInformation"));
+
+ // On Windows 2000, the function is not exported. This is not a reason to
+ // fail.
+ if (!heap_set)
+ return true;
+
+ unsigned number_heaps = GetProcessHeaps(0, NULL);
+ if (!number_heaps)
+ return false;
+
+ // Gives us some extra space in the array in case a thread is creating heaps
+ // at the same time we're querying them.
+ static const int MARGIN = 8;
+ scoped_ptr<HANDLE[]> heaps(new HANDLE[number_heaps + MARGIN]);
+ number_heaps = GetProcessHeaps(number_heaps + MARGIN, heaps.get());
+ if (!number_heaps)
+ return false;
+
+ for (unsigned i = 0; i < number_heaps; ++i) {
+ ULONG lfh_flag = 2;
+    // Don't bother with the result code. It may fail on heaps that have the
+ // HEAP_NO_SERIALIZE flag. This is expected and not a problem at all.
+ heap_set(heaps[i],
+ HeapCompatibilityInformation,
+ &lfh_flag,
+ sizeof(lfh_flag));
+ }
+ return true;
+}
+
+void EnableTerminationOnHeapCorruption() {
+ // Ignore the result code. Supported on XP SP3 and Vista.
+ HeapSetInformation(NULL, HeapEnableTerminationOnCorruption, NULL, 0);
+}
+
+void EnableTerminationOnOutOfMemory() {
+ std::set_new_handler(&OnNoMemory);
+}
+
+HMODULE GetModuleFromAddress(void* address) {
+ HMODULE instance = NULL;
+ if (!::GetModuleHandleExA(GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS |
+ GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT,
+ static_cast<char*>(address),
+ &instance)) {
+ NOTREACHED();
+ }
+ return instance;
+}
+
+} // namespace base
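A small sketch of how GetModuleFromAddress() above can be used, mirroring the unit test earlier in this change: pass the address of something that lives in the module of interest. The anchor constant and the logging here are illustrative.

#include <windows.h>
#include "base/process/memory.h"

namespace {
const int kAnchorInThisModule = 0;  // Any object in this module will do.
}  // namespace

void LogOwningModulePath() {
  HMODULE module = base::GetModuleFromAddress(
      const_cast<int*>(&kAnchorInThisModule));
  // Per the header comment, the module's reference count is not incremented,
  // so no matching FreeLibrary() call is needed here.
  wchar_t path[MAX_PATH];
  if (module && ::GetModuleFileNameW(module, path, MAX_PATH))
    ::OutputDebugStringW(path);
}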
diff --git a/base/process_util.h b/base/process_util.h
index 082017f..fdc9cf9 100644
--- a/base/process_util.h
+++ b/base/process_util.h
@@ -36,6 +36,7 @@ typedef struct _malloc_zone_t malloc_zone_t;
#include "base/base_export.h"
#include "base/files/file_path.h"
#include "base/process.h"
+#include "base/process/memory.h"
#include "base/process/process_iterator.h"
#include "base/process/process_metrics.h"
@@ -59,10 +60,6 @@ enum TerminationStatus {
TERMINATION_STATUS_MAX_ENUM
};
-#if defined(USE_LINUX_BREAKPAD)
-BASE_EXPORT extern size_t g_oom_size;
-#endif
-
#if defined(OS_WIN)
// Output multi-process printf, cout, cerr, etc to the cmd.exe console that ran
// chrome. This is not thread-safe: only call from main thread.
@@ -75,12 +72,6 @@ BASE_EXPORT ProcessId GetCurrentProcId();
// Returns the ProcessHandle of the current process.
BASE_EXPORT ProcessHandle GetCurrentProcessHandle();
-#if defined(OS_WIN)
-// Returns the module handle to which an address belongs. The reference count
-// of the module is not incremented.
-BASE_EXPORT HMODULE GetModuleFromAddress(void* address);
-#endif
-
// Converts a PID to a process handle. This handle must be closed by
// CloseProcessHandle when you are done with it. Returns true on success.
BASE_EXPORT bool OpenProcessHandle(ProcessId pid, ProcessHandle* handle);
@@ -120,18 +111,6 @@ BASE_EXPORT FilePath GetProcessExecutablePath(ProcessHandle process);
// Returns 0 on failure.
BASE_EXPORT int GetNumberOfThreads(ProcessHandle process);
-// The maximum allowed value for the OOM score.
-const int kMaxOomScore = 1000;
-
-// This adjusts /proc/<pid>/oom_score_adj so the Linux OOM killer will
-// prefer to kill certain process types over others. The range for the
-// adjustment is [-1000, 1000], with [0, 1000] being user accessible.
-// If the Linux system doesn't support the newer oom_score_adj range
-// of [0, 1000], then we revert to using the older oom_adj, and
-// translate the given value into [0, 15]. Some aliasing of values
-// may occur in that case, of course.
-BASE_EXPORT bool AdjustOOMScore(ProcessId process, int score);
-
// /proc/self/exe refers to the current executable.
BASE_EXPORT extern const char kProcSelfExe[];
#endif // defined(OS_LINUX) || defined(OS_ANDROID)
@@ -475,21 +454,6 @@ BASE_EXPORT void EnsureProcessTerminated(ProcessHandle process_handle);
BASE_EXPORT void EnsureProcessGetsReaped(ProcessHandle process_handle);
#endif
-// Enables low fragmentation heap (LFH) for every heaps of this process. This
-// won't have any effect on heaps created after this function call. It will not
-// modify data allocated in the heaps before calling this function. So it is
-// better to call this function early in initialization and again before
-// entering the main loop.
-// Note: Returns true on Windows 2000 without doing anything.
-BASE_EXPORT bool EnableLowFragmentationHeap();
-
-// Enables 'terminate on heap corruption' flag. Helps protect against heap
-// overflow. Has no effect if the OS doesn't provide the necessary facility.
-BASE_EXPORT void EnableTerminationOnHeapCorruption();
-
-// Turns on process termination if memory runs out.
-BASE_EXPORT void EnableTerminationOnOutOfMemory();
-
// If supported on the platform, and the user has sufficient rights, increase
// the current process's scheduling priority to a high priority.
BASE_EXPORT void RaiseProcessToHighPriority();
@@ -505,20 +469,6 @@ BASE_EXPORT void RaiseProcessToHighPriority();
void RestoreDefaultExceptionHandler();
#endif // defined(OS_MACOSX)
-#if defined(OS_MACOSX)
-// Very large images or svg canvases can cause huge mallocs. Skia
-// does tricks on tcmalloc-based systems to allow malloc to fail with
-// a NULL rather than hit the oom crasher. This replicates that for
-// OSX.
-//
-// IF YOU USE THIS WITHOUT CONSULTING YOUR FRIENDLY OSX DEVELOPER,
-// YOUR CODE IS LIKELY TO BE REVERTED. THANK YOU.
-//
-// TODO(shess): Weird place to put it, but this is where the OOM
-// killer currently lives.
-BASE_EXPORT void* UncheckedMalloc(size_t size);
-#endif // defined(OS_MACOSX)
-
} // namespace base
#endif // BASE_PROCESS_UTIL_H_
diff --git a/base/process_util_freebsd.cc b/base/process_util_freebsd.cc
index 71b4291..fc78e05 100644
--- a/base/process_util_freebsd.cc
+++ b/base/process_util_freebsd.cc
@@ -52,17 +52,4 @@ FilePath GetProcessExecutablePath(ProcessHandle process) {
return FilePath(std::string(pathname));
}
-void EnableTerminationOnOutOfMemory() {
- DLOG(WARNING) << "Not feasible.";
-}
-
-void EnableTerminationOnHeapCorruption() {
- // Nothing to do.
-}
-
-bool AdjustOOMScore(ProcessId process, int score) {
- NOTIMPLEMENTED();
- return false;
-}
-
} // namespace base
diff --git a/base/process_util_ios.mm b/base/process_util_ios.mm
index 4d95a7e..1e2b489 100644
--- a/base/process_util_ios.mm
+++ b/base/process_util_ios.mm
@@ -23,14 +23,6 @@ ProcessHandle GetCurrentProcessHandle() {
return GetCurrentProcId();
}
-void EnableTerminationOnHeapCorruption() {
- // On iOS, there's nothing to do AFAIK.
-}
-
-void EnableTerminationOnOutOfMemory() {
- // iOS provides this for free!
-}
-
void RaiseProcessToHighPriority() {
// Impossible on iOS. Do nothing.
}
diff --git a/base/process_util_linux.cc b/base/process_util_linux.cc
index e080999..e4a5a34 100644
--- a/base/process_util_linux.cc
+++ b/base/process_util_linux.cc
@@ -50,170 +50,4 @@ int GetNumberOfThreads(ProcessHandle process) {
internal::VM_NUMTHREADS);
}
-namespace {
-
-void OnNoMemorySize(size_t size) {
-#if defined(USE_LINUX_BREAKPAD)
- g_oom_size = size;
-#endif
-
- if (size != 0)
- LOG(FATAL) << "Out of memory, size = " << size;
- LOG(FATAL) << "Out of memory.";
-}
-
-void OnNoMemory() {
- OnNoMemorySize(0);
-}
-
-} // namespace
-
-#if !defined(ADDRESS_SANITIZER) && !defined(MEMORY_SANITIZER) && \
- !defined(THREAD_SANITIZER) && !defined(LEAK_SANITIZER)
-
-#if defined(LIBC_GLIBC) && !defined(USE_TCMALLOC)
-
-extern "C" {
-void* __libc_malloc(size_t size);
-void* __libc_realloc(void* ptr, size_t size);
-void* __libc_calloc(size_t nmemb, size_t size);
-void* __libc_valloc(size_t size);
-void* __libc_pvalloc(size_t size);
-void* __libc_memalign(size_t alignment, size_t size);
-
-// Overriding the system memory allocation functions:
-//
-// For security reasons, we want malloc failures to be fatal. Too much code
-// doesn't check for a NULL return value from malloc and unconditionally uses
-// the resulting pointer. If the first offset that they try to access is
-// attacker controlled, then the attacker can direct the code to access any
-// part of memory.
-//
-// Thus, we define all the standard malloc functions here and mark them as
-// visibility 'default'. This means that they replace the malloc functions for
-// all Chromium code and also for all code in shared libraries. There are tests
-// for this in process_util_unittest.cc.
-//
-// If we are using tcmalloc, then the problem is moot since tcmalloc handles
-// this for us. Thus this code is in a !defined(USE_TCMALLOC) block.
-//
-// If we are testing the binary with AddressSanitizer, we should not
-// redefine malloc and let AddressSanitizer do it instead.
-//
-// We call the real libc functions in this code by using __libc_malloc etc.
-// Previously we tried using dlsym(RTLD_NEXT, ...) but that failed depending on
-// the link order. Since ld.so needs calloc during symbol resolution, it
-// defines its own versions of several of these functions in dl-minimal.c.
-// Depending on the runtime library order, dlsym ended up giving us those
-// functions and bad things happened. See crbug.com/31809
-//
-// This means that any code which calls __libc_* gets the raw libc versions of
-// these functions.
-
-#define DIE_ON_OOM_1(function_name) \
- void* function_name(size_t) __attribute__ ((visibility("default"))); \
- \
- void* function_name(size_t size) { \
- void* ret = __libc_##function_name(size); \
- if (ret == NULL && size != 0) \
- OnNoMemorySize(size); \
- return ret; \
- }
-
-#define DIE_ON_OOM_2(function_name, arg1_type) \
- void* function_name(arg1_type, size_t) \
- __attribute__ ((visibility("default"))); \
- \
- void* function_name(arg1_type arg1, size_t size) { \
- void* ret = __libc_##function_name(arg1, size); \
- if (ret == NULL && size != 0) \
- OnNoMemorySize(size); \
- return ret; \
- }
-
-DIE_ON_OOM_1(malloc)
-DIE_ON_OOM_1(valloc)
-DIE_ON_OOM_1(pvalloc)
-
-DIE_ON_OOM_2(calloc, size_t)
-DIE_ON_OOM_2(realloc, void*)
-DIE_ON_OOM_2(memalign, size_t)
-
-// posix_memalign has a unique signature and doesn't have a __libc_ variant.
-int posix_memalign(void** ptr, size_t alignment, size_t size)
- __attribute__ ((visibility("default")));
-
-int posix_memalign(void** ptr, size_t alignment, size_t size) {
- // This will use the safe version of memalign, above.
- *ptr = memalign(alignment, size);
- return 0;
-}
-
-} // extern "C"
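
For readers who prefer not to expand the macros mentally, DIE_ON_OOM_1(malloc) above generates approximately the following (still inside the extern "C" block; the two-argument DIE_ON_OOM_2 wrappers differ only in the extra leading parameter):

void* malloc(size_t) __attribute__ ((visibility("default")));

void* malloc(size_t size) {
  void* ret = __libc_malloc(size);  // The raw glibc allocator.
  if (ret == NULL && size != 0)
    OnNoMemorySize(size);           // LOG(FATAL): the process dies here.
  return ret;
}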
-
-#else
-
-// TODO(mostynb@opera.com): dlsym dance
-
-#endif // LIBC_GLIBC && !USE_TCMALLOC
-
-#endif // !*_SANITIZER
-
-void EnableTerminationOnHeapCorruption() {
- // On Linux, there's nothing to do AFAIK.
-}
-
-void EnableTerminationOnOutOfMemory() {
-#if defined(OS_ANDROID)
- // Android doesn't support setting a new handler.
- DLOG(WARNING) << "Not feasible.";
-#else
- // Set the new-out of memory handler.
- std::set_new_handler(&OnNoMemory);
- // If we're using glibc's allocator, the above functions will override
- // malloc and friends and make them die on out of memory.
-#endif
-}
-
-// NOTE: This is not the only version of this function in the source:
-// the setuid sandbox (in process_util_linux.c, in the sandbox source)
-// also has its own C version.
-bool AdjustOOMScore(ProcessId process, int score) {
- if (score < 0 || score > kMaxOomScore)
- return false;
-
- FilePath oom_path(internal::GetProcPidDir(process));
-
- // Attempt to write the newer oom_score_adj file first.
- FilePath oom_file = oom_path.AppendASCII("oom_score_adj");
- if (file_util::PathExists(oom_file)) {
- std::string score_str = IntToString(score);
- DVLOG(1) << "Adjusting oom_score_adj of " << process << " to "
- << score_str;
- int score_len = static_cast<int>(score_str.length());
- return (score_len == file_util::WriteFile(oom_file,
- score_str.c_str(),
- score_len));
- }
-
- // If the oom_score_adj file doesn't exist, then we write the old-style
- // oom_adj file and translate the score into its 0-15 range.
- oom_file = oom_path.AppendASCII("oom_adj");
- if (file_util::PathExists(oom_file)) {
- // Max score for the old oom_adj range. Used for conversion of new
- // values to old values.
- const int kMaxOldOomScore = 15;
-
- int converted_score = score * kMaxOldOomScore / kMaxOomScore;
- std::string score_str = IntToString(converted_score);
- DVLOG(1) << "Adjusting oom_adj of " << process << " to " << score_str;
- int score_len = static_cast<int>(score_str.length());
- return (score_len == file_util::WriteFile(oom_file,
- score_str.c_str(),
- score_len));
- }
-
- return false;
-}
-
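
A hypothetical call site, with the fallback arithmetic worked through (the helper, its name, and the score of 300 are illustrative only):

#include "base/process_util.h"

// Deprioritize a background renderer so the kernel kills it first under
// memory pressure.
bool DeprioritizeRenderer(base::ProcessId pid) {
  // On kernels with oom_score_adj this writes "300" to
  // /proc/<pid>/oom_score_adj. On older kernels it falls back to oom_adj and
  // writes 300 * 15 / 1000 == 4 (integer division), so several distinct
  // new-style scores collapse onto the same old-style value.
  return base::AdjustOOMScore(pid, 300);
}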
} // namespace base
diff --git a/base/process_util_mac.mm b/base/process_util_mac.mm
index 89b6db4..183463e 100644
--- a/base/process_util_mac.mm
+++ b/base/process_util_mac.mm
@@ -38,14 +38,6 @@
#include "third_party/apple_apsl/CFBase.h"
#include "third_party/apple_apsl/malloc.h"
-#if ARCH_CPU_32_BITS
-#include <dlfcn.h>
-#include <mach-o/nlist.h>
-
-#include "base/threading/thread_local.h"
-#include "third_party/mach_override/mach_override.h"
-#endif // ARCH_CPU_32_BITS
-
namespace base {
void RestoreDefaultExceptionHandler() {
@@ -65,679 +57,6 @@ void RestoreDefaultExceptionHandler() {
EXCEPTION_DEFAULT, THREAD_STATE_NONE);
}
-
-
-// These are helpers for EnableTerminationOnHeapCorruption, which is a no-op
-// on 64 bit Macs.
-#if ARCH_CPU_32_BITS
-namespace {
-
-// Finds the library path for malloc() and thus the libC part of libSystem,
-// which in Lion is in a separate image.
-const char* LookUpLibCPath() {
- const void* addr = reinterpret_cast<void*>(&malloc);
-
- Dl_info info;
- if (dladdr(addr, &info))
- return info.dli_fname;
-
- DLOG(WARNING) << "Could not find image path for malloc()";
- return NULL;
-}
-
-typedef void(*malloc_error_break_t)(void);
-malloc_error_break_t g_original_malloc_error_break = NULL;
-
-// Returns the function pointer for malloc_error_break. This symbol is declared
-// as __private_extern__ and cannot be dlsym()ed. Instead, use nlist() to
-// get it.
-malloc_error_break_t LookUpMallocErrorBreak() {
- const char* lib_c_path = LookUpLibCPath();
- if (!lib_c_path)
- return NULL;
-
- // Only need to look up two symbols, but nlist() requires a NULL-terminated
- // array and takes no count.
- struct nlist nl[3];
- bzero(&nl, sizeof(nl));
-
- // The symbol to find.
- nl[0].n_un.n_name = const_cast<char*>("_malloc_error_break");
-
- // A reference symbol by which the address of the desired symbol will be
- // calculated.
- nl[1].n_un.n_name = const_cast<char*>("_malloc");
-
- int rv = nlist(lib_c_path, nl);
- if (rv != 0 || nl[0].n_type == N_UNDF || nl[1].n_type == N_UNDF) {
- return NULL;
- }
-
- // nlist() returns addresses as offsets in the image, not the instruction
- // pointer in memory. Use the known in-memory address of malloc()
- // to compute the offset for malloc_error_break().
- uintptr_t reference_addr = reinterpret_cast<uintptr_t>(&malloc);
- reference_addr -= nl[1].n_value;
- reference_addr += nl[0].n_value;
-
- return reinterpret_cast<malloc_error_break_t>(reference_addr);
-}
-
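
A concrete run of the offset arithmetic above, using made-up numbers purely for illustration:

// Hypothetical values:
//   nl[1].n_value ("_malloc", offset within the libc image)             = 0x1000
//   nl[0].n_value ("_malloc_error_break", offset within the same image) = 0x1200
//   &malloc (actual address of malloc() in this process)            = 0x9A001000
//
// reference_addr = 0x9A001000 - 0x1000 + 0x1200 = 0x9A001200,
// which is where malloc_error_break() lives in memory.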
-// Combines ThreadLocalBoolean with AutoReset. It would be convenient
-// to compose ThreadLocalPointer<bool> with base::AutoReset<bool>, but that
-// would require allocating some storage for the bool.
-class ThreadLocalBooleanAutoReset {
- public:
- ThreadLocalBooleanAutoReset(ThreadLocalBoolean* tlb, bool new_value)
- : scoped_tlb_(tlb),
- original_value_(tlb->Get()) {
- scoped_tlb_->Set(new_value);
- }
- ~ThreadLocalBooleanAutoReset() {
- scoped_tlb_->Set(original_value_);
- }
-
- private:
- ThreadLocalBoolean* scoped_tlb_;
- bool original_value_;
-
- DISALLOW_COPY_AND_ASSIGN(ThreadLocalBooleanAutoReset);
-};
-
-base::LazyInstance<ThreadLocalBoolean>::Leaky
- g_unchecked_malloc = LAZY_INSTANCE_INITIALIZER;
-
-// NOTE(shess): This is called when the malloc library noticed that the heap
-// is fubar. Avoid calls which will re-enter the malloc library.
-void CrMallocErrorBreak() {
- g_original_malloc_error_break();
-
- // Out of memory is certainly not heap corruption, and not necessarily
- // something for which the process should be terminated. Leave that decision
- // to the OOM killer. The EBADF case comes up because the malloc library
- // attempts to log to ASL (syslog) before calling this code, which fails
- // accessing a Unix-domain socket because of sandboxing.
- if (errno == ENOMEM || (errno == EBADF && g_unchecked_malloc.Get().Get()))
- return;
-
- // A unit test checks this error message, so it needs to be in release builds.
- char buf[1024] =
- "Terminating process due to a potential for future heap corruption: "
- "errno=";
- char errnobuf[] = {
- '0' + ((errno / 100) % 10),
- '0' + ((errno / 10) % 10),
- '0' + (errno % 10),
- '\000'
- };
- COMPILE_ASSERT(ELAST <= 999, errno_too_large_to_encode);
- strlcat(buf, errnobuf, sizeof(buf));
- RAW_LOG(ERROR, buf);
-
- // Crash by writing to NULL+errno to allow analyzing errno from
- // crash dump info (setting a breakpad key would re-enter the malloc
- // library). Max documented errno in intro(2) is actually 102, but
- // it really just needs to be "small" to stay on the right vm page.
- const int kMaxErrno = 256;
- char* volatile death_ptr = NULL;
- death_ptr += std::min(errno, kMaxErrno);
- *death_ptr = '!';
-}
-
-} // namespace
-#endif // ARCH_CPU_32_BITS
-
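
To make the errno-encoding trick at the end of CrMallocErrorBreak() concrete, a worked example with an assumed errno value:

// Suppose errno is 9 (EBADF) and the unchecked-malloc flag is not set:
//   death_ptr = NULL + std::min(9, kMaxErrno) = (char*)0x9;
//   *death_ptr = '!';   // Faults at address 0x9.
// The faulting address in the crash dump therefore encodes errno directly,
// without re-entering the malloc library the way setting a crash key would.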
-void EnableTerminationOnHeapCorruption() {
-#if defined(ADDRESS_SANITIZER) || ARCH_CPU_64_BITS
- // AddressSanitizer handles heap corruption, and on 64 bit Macs, the malloc
- // system automatically abort()s on heap corruption.
- return;
-#else
- // Only override once, otherwise CrMallocErrorBreak() will recurse
- // to itself.
- if (g_original_malloc_error_break)
- return;
-
- malloc_error_break_t malloc_error_break = LookUpMallocErrorBreak();
- if (!malloc_error_break) {
- DLOG(WARNING) << "Could not find malloc_error_break";
- return;
- }
-
- mach_error_t err = mach_override_ptr(
- (void*)malloc_error_break,
- (void*)&CrMallocErrorBreak,
- (void**)&g_original_malloc_error_break);
-
- if (err != err_none)
- DLOG(WARNING) << "Could not override malloc_error_break; error = " << err;
-#endif // defined(ADDRESS_SANITIZER) || ARCH_CPU_64_BITS
-}
-
-// ------------------------------------------------------------------------
-
-namespace {
-
-bool g_oom_killer_enabled;
-
-// Starting with Mac OS X 10.7, the zone allocators set up by the system are
-// read-only, to prevent them from being overwritten in an attack. However,
-// blindly unprotecting and reprotecting the zone allocators fails with
-// GuardMalloc because GuardMalloc sets up its zone allocator using a block of
-// memory in its bss. Explicit saving/restoring of the protection is required.
-//
-// This function takes a pointer to a malloc zone, de-protects it if necessary,
-// and returns (in the out parameters) a region of memory (if any) to be
-// re-protected when modifications are complete. This approach assumes that
-// there is no contention for the protection of this memory.
-void DeprotectMallocZone(ChromeMallocZone* default_zone,
- mach_vm_address_t* reprotection_start,
- mach_vm_size_t* reprotection_length,
- vm_prot_t* reprotection_value) {
- mach_port_t unused;
- *reprotection_start = reinterpret_cast<mach_vm_address_t>(default_zone);
- struct vm_region_basic_info_64 info;
- mach_msg_type_number_t count = VM_REGION_BASIC_INFO_COUNT_64;
- kern_return_t result =
- mach_vm_region(mach_task_self(),
- reprotection_start,
- reprotection_length,
- VM_REGION_BASIC_INFO_64,
- reinterpret_cast<vm_region_info_t>(&info),
- &count,
- &unused);
- CHECK(result == KERN_SUCCESS);
-
- result = mach_port_deallocate(mach_task_self(), unused);
- CHECK(result == KERN_SUCCESS);
-
- // Does the region fully enclose the zone pointers? Possibly unwarranted
- // simplification used: using the size of a full version 8 malloc zone rather
- // than the actual smaller size if the passed-in zone is not version 8.
- CHECK(*reprotection_start <=
- reinterpret_cast<mach_vm_address_t>(default_zone));
- mach_vm_size_t zone_offset = reinterpret_cast<mach_vm_size_t>(default_zone) -
- reinterpret_cast<mach_vm_size_t>(*reprotection_start);
- CHECK(zone_offset + sizeof(ChromeMallocZone) <= *reprotection_length);
-
- if (info.protection & VM_PROT_WRITE) {
- // No change needed; the zone is already writable.
- *reprotection_start = 0;
- *reprotection_length = 0;
- *reprotection_value = VM_PROT_NONE;
- } else {
- *reprotection_value = info.protection;
- result = mach_vm_protect(mach_task_self(),
- *reprotection_start,
- *reprotection_length,
- false,
- info.protection | VM_PROT_WRITE);
- CHECK(result == KERN_SUCCESS);
- }
-}
-
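
Condensed into a hedged sketch, the calling pattern around DeprotectMallocZone() looks like this (the helper and hook names are hypothetical; the real code further down patches several zone entry points in one deprotect/reprotect cycle and uses the malloc_type typedef defined just below):

// Patch a single entry point of a (possibly read-only) malloc zone.
void InstallMallocHook(ChromeMallocZone* zone, malloc_type hook) {
  mach_vm_address_t start = 0;
  mach_vm_size_t length = 0;
  vm_prot_t prot = VM_PROT_NONE;
  DeprotectMallocZone(zone, &start, &length, &prot);  // Zone is now writable.

  zone->malloc = hook;

  if (start) {
    // The zone was protected before; restore its original protection.
    kern_return_t result =
        mach_vm_protect(mach_task_self(), start, length, false, prot);
    CHECK(result == KERN_SUCCESS);
  }
}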
-// === C malloc/calloc/valloc/realloc/posix_memalign ===
-
-typedef void* (*malloc_type)(struct _malloc_zone_t* zone,
- size_t size);
-typedef void* (*calloc_type)(struct _malloc_zone_t* zone,
- size_t num_items,
- size_t size);
-typedef void* (*valloc_type)(struct _malloc_zone_t* zone,
- size_t size);
-typedef void (*free_type)(struct _malloc_zone_t* zone,
- void* ptr);
-typedef void* (*realloc_type)(struct _malloc_zone_t* zone,
- void* ptr,
- size_t size);
-typedef void* (*memalign_type)(struct _malloc_zone_t* zone,
- size_t alignment,
- size_t size);
-
-malloc_type g_old_malloc;
-calloc_type g_old_calloc;
-valloc_type g_old_valloc;
-free_type g_old_free;
-realloc_type g_old_realloc;
-memalign_type g_old_memalign;
-
-malloc_type g_old_malloc_purgeable;
-calloc_type g_old_calloc_purgeable;
-valloc_type g_old_valloc_purgeable;
-free_type g_old_free_purgeable;
-realloc_type g_old_realloc_purgeable;
-memalign_type g_old_memalign_purgeable;
-
-void* oom_killer_malloc(struct _malloc_zone_t* zone,
- size_t size) {
-#if ARCH_CPU_32_BITS
- ScopedClearErrno clear_errno;
-#endif // ARCH_CPU_32_BITS
- void* result = g_old_malloc(zone, size);
- if (!result && size)
- debug::BreakDebugger();
- return result;
-}
-
-void* oom_killer_calloc(struct _malloc_zone_t* zone,
- size_t num_items,
- size_t size) {
-#if ARCH_CPU_32_BITS
- ScopedClearErrno clear_errno;
-#endif // ARCH_CPU_32_BITS
- void* result = g_old_calloc(zone, num_items, size);
- if (!result && num_items && size)
- debug::BreakDebugger();
- return result;
-}
-
-void* oom_killer_valloc(struct _malloc_zone_t* zone,
- size_t size) {
-#if ARCH_CPU_32_BITS
- ScopedClearErrno clear_errno;
-#endif // ARCH_CPU_32_BITS
- void* result = g_old_valloc(zone, size);
- if (!result && size)
- debug::BreakDebugger();
- return result;
-}
-
-void oom_killer_free(struct _malloc_zone_t* zone,
- void* ptr) {
-#if ARCH_CPU_32_BITS
- ScopedClearErrno clear_errno;
-#endif // ARCH_CPU_32_BITS
- g_old_free(zone, ptr);
-}
-
-void* oom_killer_realloc(struct _malloc_zone_t* zone,
- void* ptr,
- size_t size) {
-#if ARCH_CPU_32_BITS
- ScopedClearErrno clear_errno;
-#endif // ARCH_CPU_32_BITS
- void* result = g_old_realloc(zone, ptr, size);
- if (!result && size)
- debug::BreakDebugger();
- return result;
-}
-
-void* oom_killer_memalign(struct _malloc_zone_t* zone,
- size_t alignment,
- size_t size) {
-#if ARCH_CPU_32_BITS
- ScopedClearErrno clear_errno;
-#endif // ARCH_CPU_32_BITS
- void* result = g_old_memalign(zone, alignment, size);
- // Only die if posix_memalign would have returned ENOMEM, since there are
- // other reasons why NULL might be returned (see
- // http://opensource.apple.com/source/Libc/Libc-583/gen/malloc.c ).
- if (!result && size && alignment >= sizeof(void*)
- && (alignment & (alignment - 1)) == 0) {
- debug::BreakDebugger();
- }
- return result;
-}
-
-void* oom_killer_malloc_purgeable(struct _malloc_zone_t* zone,
- size_t size) {
-#if ARCH_CPU_32_BITS
- ScopedClearErrno clear_errno;
-#endif // ARCH_CPU_32_BITS
- void* result = g_old_malloc_purgeable(zone, size);
- if (!result && size)
- debug::BreakDebugger();
- return result;
-}
-
-void* oom_killer_calloc_purgeable(struct _malloc_zone_t* zone,
- size_t num_items,
- size_t size) {
-#if ARCH_CPU_32_BITS
- ScopedClearErrno clear_errno;
-#endif // ARCH_CPU_32_BITS
- void* result = g_old_calloc_purgeable(zone, num_items, size);
- if (!result && num_items && size)
- debug::BreakDebugger();
- return result;
-}
-
-void* oom_killer_valloc_purgeable(struct _malloc_zone_t* zone,
- size_t size) {
-#if ARCH_CPU_32_BITS
- ScopedClearErrno clear_errno;
-#endif // ARCH_CPU_32_BITS
- void* result = g_old_valloc_purgeable(zone, size);
- if (!result && size)
- debug::BreakDebugger();
- return result;
-}
-
-void oom_killer_free_purgeable(struct _malloc_zone_t* zone,
- void* ptr) {
-#if ARCH_CPU_32_BITS
- ScopedClearErrno clear_errno;
-#endif // ARCH_CPU_32_BITS
- g_old_free_purgeable(zone, ptr);
-}
-
-void* oom_killer_realloc_purgeable(struct _malloc_zone_t* zone,
- void* ptr,
- size_t size) {
-#if ARCH_CPU_32_BITS
- ScopedClearErrno clear_errno;
-#endif // ARCH_CPU_32_BITS
- void* result = g_old_realloc_purgeable(zone, ptr, size);
- if (!result && size)
- debug::BreakDebugger();
- return result;
-}
-
-void* oom_killer_memalign_purgeable(struct _malloc_zone_t* zone,
- size_t alignment,
- size_t size) {
-#if ARCH_CPU_32_BITS
- ScopedClearErrno clear_errno;
-#endif // ARCH_CPU_32_BITS
- void* result = g_old_memalign_purgeable(zone, alignment, size);
- // Only die if posix_memalign would have returned ENOMEM, since there are
- // other reasons why NULL might be returned (see
- // http://opensource.apple.com/source/Libc/Libc-583/gen/malloc.c ).
- if (!result && size && alignment >= sizeof(void*)
- && (alignment & (alignment - 1)) == 0) {
- debug::BreakDebugger();
- }
- return result;
-}
-
-// === C++ operator new ===
-
-void oom_killer_new() {
- debug::BreakDebugger();
-}
-
-// === Core Foundation CFAllocators ===
-
-bool CanGetContextForCFAllocator() {
- return !base::mac::IsOSLaterThanMountainLion_DontCallThis();
-}
-
-CFAllocatorContext* ContextForCFAllocator(CFAllocatorRef allocator) {
- if (base::mac::IsOSSnowLeopard()) {
- ChromeCFAllocatorLeopards* our_allocator =
- const_cast<ChromeCFAllocatorLeopards*>(
- reinterpret_cast<const ChromeCFAllocatorLeopards*>(allocator));
- return &our_allocator->_context;
- } else if (base::mac::IsOSLion() || base::mac::IsOSMountainLion()) {
- ChromeCFAllocatorLions* our_allocator =
- const_cast<ChromeCFAllocatorLions*>(
- reinterpret_cast<const ChromeCFAllocatorLions*>(allocator));
- return &our_allocator->_context;
- } else {
- return NULL;
- }
-}
-
-CFAllocatorAllocateCallBack g_old_cfallocator_system_default;
-CFAllocatorAllocateCallBack g_old_cfallocator_malloc;
-CFAllocatorAllocateCallBack g_old_cfallocator_malloc_zone;
-
-void* oom_killer_cfallocator_system_default(CFIndex alloc_size,
- CFOptionFlags hint,
- void* info) {
- void* result = g_old_cfallocator_system_default(alloc_size, hint, info);
- if (!result)
- debug::BreakDebugger();
- return result;
-}
-
-void* oom_killer_cfallocator_malloc(CFIndex alloc_size,
- CFOptionFlags hint,
- void* info) {
- void* result = g_old_cfallocator_malloc(alloc_size, hint, info);
- if (!result)
- debug::BreakDebugger();
- return result;
-}
-
-void* oom_killer_cfallocator_malloc_zone(CFIndex alloc_size,
- CFOptionFlags hint,
- void* info) {
- void* result = g_old_cfallocator_malloc_zone(alloc_size, hint, info);
- if (!result)
- debug::BreakDebugger();
- return result;
-}
-
-// === Cocoa NSObject allocation ===
-
-typedef id (*allocWithZone_t)(id, SEL, NSZone*);
-allocWithZone_t g_old_allocWithZone;
-
-id oom_killer_allocWithZone(id self, SEL _cmd, NSZone* zone)
-{
- id result = g_old_allocWithZone(self, _cmd, zone);
- if (!result)
- debug::BreakDebugger();
- return result;
-}
-
-} // namespace
-
-void* UncheckedMalloc(size_t size) {
- if (g_old_malloc) {
-#if ARCH_CPU_32_BITS
- ScopedClearErrno clear_errno;
- ThreadLocalBooleanAutoReset flag(g_unchecked_malloc.Pointer(), true);
-#endif // ARCH_CPU_32_BITS
- return g_old_malloc(malloc_default_zone(), size);
- }
- return malloc(size);
-}
-
-void EnableTerminationOnOutOfMemory() {
- if (g_oom_killer_enabled)
- return;
-
- g_oom_killer_enabled = true;
-
- // === C malloc/calloc/valloc/realloc/posix_memalign ===
-
- // This approach is not perfect, as requests for amounts of memory larger than
- // MALLOC_ABSOLUTE_MAX_SIZE (currently SIZE_T_MAX - (2 * PAGE_SIZE)) will
- // still fail with a NULL rather than dying (see
- // http://opensource.apple.com/source/Libc/Libc-583/gen/malloc.c for details).
- // Unfortunately, it's the best we can do. Also note that this does not affect
- // allocations from non-default zones.
-
- CHECK(!g_old_malloc && !g_old_calloc && !g_old_valloc && !g_old_realloc &&
- !g_old_memalign) << "Old allocators unexpectedly non-null";
-
- CHECK(!g_old_malloc_purgeable && !g_old_calloc_purgeable &&
- !g_old_valloc_purgeable && !g_old_realloc_purgeable &&
- !g_old_memalign_purgeable) << "Old allocators unexpectedly non-null";
-
-#if !defined(ADDRESS_SANITIZER)
- // Don't do anything special on OOM for the malloc zones replaced by
- // AddressSanitizer, as modifying or protecting them may not work correctly.
-
- ChromeMallocZone* default_zone =
- reinterpret_cast<ChromeMallocZone*>(malloc_default_zone());
- ChromeMallocZone* purgeable_zone =
- reinterpret_cast<ChromeMallocZone*>(malloc_default_purgeable_zone());
-
- mach_vm_address_t default_reprotection_start = 0;
- mach_vm_size_t default_reprotection_length = 0;
- vm_prot_t default_reprotection_value = VM_PROT_NONE;
- DeprotectMallocZone(default_zone,
- &default_reprotection_start,
- &default_reprotection_length,
- &default_reprotection_value);
-
- mach_vm_address_t purgeable_reprotection_start = 0;
- mach_vm_size_t purgeable_reprotection_length = 0;
- vm_prot_t purgeable_reprotection_value = VM_PROT_NONE;
- if (purgeable_zone) {
- DeprotectMallocZone(purgeable_zone,
- &purgeable_reprotection_start,
- &purgeable_reprotection_length,
- &purgeable_reprotection_value);
- }
-
- // Default zone
-
- g_old_malloc = default_zone->malloc;
- g_old_calloc = default_zone->calloc;
- g_old_valloc = default_zone->valloc;
- g_old_free = default_zone->free;
- g_old_realloc = default_zone->realloc;
- CHECK(g_old_malloc && g_old_calloc && g_old_valloc && g_old_free &&
- g_old_realloc)
- << "Failed to get system allocation functions.";
-
- default_zone->malloc = oom_killer_malloc;
- default_zone->calloc = oom_killer_calloc;
- default_zone->valloc = oom_killer_valloc;
- default_zone->free = oom_killer_free;
- default_zone->realloc = oom_killer_realloc;
-
- if (default_zone->version >= 5) {
- g_old_memalign = default_zone->memalign;
- if (g_old_memalign)
- default_zone->memalign = oom_killer_memalign;
- }
-
- // Purgeable zone (if it exists)
-
- if (purgeable_zone) {
- g_old_malloc_purgeable = purgeable_zone->malloc;
- g_old_calloc_purgeable = purgeable_zone->calloc;
- g_old_valloc_purgeable = purgeable_zone->valloc;
- g_old_free_purgeable = purgeable_zone->free;
- g_old_realloc_purgeable = purgeable_zone->realloc;
- CHECK(g_old_malloc_purgeable && g_old_calloc_purgeable &&
- g_old_valloc_purgeable && g_old_free_purgeable &&
- g_old_realloc_purgeable)
- << "Failed to get system allocation functions.";
-
- purgeable_zone->malloc = oom_killer_malloc_purgeable;
- purgeable_zone->calloc = oom_killer_calloc_purgeable;
- purgeable_zone->valloc = oom_killer_valloc_purgeable;
- purgeable_zone->free = oom_killer_free_purgeable;
- purgeable_zone->realloc = oom_killer_realloc_purgeable;
-
- if (purgeable_zone->version >= 5) {
- g_old_memalign_purgeable = purgeable_zone->memalign;
- if (g_old_memalign_purgeable)
- purgeable_zone->memalign = oom_killer_memalign_purgeable;
- }
- }
-
- // Restore protection if it was active.
-
- if (default_reprotection_start) {
- kern_return_t result = mach_vm_protect(mach_task_self(),
- default_reprotection_start,
- default_reprotection_length,
- false,
- default_reprotection_value);
- CHECK(result == KERN_SUCCESS);
- }
-
- if (purgeable_reprotection_start) {
- kern_return_t result = mach_vm_protect(mach_task_self(),
- purgeable_reprotection_start,
- purgeable_reprotection_length,
- false,
- purgeable_reprotection_value);
- CHECK(result == KERN_SUCCESS);
- }
-#endif
-
- // === C malloc_zone_batch_malloc ===
-
- // batch_malloc is omitted because the default malloc zone's implementation
- // only supports batch_malloc for "tiny" allocations from the free list. It
- // will fail for allocations larger than "tiny", and will only allocate as
- // many blocks as it's able to from the free list. These factors mean that it
- // can return less than the requested memory even in a non-out-of-memory
- // situation. There's no good way to detect whether a batch_malloc failure is
- // due to these other factors, or due to genuine memory or address space
- // exhaustion. The fact that it only allocates space from the "tiny" free list
- // means that it's likely that a failure will not be due to memory exhaustion.
- // Similarly, these constraints on batch_malloc mean that callers must always
- // be expecting to receive less memory than was requested, even in situations
- // where memory pressure is not a concern. Finally, the only public interface
- // to batch_malloc is malloc_zone_batch_malloc, which is specific to the
- // system's malloc implementation. It's unlikely that anyone's even heard of
- // it.
-
- // === C++ operator new ===
-
- // Yes, operator new does call through to malloc, but this will catch failures
- // that our imperfect handling of malloc cannot.
-
- std::set_new_handler(oom_killer_new);
-
-#ifndef ADDRESS_SANITIZER
- // === Core Foundation CFAllocators ===
-
- // This will not catch allocation done by custom allocators, but will catch
- // all allocation done by system-provided ones.
-
- CHECK(!g_old_cfallocator_system_default && !g_old_cfallocator_malloc &&
- !g_old_cfallocator_malloc_zone)
- << "Old allocators unexpectedly non-null";
-
- bool cf_allocator_internals_known = CanGetContextForCFAllocator();
-
- if (cf_allocator_internals_known) {
- CFAllocatorContext* context =
- ContextForCFAllocator(kCFAllocatorSystemDefault);
- CHECK(context) << "Failed to get context for kCFAllocatorSystemDefault.";
- g_old_cfallocator_system_default = context->allocate;
- CHECK(g_old_cfallocator_system_default)
- << "Failed to get kCFAllocatorSystemDefault allocation function.";
- context->allocate = oom_killer_cfallocator_system_default;
-
- context = ContextForCFAllocator(kCFAllocatorMalloc);
- CHECK(context) << "Failed to get context for kCFAllocatorMalloc.";
- g_old_cfallocator_malloc = context->allocate;
- CHECK(g_old_cfallocator_malloc)
- << "Failed to get kCFAllocatorMalloc allocation function.";
- context->allocate = oom_killer_cfallocator_malloc;
-
- context = ContextForCFAllocator(kCFAllocatorMallocZone);
- CHECK(context) << "Failed to get context for kCFAllocatorMallocZone.";
- g_old_cfallocator_malloc_zone = context->allocate;
- CHECK(g_old_cfallocator_malloc_zone)
- << "Failed to get kCFAllocatorMallocZone allocation function.";
- context->allocate = oom_killer_cfallocator_malloc_zone;
- } else {
- NSLog(@"Internals of CFAllocator not known; out-of-memory failures via "
- "CFAllocator will not result in termination. http://crbug.com/45650");
- }
-#endif
-
- // === Cocoa NSObject allocation ===
-
- // Note that both +[NSObject new] and +[NSObject alloc] call through to
- // +[NSObject allocWithZone:].
-
- CHECK(!g_old_allocWithZone)
- << "Old allocator unexpectedly non-null";
-
- Class nsobject_class = [NSObject class];
- Method orig_method = class_getClassMethod(nsobject_class,
- @selector(allocWithZone:));
- g_old_allocWithZone = reinterpret_cast<allocWithZone_t>(
- method_getImplementation(orig_method));
- CHECK(g_old_allocWithZone)
- << "Failed to get allocWithZone allocation function.";
- method_setImplementation(orig_method,
- reinterpret_cast<IMP>(oom_killer_allocWithZone));
-}
-
ProcessId GetParentProcessId(ProcessHandle process) {
struct kinfo_proc info;
size_t length = sizeof(struct kinfo_proc);
diff --git a/base/process_util_openbsd.cc b/base/process_util_openbsd.cc
index 24230f7..11d0407 100644
--- a/base/process_util_openbsd.cc
+++ b/base/process_util_openbsd.cc
@@ -64,10 +64,4 @@ FilePath GetProcessExecutablePath(ProcessHandle process) {
return FilePath();
}
-void EnableTerminationOnOutOfMemory() {
-}
-
-void EnableTerminationOnHeapCorruption() {
-}
-
} // namespace base
diff --git a/base/process_util_unittest.cc b/base/process_util_unittest.cc
index d3a4100..4a3d36d 100644
--- a/base/process_util_unittest.cc
+++ b/base/process_util_unittest.cc
@@ -44,7 +44,6 @@
#if defined(OS_MACOSX)
#include <mach/vm_param.h>
#include <malloc/malloc.h>
-#include "base/process_util_unittest_mac.h"
#endif
using base::FilePath;
@@ -76,12 +75,6 @@ const int kExpectedKilledExitCode = 1;
const int kExpectedStillRunningExitCode = 0;
#endif
-#if defined(OS_WIN)
-// HeapQueryInformation function pointer.
-typedef BOOL (WINAPI* HeapQueryFn) \
- (HANDLE, HEAP_INFORMATION_CLASS, PVOID, SIZE_T, PSIZE_T);
-#endif
-
// Sleeps until file filename is created.
void WaitToDie(const char* filename) {
FILE* fp;
@@ -213,25 +206,6 @@ TEST_F(ProcessUtilTest, GetProcId) {
EXPECT_NE(id1, id2);
base::CloseProcessHandle(handle);
}
-
-TEST_F(ProcessUtilTest, GetModuleFromAddress) {
- // Since the unit tests are their own EXE, this should be
- // equivalent to the EXE's HINSTANCE.
- //
- // kExpectedKilledExitCode is a constant in this file and
- // therefore within the unit test EXE.
- EXPECT_EQ(::GetModuleHandle(NULL),
- base::GetModuleFromAddress(
- const_cast<int*>(&kExpectedKilledExitCode)));
-
- // Any address within the kernel32 module should return
- // kernel32's HMODULE. Our only assumption here is that
- // kernel32 is larger than 4 bytes.
- HMODULE kernel32 = ::GetModuleHandle(L"kernel32.dll");
- HMODULE kernel32_from_address =
- base::GetModuleFromAddress(reinterpret_cast<DWORD*>(kernel32) + 1);
- EXPECT_EQ(kernel32, kernel32_from_address);
-}
#endif
#if !defined(OS_MACOSX)
@@ -418,49 +392,6 @@ TEST_F(ProcessUtilTest, GetSystemMemoryInfo) {
// TODO(estade): if possible, port these 2 tests.
#if defined(OS_WIN)
-TEST_F(ProcessUtilTest, EnableLFH) {
- ASSERT_TRUE(base::EnableLowFragmentationHeap());
- if (IsDebuggerPresent()) {
- // Under these conditions, LFH can't be enabled. There's no point to test
- // anything.
- const char* no_debug_env = getenv("_NO_DEBUG_HEAP");
- if (!no_debug_env || strcmp(no_debug_env, "1"))
- return;
- }
- HMODULE kernel32 = GetModuleHandle(L"kernel32.dll");
- ASSERT_TRUE(kernel32 != NULL);
- HeapQueryFn heap_query = reinterpret_cast<HeapQueryFn>(GetProcAddress(
- kernel32,
- "HeapQueryInformation"));
-
- // On Windows 2000, the function is not exported. This is not a reason to
- // fail, but we won't be able to retrieve information about the heap, so we
- // should stop here.
- if (heap_query == NULL)
- return;
-
- HANDLE heaps[1024] = { 0 };
- unsigned number_heaps = GetProcessHeaps(1024, heaps);
- EXPECT_GT(number_heaps, 0u);
- for (unsigned i = 0; i < number_heaps; ++i) {
- ULONG flag = 0;
- SIZE_T length;
- ASSERT_NE(0, heap_query(heaps[i],
- HeapCompatibilityInformation,
- &flag,
- sizeof(flag),
- &length));
- // If flag is 0, the heap is a standard heap that does not support
- // look-asides. If flag is 1, the heap supports look-asides. If flag is 2,
- // the heap is a low-fragmentation heap (LFH). Note that look-asides are not
- // supported on the LFH.
-
- // We don't have any documented way of querying the HEAP_NO_SERIALIZE flag.
- EXPECT_LE(flag, 2u);
- EXPECT_NE(flag, 1u);
- }
-}
-
TEST_F(ProcessUtilTest, CalcFreeMemory) {
scoped_ptr<base::ProcessMetrics> metrics(
base::ProcessMetrics::CreateProcessMetrics(::GetCurrentProcess()));
@@ -545,60 +476,6 @@ TEST_F(ProcessUtilTest, LaunchAsUser) {
#endif // defined(OS_WIN)
-#if defined(OS_MACOSX)
-
-// For the following Mac tests:
-// Note that base::EnableTerminationOnHeapCorruption() is called as part of
-// test suite setup and does not need to be done again, else mach_override
-// will fail.
-
-#if !defined(ADDRESS_SANITIZER)
-// The following code tests the system implementation of malloc() thus no need
-// to test it under AddressSanitizer.
-TEST_F(ProcessUtilTest, MacMallocFailureDoesNotTerminate) {
- // Test that ENOMEM doesn't crash via CrMallocErrorBreak two ways: the exit
- // code and lack of the error string. The number of bytes is one less than
- // MALLOC_ABSOLUTE_MAX_SIZE, more than which the system early-returns NULL and
- // does not call through malloc_error_break(). See the comment at
- // EnableTerminationOnOutOfMemory() for more information.
- void* buf = NULL;
- ASSERT_EXIT(
- {
- base::EnableTerminationOnOutOfMemory();
-
- buf = malloc(std::numeric_limits<size_t>::max() - (2 * PAGE_SIZE) - 1);
- },
- testing::KilledBySignal(SIGTRAP),
- "\\*\\*\\* error: can't allocate region.*"
- "(Terminating process due to a potential for future heap "
- "corruption){0}");
-
- base::debug::Alias(buf);
-}
-#endif // !defined(ADDRESS_SANITIZER)
-
-TEST_F(ProcessUtilTest, MacTerminateOnHeapCorruption) {
- // Assert that freeing an unallocated pointer will crash the process.
- char buf[3];
- asm("" : "=r" (buf)); // Prevent clang from being too smart.
-#if ARCH_CPU_64_BITS
- // On 64 bit Macs, the malloc system automatically abort()s on heap corruption
- // but does not output anything.
- ASSERT_DEATH(free(buf), "");
-#elif defined(ADDRESS_SANITIZER)
- // AddressSanitizer replaces malloc() and prints a different error message on
- // heap corruption.
- ASSERT_DEATH(free(buf), "attempting free on address which "
- "was not malloc\\(\\)-ed");
-#else
- ASSERT_DEATH(free(buf), "being freed.*"
- "\\*\\*\\* set a breakpoint in malloc_error_break to debug.*"
- "Terminating process due to a potential for future heap corruption");
-#endif // ARCH_CPU_64_BITS || defined(ADDRESS_SANITIZER)
-}
-
-#endif // defined(OS_MACOSX)
-
#if defined(OS_POSIX)
namespace {
@@ -1078,230 +955,3 @@ MULTIPROCESS_TEST_MAIN(process_util_test_die_immediately) {
}
#endif // defined(OS_POSIX)
-
-// Android doesn't implement set_new_handler, so we can't use the
-// OutOfMemoryTest cases.
-// OpenBSD does not support these tests either.
-// AddressSanitizer and ThreadSanitizer define the malloc()/free()/etc.
-// functions so that they don't crash if the program is out of memory, so the
-// OOM tests aren't supposed to work.
-// TODO(vandebo) make this work on Windows too.
-#if !defined(OS_ANDROID) && !defined(OS_OPENBSD) && \
- !defined(OS_WIN) && \
- !defined(ADDRESS_SANITIZER) && !defined(THREAD_SANITIZER)
-
-#if defined(USE_TCMALLOC)
-extern "C" {
-int tc_set_new_mode(int mode);
-}
-#endif // defined(USE_TCMALLOC)
-
-class OutOfMemoryDeathTest : public testing::Test {
- public:
- OutOfMemoryDeathTest()
- : value_(NULL),
- // Make test size as large as possible minus a few pages so
- // that alignment or other rounding doesn't make it wrap.
- test_size_(std::numeric_limits<std::size_t>::max() - 12 * 1024),
- signed_test_size_(std::numeric_limits<ssize_t>::max()) {
- }
-
-#if defined(USE_TCMALLOC)
- virtual void SetUp() OVERRIDE {
- tc_set_new_mode(1);
- }
-
- virtual void TearDown() OVERRIDE {
- tc_set_new_mode(0);
- }
-#endif // defined(USE_TCMALLOC)
-
- void SetUpInDeathAssert() {
- // Must call EnableTerminationOnOutOfMemory() because that is called from
- // chrome's main function and therefore hasn't been called yet.
- // Since this call may result in another thread being created and death
- // tests shouldn't be started in a multithread environment, this call
- // should be done inside of the ASSERT_DEATH.
- base::EnableTerminationOnOutOfMemory();
- }
-
- void* value_;
- size_t test_size_;
- ssize_t signed_test_size_;
-};
-
-TEST_F(OutOfMemoryDeathTest, New) {
- ASSERT_DEATH({
- SetUpInDeathAssert();
- value_ = operator new(test_size_);
- }, "");
-}
-
-TEST_F(OutOfMemoryDeathTest, NewArray) {
- ASSERT_DEATH({
- SetUpInDeathAssert();
- value_ = new char[test_size_];
- }, "");
-}
-
-TEST_F(OutOfMemoryDeathTest, Malloc) {
- ASSERT_DEATH({
- SetUpInDeathAssert();
- value_ = malloc(test_size_);
- }, "");
-}
-
-TEST_F(OutOfMemoryDeathTest, Realloc) {
- ASSERT_DEATH({
- SetUpInDeathAssert();
- value_ = realloc(NULL, test_size_);
- }, "");
-}
-
-TEST_F(OutOfMemoryDeathTest, Calloc) {
- ASSERT_DEATH({
- SetUpInDeathAssert();
- value_ = calloc(1024, test_size_ / 1024L);
- }, "");
-}
-
-TEST_F(OutOfMemoryDeathTest, Valloc) {
- ASSERT_DEATH({
- SetUpInDeathAssert();
- value_ = valloc(test_size_);
- }, "");
-}
-
-#if defined(OS_LINUX)
-TEST_F(OutOfMemoryDeathTest, Pvalloc) {
- ASSERT_DEATH({
- SetUpInDeathAssert();
- value_ = pvalloc(test_size_);
- }, "");
-}
-
-TEST_F(OutOfMemoryDeathTest, Memalign) {
- ASSERT_DEATH({
- SetUpInDeathAssert();
- value_ = memalign(4, test_size_);
- }, "");
-}
-
-TEST_F(OutOfMemoryDeathTest, ViaSharedLibraries) {
- // g_try_malloc is documented to return NULL on failure. (g_malloc is the
- // 'safe' default that crashes if allocation fails). However, since we have
- // hopefully overridden malloc, even g_try_malloc should fail. This tests
- // that the run-time symbol resolution is overriding malloc for shared
- // libraries as well as for our code.
- ASSERT_DEATH({
- SetUpInDeathAssert();
- value_ = g_try_malloc(test_size_);
- }, "");
-}
-#endif // OS_LINUX
-
-// Android doesn't implement posix_memalign().
-#if defined(OS_POSIX) && !defined(OS_ANDROID)
-TEST_F(OutOfMemoryDeathTest, Posix_memalign) {
- // Grab the return value of posix_memalign to silence a compiler warning
- // about unused return values. We don't actually care about the return
- // value, since we're asserting death.
- ASSERT_DEATH({
- SetUpInDeathAssert();
- EXPECT_EQ(ENOMEM, posix_memalign(&value_, 8, test_size_));
- }, "");
-}
-#endif // defined(OS_POSIX) && !defined(OS_ANDROID)
-
-#if defined(OS_MACOSX)
-
-// Purgeable zone tests
-
-TEST_F(OutOfMemoryDeathTest, MallocPurgeable) {
- malloc_zone_t* zone = malloc_default_purgeable_zone();
- ASSERT_DEATH({
- SetUpInDeathAssert();
- value_ = malloc_zone_malloc(zone, test_size_);
- }, "");
-}
-
-TEST_F(OutOfMemoryDeathTest, ReallocPurgeable) {
- malloc_zone_t* zone = malloc_default_purgeable_zone();
- ASSERT_DEATH({
- SetUpInDeathAssert();
- value_ = malloc_zone_realloc(zone, NULL, test_size_);
- }, "");
-}
-
-TEST_F(OutOfMemoryDeathTest, CallocPurgeable) {
- malloc_zone_t* zone = malloc_default_purgeable_zone();
- ASSERT_DEATH({
- SetUpInDeathAssert();
- value_ = malloc_zone_calloc(zone, 1024, test_size_ / 1024L);
- }, "");
-}
-
-TEST_F(OutOfMemoryDeathTest, VallocPurgeable) {
- malloc_zone_t* zone = malloc_default_purgeable_zone();
- ASSERT_DEATH({
- SetUpInDeathAssert();
- value_ = malloc_zone_valloc(zone, test_size_);
- }, "");
-}
-
-TEST_F(OutOfMemoryDeathTest, PosixMemalignPurgeable) {
- malloc_zone_t* zone = malloc_default_purgeable_zone();
- ASSERT_DEATH({
- SetUpInDeathAssert();
- value_ = malloc_zone_memalign(zone, 8, test_size_);
- }, "");
-}
-
-// Since these allocation functions take a signed size, it's possible that
-// calling them just once won't be enough to exhaust memory. In the 32-bit
-// environment, it's likely that these allocation attempts will fail because
-// not enough contiguous address space is available. In the 64-bit environment,
-// it's likely that they'll fail because they would require a preposterous
-// amount of (virtual) memory.
-
-TEST_F(OutOfMemoryDeathTest, CFAllocatorSystemDefault) {
- ASSERT_DEATH({
- SetUpInDeathAssert();
- while ((value_ =
- base::AllocateViaCFAllocatorSystemDefault(signed_test_size_))) {}
- }, "");
-}
-
-TEST_F(OutOfMemoryDeathTest, CFAllocatorMalloc) {
- ASSERT_DEATH({
- SetUpInDeathAssert();
- while ((value_ =
- base::AllocateViaCFAllocatorMalloc(signed_test_size_))) {}
- }, "");
-}
-
-TEST_F(OutOfMemoryDeathTest, CFAllocatorMallocZone) {
- ASSERT_DEATH({
- SetUpInDeathAssert();
- while ((value_ =
- base::AllocateViaCFAllocatorMallocZone(signed_test_size_))) {}
- }, "");
-}
-
-#if !defined(ARCH_CPU_64_BITS)
-
-// See process_util_unittest_mac.mm for an explanation of why this test isn't
-// run in the 64-bit environment.
-
-TEST_F(OutOfMemoryDeathTest, PsychoticallyBigObjCObject) {
- ASSERT_DEATH({
- SetUpInDeathAssert();
- while ((value_ = base::AllocatePsychoticallyBigObjCObject())) {}
- }, "");
-}
-
-#endif // !ARCH_CPU_64_BITS
-#endif // OS_MACOSX
-
-#endif // !defined(OS_ANDROID) && !defined(OS_OPENBSD) && !defined(OS_WIN) &&
- // !defined(ADDRESS_SANITIZER) && !defined(THREAD_SANITIZER)
diff --git a/base/process_util_win.cc b/base/process_util_win.cc
index 5fadf4a..2230105 100644
--- a/base/process_util_win.cc
+++ b/base/process_util_win.cc
@@ -50,18 +50,6 @@ static const int kWaitInterval = 2000;
// process goes away.
const DWORD kProcessKilledExitCode = 1;
-// HeapSetInformation function pointer.
-typedef BOOL (WINAPI* HeapSetFn)(HANDLE, HEAP_INFORMATION_CLASS, PVOID, SIZE_T);
-
-void OnNoMemory() {
- // Kill the process. This is important for security, since WebKit doesn't
- // NULL-check many memory allocations. If a malloc fails and returns NULL,
- // and the buffer is then used, it provides a handy mapping of memory
- // starting at address 0 for an attacker to utilize.
- __debugbreak();
- _exit(1);
-}
-
class TimerExpiredTask : public win::ObjectWatcher::Delegate {
public:
explicit TimerExpiredTask(ProcessHandle process);
@@ -183,17 +171,6 @@ ProcessHandle GetCurrentProcessHandle() {
return ::GetCurrentProcess();
}
-HMODULE GetModuleFromAddress(void* address) {
- HMODULE instance = NULL;
- if (!::GetModuleHandleExA(GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS |
- GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT,
- static_cast<char*>(address),
- &instance)) {
- NOTREACHED();
- }
- return instance;
-}
-
bool OpenProcessHandle(ProcessId pid, ProcessHandle* handle) {
// We try to limit privileges granted to the handle. If you need this
// for test code, consider using OpenPrivilegedProcessHandle instead of
@@ -632,50 +609,6 @@ void EnsureProcessTerminated(ProcessHandle process) {
base::TimeDelta::FromMilliseconds(kWaitInterval));
}
-bool EnableLowFragmentationHeap() {
- HMODULE kernel32 = GetModuleHandle(L"kernel32.dll");
- HeapSetFn heap_set = reinterpret_cast<HeapSetFn>(GetProcAddress(
- kernel32,
- "HeapSetInformation"));
-
- // On Windows 2000, the function is not exported. This is not a reason to
- // fail.
- if (!heap_set)
- return true;
-
- unsigned number_heaps = GetProcessHeaps(0, NULL);
- if (!number_heaps)
- return false;
-
- // Gives us some extra space in the array in case a thread is creating heaps
- // at the same time we're querying them.
- static const int MARGIN = 8;
- scoped_ptr<HANDLE[]> heaps(new HANDLE[number_heaps + MARGIN]);
- number_heaps = GetProcessHeaps(number_heaps + MARGIN, heaps.get());
- if (!number_heaps)
- return false;
-
- for (unsigned i = 0; i < number_heaps; ++i) {
- ULONG lfh_flag = 2;
- // Don't bother with the result code. It may fail on heaps that have the
- // HEAP_NO_SERIALIZE flag. This is expected and not a problem at all.
- heap_set(heaps[i],
- HeapCompatibilityInformation,
- &lfh_flag,
- sizeof(lfh_flag));
- }
- return true;
-}
-
-void EnableTerminationOnHeapCorruption() {
- // Ignore the result code. Supported on XP SP3 and Vista.
- HeapSetInformation(NULL, HeapEnableTerminationOnCorruption, NULL, 0);
-}
-
-void EnableTerminationOnOutOfMemory() {
- std::set_new_handler(&OnNoMemory);
-}
-
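
A hedged sketch of the observable effect on Windows once the handler above is installed (the function and the allocation size are illustrative only):

#include <cstddef>
#include <limits>

#include "base/process_util.h"  // Post-CL: base/process/memory.h.

void DieInsteadOfReturningNull() {
  base::EnableTerminationOnOutOfMemory();
  // When this allocation fails, the installed new-handler (OnNoMemory above)
  // runs __debugbreak() and _exit(1), so no std::bad_alloc is thrown and
  // control never reaches the delete.
  char* p = new char[std::numeric_limits<std::size_t>::max() / 2];
  delete[] p;
}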
void RaiseProcessToHighPriority() {
SetPriorityClass(GetCurrentProcess(), HIGH_PRIORITY_CLASS);
}