-rw-r--r--  base/security_unittest.cc                          84
-rw-r--r--  third_party/tcmalloc/chromium/src/system-alloc.cc  72
2 files changed, 104 insertions, 52 deletions
diff --git a/base/security_unittest.cc b/base/security_unittest.cc
index 531159d..3fd4117 100644
--- a/base/security_unittest.cc
+++ b/base/security_unittest.cc
@@ -6,8 +6,10 @@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
+#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
+#include <unistd.h>
#include <algorithm>
#include <limits>
@@ -210,43 +212,59 @@ void PrintProcSelfMaps() {
fprintf(stdout, "%s\n", buffer);
}
+// Check if ptr1 and ptr2 are separated by at most |size| chars.
+bool ArePointersToSameArea(void* ptr1, void* ptr2, size_t size) {
+ ptrdiff_t ptr_diff = reinterpret_cast<char*>(std::max(ptr1, ptr2)) -
+ reinterpret_cast<char*>(std::min(ptr1, ptr2));
+ return static_cast<size_t>(ptr_diff) <= size;
+}
+
// Check if TCMalloc uses an underlying random memory allocator.
TEST(SecurityTest, ALLOC_TEST(RandomMemoryAllocations)) {
if (IsTcMallocBypassed())
return;
- // Two successsive calls to mmap() have roughly one chance out of 2^6 to
- // have the same two high order nibbles, which is what we are looking at in
- // this test. (In the implementation, we mask these two nibbles with 0x3f,
- // hence the 6 bits).
- // With 32 allocations, we see ~16 that end-up in different buckets (i.e.
- // zones mapped via mmap(), so the chances of this test flaking is roughly
- // 2^-(6*15).
- const int kAllocNumber = 32;
- // Make kAllocNumber successive allocations of growing size and compare the
- // successive pointers to detect adjacent mappings. We grow the size because
- // TCMalloc can sometimes over-allocate.
- scoped_ptr<char, base::FreeDeleter> ptr[kAllocNumber];
- for (int i = 0; i < kAllocNumber; ++i) {
- // Grow the Malloc size slightly sub-exponentially.
- const size_t kMallocSize = 1 << (12 + (i>>1));
- ptr[i].reset(static_cast<char*>(malloc(kMallocSize)));
- ASSERT_TRUE(ptr[i] != NULL);
- if (i > 0) {
- // Without mmap randomization, the two high order nibbles
- // of a 47 bits userland address address will be identical.
- // We're only watching the 6 bits that we actually do touch
- // in our implementation.
- const uintptr_t kHighOrderMask = 0x3f0000000000ULL;
- bool pointer_have_same_high_order =
- (reinterpret_cast<size_t>(ptr[i].get()) & kHighOrderMask) ==
- (reinterpret_cast<size_t>(ptr[i - 1].get()) & kHighOrderMask);
- if (!pointer_have_same_high_order) {
- // PrintProcSelfMaps();
- return; // Test passes.
- }
- }
- }
- ASSERT_TRUE(false); // NOTREACHED();
+  const size_t kPageSize = 4096;  // We support x86_64 only.
+ // Check that malloc() returns an address that is neither the kernel's
+ // un-hinted mmap area, nor the current brk() area. The first malloc() may
+ // not be at a random address because TCMalloc will first exhaust any memory
+ // that it has allocated early on, before starting the sophisticated
+ // allocators.
+ void* default_mmap_heap_address =
+ mmap(0, kPageSize, PROT_READ|PROT_WRITE,
+ MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
+ ASSERT_NE(default_mmap_heap_address,
+ static_cast<void*>(MAP_FAILED));
+ ASSERT_EQ(munmap(default_mmap_heap_address, kPageSize), 0);
+ void* brk_heap_address = sbrk(0);
+ ASSERT_NE(brk_heap_address, reinterpret_cast<void*>(-1));
+ ASSERT_TRUE(brk_heap_address != NULL);
+ // 1 MB should get us past what TCMalloc pre-allocated before initializing
+ // the sophisticated allocators.
+  const size_t kAllocSize = 1 << 20;
+ scoped_ptr<char, base::FreeDeleter> ptr(
+ static_cast<char*>(malloc(kAllocSize)));
+ ASSERT_TRUE(ptr != NULL);
+  // If two pointers are separated by less than 512MB, they are considered
+  // to be in the same area.
+  // Our random pointer could be anywhere within 0x3fffffffffff (46 bits),
+  // and we are checking that it's not within 1GB (30 bits, i.e. 512MB on
+  // either side) of two fixed addresses (brk and the mmap heap). That gives
+  // roughly one chance out of 2^15 for this test to flake.
+ const size_t kAreaRadius = 1<<29;
+ bool in_default_mmap_heap = ArePointersToSameArea(
+ ptr.get(), default_mmap_heap_address, kAreaRadius);
+ EXPECT_FALSE(in_default_mmap_heap);
+
+ bool in_default_brk_heap = ArePointersToSameArea(
+ ptr.get(), brk_heap_address, kAreaRadius);
+ EXPECT_FALSE(in_default_brk_heap);
+
+ // In the implementation, we always mask our random addresses with
+ // kRandomMask, so we use it as an additional detection mechanism.
+ const uintptr_t kRandomMask = 0x3fffffffffffULL;
+ bool impossible_random_address =
+ reinterpret_cast<uintptr_t>(ptr.get()) & ~kRandomMask;
+ EXPECT_FALSE(impossible_random_address);
}
#endif // (defined(OS_LINUX) || defined(OS_CHROMEOS)) && defined(__x86_64__)
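
The flake bound in the new test comment is plain interval arithmetic: a uniformly random page in a 2^46-byte space falls inside a fixed 2^30-byte window with probability 2^30 / 2^46 = 2^-16, and with two windows to avoid (brk and the default mmap area) the union bound gives roughly 2^-15. The sketch below replays the same check outside of gtest and scoped_ptr; it is illustrative only (main(), the hard-coded 4096-byte page size and the 1MB allocation mirror the test's assumptions and are not part of the patch):

    // Illustrative sketch only: assumes Linux x86_64 and 4096-byte pages,
    // like the test above.
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/mman.h>
    #include <unistd.h>

    // Same predicate as the test's ArePointersToSameArea: true if the two
    // pointers are at most |size| chars apart.
    static bool ArePointersToSameArea(void* ptr1, void* ptr2, size_t size) {
      char* lo = static_cast<char*>(ptr1 < ptr2 ? ptr1 : ptr2);
      char* hi = static_cast<char*>(ptr1 < ptr2 ? ptr2 : ptr1);
      return static_cast<size_t>(hi - lo) <= size;
    }

    int main() {
      const size_t kPageSize = 4096;
      const size_t kAreaRadius = 1 << 29;  // 512MB each way: a 1GB window.
      // Where an un-hinted anonymous mmap() would land.
      void* mmap_area = mmap(0, kPageSize, PROT_READ | PROT_WRITE,
                             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (mmap_area == MAP_FAILED)
        return 1;
      munmap(mmap_area, kPageSize);
      void* brk_area = sbrk(0);  // Current program break.
      // 1MB, large enough to bypass memory the allocator reserved early on.
      void* ptr = malloc(1 << 20);
      if (!ptr)
        return 1;
      printf("near default mmap area: %d\n",
             ArePointersToSameArea(ptr, mmap_area, kAreaRadius));
      printf("near brk area: %d\n",
             ArePointersToSameArea(ptr, brk_area, kAreaRadius));
      free(ptr);
      return 0;
    }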
diff --git a/third_party/tcmalloc/chromium/src/system-alloc.cc b/third_party/tcmalloc/chromium/src/system-alloc.cc
index 05338963..2141d413e 100644
--- a/third_party/tcmalloc/chromium/src/system-alloc.cc
+++ b/third_party/tcmalloc/chromium/src/system-alloc.cc
@@ -137,6 +137,18 @@ void raninit(ranctx* x, u4 seed) {
}
}
+// If the kernel cannot honor the hint in arch_get_unmapped_area_topdown, it
+// will simply ignore it. So we give a hint that has a good chance of
+// working.
+// The mmap top-down allocator will normally allocate below TASK_SIZE - gap,
+// with a gap that depends on the max stack size. See x86/mm/mmap.c. We
+// should make allocations that are below this area, which would be
+// 0x7ffbf8000000.
+// We use 0x3ffffffff000 as the mask so that we only "pollute" half of the
+// address space. In the unlikely case where fragmentation would become an
+// issue, the kernel will still have another half to use.
+const uint64_t kRandomAddressMask = 0x3ffffffff000ULL;
+
#endif // defined(ASLR_IS_SUPPORTED)
// Give a random "hint" that is suitable for use with mmap(). This cannot make
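
The mask keeps a hint page-aligned (low 12 bits clear) and below 2^46, i.e. in the lower half of the 47-bit x86_64 userland. Below is a minimal sketch of the combine-and-mask step that GetRandomAddrHint() performs; rand32() is a stand-in for the patch's ranval() PRNG, not a function in this file:

    #include <stdint.h>

    const uint64_t kMask = 0x3ffffffff000ULL;  // Same value as kRandomAddressMask.

    extern uint32_t rand32();  // Stand-in for ranval(&ctx); an assumption.

    void* RandomPageAlignedHint() {
      // Two 32-bit draws make one uniform 64-bit value.
      uint64_t random_address =
          (static_cast<uint64_t>(rand32()) << 32) | rand32();
      // The "and" only clears bits, so the result stays uniform over the
      // page-aligned addresses in [0, 2^46): no bias is introduced.
      random_address &= kMask;
      return reinterpret_cast<void*>(random_address);
    }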
@@ -177,22 +189,51 @@ void* GetRandomAddrHint() {
}
uint64_t random_address = (static_cast<uint64_t>(ranval(&ctx)) << 32) |
ranval(&ctx);
- // If the kernel cannot honor the hint in arch_get_unmapped_area_topdown, it
- // will simply ignore it. So we give a hint that has a good chance of
- // working.
- // The mmap top-down allocator will normally allocate below TASK_SIZE - gap,
- // with a gap that depends on the max stack size. See x86/mm/mmap.c. We
- // should make allocations that are below this area, which would be
- // 0x7ffbf8000000.
- // We use 0x3ffffffff000 as the mask so that we only "pollute" half of the
- // address space. In the unlikely case where fragmentation would become an
- // issue, the kernel will still have another half to use.
  // A bit-wise "and" won't bias our random distribution.
- random_address &= 0x3ffffffff000ULL;
+ random_address &= kRandomAddressMask;
return reinterpret_cast<void*>(random_address);
#endif // ASLR_IS_SUPPORTED
}
+// Allocate |length| bytes of memory using mmap(). The memory will be
+// readable and writable, but not executable.
+// Like mmap(), we will return MAP_FAILED on failure.
+// |is_aslr_enabled| controls address space layout randomization. When true, we
+// will put the first mapping at a random address and will then try to grow it.
+// If it's not possible to grow an existing mapping, a new one will be created.
+void* AllocWithMmap(size_t length, bool is_aslr_enabled) {
+ // Note: we are protected by the general TCMalloc_SystemAlloc spinlock.
+ static void* address_hint = NULL;
+#if defined(ASLR_IS_SUPPORTED)
+ if (is_aslr_enabled &&
+ (!address_hint ||
+ reinterpret_cast<uint64_t>(address_hint) & ~kRandomAddressMask)) {
+ address_hint = GetRandomAddrHint();
+ }
+#endif // ASLR_IS_SUPPORTED
+
+ // address_hint is likely to make us grow an existing mapping.
+ void* result = mmap(address_hint, length, PROT_READ|PROT_WRITE,
+ MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
+#if defined(ASLR_IS_SUPPORTED)
+ if (result == address_hint) {
+    // If mmap() succeeded at address_hint, our next mmap() will try to grow
+ // the current mapping as long as it's compatible with our ASLR mask.
+ // This has been done for performance reasons, see crbug.com/173371.
+    // It should be possible to strike a better balance between performance
+    // and security, but that is left for a later date.
+ // If this overflows, it could only set address_hint to NULL, which is
+ // what we want (and can't happen on the currently supported architecture).
+ address_hint = static_cast<char*>(result) + length;
+ } else {
+    // mmap() failed, or a collision prevented the kernel from honoring the
+    // hint; reset the hint.
+ address_hint = NULL;
+ }
+#endif // ASLR_IS_SUPPORTED
+ return result;
+}
+
} // Anonymous namespace to avoid name conflicts on "CheckAddressBits".
COMPILE_ASSERT(kAddressBits <= 8 * sizeof(void*),
@@ -405,14 +446,7 @@ void* MmapSysAllocator::Alloc(size_t size, size_t *actual_size,
// size + alignment < (1<<NBITS).
// and extra <= alignment
// therefore size + extra < (1<<NBITS)
- void* address_hint = NULL;
- if (FLAGS_malloc_random_allocator) {
- address_hint = GetRandomAddrHint();
- }
- void* result = mmap(address_hint, size + extra,
- PROT_READ|PROT_WRITE,
- MAP_PRIVATE|MAP_ANONYMOUS,
- -1, 0);
+ void* result = AllocWithMmap(size + extra, FLAGS_malloc_random_allocator);
if (result == reinterpret_cast<void*>(MAP_FAILED)) {
return NULL;
}
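
Taken together, MmapSysAllocator::Alloc now funnels through AllocWithMmap, which keeps one persistent hint: randomize once, bump the hint past each mapping the kernel placed where we asked so that consecutive allocations can extend the same VMA, and re-randomize whenever the hint is ignored or grows out of the masked range. Below is a standalone sketch of that strategy, not the committed code: it drops the TCMalloc internals and the ASLR_IS_SUPPORTED guards, and assumes single-threaded use where the real code holds a spinlock.

    #include <stddef.h>
    #include <stdint.h>
    #include <sys/mman.h>

    static const uint64_t kMask = 0x3ffffffff000ULL;  // kRandomAddressMask.

    extern void* RandomPageAlignedHint();  // e.g. the sketch above.

    // |length| should be a multiple of the page size, as TCMalloc's
    // requests are, so a grown hint stays page-aligned.
    void* AllocWithGrowableHint(size_t length) {
      static void* address_hint = NULL;
      // No hint yet, or the grown hint escaped the masked range:
      // pick a fresh random spot.
      if (!address_hint ||
          (reinterpret_cast<uint64_t>(address_hint) & ~kMask)) {
        address_hint = RandomPageAlignedHint();
      }
      void* result = mmap(address_hint, length, PROT_READ | PROT_WRITE,
                          MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (result == address_hint) {
        // Hint honored: aim the next mapping right past this one so the
        // kernel can simply extend the existing VMA.
        address_hint = static_cast<char*>(result) + length;
      } else {
        // mmap() failed or the kernel chose another address: re-randomize
        // on the next call.
        address_hint = NULL;
      }
      return result;
    }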