path: root/third_party/tcmalloc/chromium/src/base
Diffstat (limited to 'third_party/tcmalloc/chromium/src/base')
-rw-r--r--  third_party/tcmalloc/chromium/src/base/atomicops-internals-arm-gcc.h | 234
-rw-r--r--  third_party/tcmalloc/chromium/src/base/atomicops-internals-arm-generic.h | 3
-rw-r--r--  third_party/tcmalloc/chromium/src/base/atomicops-internals-arm-v6plus.h | 156
-rw-r--r--  third_party/tcmalloc/chromium/src/base/atomicops-internals-windows.h | 127
-rw-r--r--  third_party/tcmalloc/chromium/src/base/basictypes.h | 26
-rw-r--r--  third_party/tcmalloc/chromium/src/base/cycleclock.h | 39
-rw-r--r--  third_party/tcmalloc/chromium/src/base/dynamic_annotations.c | 18
-rw-r--r--  third_party/tcmalloc/chromium/src/base/dynamic_annotations.h | 38
-rw-r--r--  third_party/tcmalloc/chromium/src/base/elf_mem_image.cc | 433
-rw-r--r--  third_party/tcmalloc/chromium/src/base/elf_mem_image.h | 134
-rw-r--r--  third_party/tcmalloc/chromium/src/base/googleinit.h | 44
-rw-r--r--  third_party/tcmalloc/chromium/src/base/linux_syscall_support.h | 1375
-rw-r--r--  third_party/tcmalloc/chromium/src/base/logging.h | 8
-rw-r--r--  third_party/tcmalloc/chromium/src/base/low_level_alloc.cc | 4
-rw-r--r--  third_party/tcmalloc/chromium/src/base/spinlock_internal.cc | 25
-rw-r--r--  third_party/tcmalloc/chromium/src/base/spinlock_linux-inl.h | 5
-rw-r--r--  third_party/tcmalloc/chromium/src/base/spinlock_posix-inl.h | 2
-rw-r--r--  third_party/tcmalloc/chromium/src/base/spinlock_win32-inl.h | 7
-rw-r--r--  third_party/tcmalloc/chromium/src/base/stl_allocator.h | 1
-rw-r--r--  third_party/tcmalloc/chromium/src/base/sysinfo.cc | 12
-rw-r--r--  third_party/tcmalloc/chromium/src/base/vdso_support.cc | 390
-rw-r--r--  third_party/tcmalloc/chromium/src/base/vdso_support.h | 109
22 files changed, 1316 insertions, 1874 deletions
diff --git a/third_party/tcmalloc/chromium/src/base/atomicops-internals-arm-gcc.h b/third_party/tcmalloc/chromium/src/base/atomicops-internals-arm-gcc.h
deleted file mode 100644
index 423e993..0000000
--- a/third_party/tcmalloc/chromium/src/base/atomicops-internals-arm-gcc.h
+++ /dev/null
@@ -1,234 +0,0 @@
-/* Copyright (c) 2010, Google Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * ---
- * Author: Lei Zhang, Sasha Levitskiy
- */
-
-// This file is an internal atomic implementation, use base/atomicops.h instead.
-//
-// LinuxKernelCmpxchg and Barrier_AtomicIncrement are from Google Gears.
-
-#ifndef BASE_ATOMICOPS_INTERNALS_ARM_GCC_H_
-#define BASE_ATOMICOPS_INTERNALS_ARM_GCC_H_
-
-#include <stdio.h>
-#include "base/basictypes.h" // For COMPILE_ASSERT
-
-typedef int32_t Atomic32;
-
-namespace base {
-namespace subtle {
-
-typedef int64_t Atomic64;
-
-// 0xffff0fc0 is the hard coded address of a function provided by
-// the kernel which implements an atomic compare-exchange. On older
-// ARM architecture revisions (pre-v6) this may be implemented using
-// a syscall. This address is stable, and in active use (hard coded)
-// by at least glibc-2.7 and the Android C library.
-// pLinuxKernelCmpxchg has both acquire and release barrier semantics.
-typedef Atomic32 (*LinuxKernelCmpxchgFunc)(Atomic32 old_value,
- Atomic32 new_value,
- volatile Atomic32* ptr);
-LinuxKernelCmpxchgFunc pLinuxKernelCmpxchg __attribute__((weak)) =
- (LinuxKernelCmpxchgFunc) 0xffff0fc0;
-
-typedef void (*LinuxKernelMemoryBarrierFunc)(void);
-LinuxKernelMemoryBarrierFunc pLinuxKernelMemoryBarrier __attribute__((weak)) =
- (LinuxKernelMemoryBarrierFunc) 0xffff0fa0;
-
-
-inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value,
- Atomic32 new_value) {
- Atomic32 prev_value = *ptr;
- do {
- if (!pLinuxKernelCmpxchg(old_value, new_value,
- const_cast<Atomic32*>(ptr))) {
- return old_value;
- }
- prev_value = *ptr;
- } while (prev_value == old_value);
- return prev_value;
-}
-
-inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
- Atomic32 new_value) {
- Atomic32 old_value;
- do {
- old_value = *ptr;
- } while (pLinuxKernelCmpxchg(old_value, new_value,
- const_cast<Atomic32*>(ptr)));
- return old_value;
-}
-
-inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
- Atomic32 increment) {
- for (;;) {
- // Atomic exchange the old value with an incremented one.
- Atomic32 old_value = *ptr;
- Atomic32 new_value = old_value + increment;
- if (pLinuxKernelCmpxchg(old_value, new_value,
- const_cast<Atomic32*>(ptr)) == 0) {
- // The exchange took place as expected.
- return new_value;
- }
- // Otherwise, *ptr changed mid-loop and we need to retry.
- }
-}
-
-inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
- Atomic32 increment) {
- return Barrier_AtomicIncrement(ptr, increment);
-}
-
-inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value,
- Atomic32 new_value) {
- return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-}
-
-inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value,
- Atomic32 new_value) {
- return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-}
-
-inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
- *ptr = value;
-}
-
-inline void MemoryBarrier() {
- pLinuxKernelMemoryBarrier();
-}
-
-inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
- *ptr = value;
- MemoryBarrier();
-}
-
-inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
- MemoryBarrier();
- *ptr = value;
-}
-
-inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
- return *ptr;
-}
-
-inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
- Atomic32 value = *ptr;
- MemoryBarrier();
- return value;
-}
-
-inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
- MemoryBarrier();
- return *ptr;
-}
-
-
-// 64-bit versions are not implemented yet.
-
-inline void NotImplementedFatalError(const char *function_name) {
- fprintf(stderr, "64-bit %s() not implemented on this platform\n",
- function_name);
- abort();
-}
-
-inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
- Atomic64 old_value,
- Atomic64 new_value) {
- NotImplementedFatalError("NoBarrier_CompareAndSwap");
- return 0;
-}
-
-inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
- Atomic64 new_value) {
- NotImplementedFatalError("NoBarrier_AtomicExchange");
- return 0;
-}
-
-inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
- Atomic64 increment) {
- NotImplementedFatalError("NoBarrier_AtomicIncrement");
- return 0;
-}
-
-inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
- Atomic64 increment) {
- NotImplementedFatalError("Barrier_AtomicIncrement");
- return 0;
-}
-
-inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
- NotImplementedFatalError("NoBarrier_Store");
-}
-
-inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
- NoBarrier_AtomicExchange(ptr, value);
- // acts as a barrier in this implementation
-}
-
-inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
- NotImplementedFatalError("Release_Store");
-}
-
-inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
- NotImplementedFatalError("NoBarrier_Load");
- return 0;
-}
-
-inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
- Atomic64 value = NoBarrier_Load(ptr);
- return value;
-}
-
-inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
- MemoryBarrier();
- return NoBarrier_Load(ptr);
-}
-
-inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
- Atomic64 old_value,
- Atomic64 new_value) {
- return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-}
-
-inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
- Atomic64 old_value,
- Atomic64 new_value) {
- return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-}
-
-} // namespace base::subtle
-} // namespace base
-
-#endif // BASE_ATOMICOPS_INTERNALS_ARM_GCC_H_
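For context, a minimal sketch of how the Atomic32 API implemented by this deleted header is consumed; callers always go through base/atomicops.h rather than an internals header. TinySpinLock is a hypothetical name for illustration, not code from this tree.

#include "base/atomicops.h"

class TinySpinLock {
 public:
  TinySpinLock() : state_(0) {}
  void Lock() {
    // Acquire_CompareAndSwap returns the value previously stored in *ptr,
    // so the lock is ours only when that previous value was 0 (unlocked).
    while (base::subtle::Acquire_CompareAndSwap(&state_, 0, 1) != 0) {
      // Spin. A production lock would yield or futex-wait here.
    }
  }
  void Unlock() {
    // Release semantics: writes made inside the critical section become
    // visible before other threads can observe the lock as free.
    base::subtle::Release_Store(&state_, 0);
  }
 private:
  volatile Atomic32 state_;
};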
diff --git a/third_party/tcmalloc/chromium/src/base/atomicops-internals-arm-generic.h b/third_party/tcmalloc/chromium/src/base/atomicops-internals-arm-generic.h
index 7882b0d..4acb76a 100644
--- a/third_party/tcmalloc/chromium/src/base/atomicops-internals-arm-generic.h
+++ b/third_party/tcmalloc/chromium/src/base/atomicops-internals-arm-generic.h
@@ -39,8 +39,7 @@
#include <stdio.h>
#include <stdlib.h>
-#include "base/macros.h" // For COMPILE_ASSERT
-#include "base/port.h" // ATTRIBUTE_WEAK
+#include "base/basictypes.h"
typedef int32_t Atomic32;
diff --git a/third_party/tcmalloc/chromium/src/base/atomicops-internals-arm-v6plus.h b/third_party/tcmalloc/chromium/src/base/atomicops-internals-arm-v6plus.h
index ee09f32..8d5b9b5 100644
--- a/third_party/tcmalloc/chromium/src/base/atomicops-internals-arm-v6plus.h
+++ b/third_party/tcmalloc/chromium/src/base/atomicops-internals-arm-v6plus.h
@@ -42,6 +42,13 @@
#include <stdlib.h>
#include "base/basictypes.h" // For COMPILE_ASSERT
+// The LDREXD and STREXD instructions are available in all ARM v7 variants and
+// above. In v6, only some variants support them. For simplicity, we only use
+// exclusive 64-bit load/store on v7 or above.
+#if defined(ARMV7)
+# define BASE_ATOMICOPS_HAS_LDREXD_AND_STREXD
+#endif
+
typedef int32_t Atomic32;
namespace base {
@@ -60,6 +67,10 @@ inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
"ldrex %1, [%3]\n"
"mov %0, #0\n"
"teq %1, %4\n"
+ // The following IT (if-then) instruction is needed for the subsequent
+ // conditional instruction STREXEQ when compiling in THUMB mode.
+ // In ARM mode, the compiler/assembler will not generate any code for it.
+ "it eq\n"
"strexeq %0, %5, [%3]\n"
: "=&r" (res), "=&r" (oldval), "+Qo" (*ptr)
: "r" (ptr), "Ir" (old_value), "r" (new_value)
@@ -164,7 +175,114 @@ inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
return *ptr;
}
-// 64-bit versions are not implemented yet.
+// 64-bit versions are only available if LDREXD and STREXD instructions
+// are available.
+#ifdef BASE_ATOMICOPS_HAS_LDREXD_AND_STREXD
+
+#define BASE_HAS_ATOMIC64 1
+
+inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value,
+ Atomic64 new_value) {
+ Atomic64 oldval, res;
+ do {
+ __asm__ __volatile__(
+ "ldrexd %1, [%3]\n"
+ "mov %0, #0\n"
+ "teq %Q1, %Q4\n"
+ // The following IT (if-then) instructions are needed for the subsequent
+ // conditional instructions when compiling in THUMB mode.
+  // In ARM mode, the compiler/assembler will not generate any code for them.
+ "it eq\n"
+ "teqeq %R1, %R4\n"
+ "it eq\n"
+ "strexdeq %0, %5, [%3]\n"
+ : "=&r" (res), "=&r" (oldval), "+Q" (*ptr)
+ : "r" (ptr), "Ir" (old_value), "r" (new_value)
+ : "cc");
+ } while (res);
+ return oldval;
+}
+
+inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
+ Atomic64 new_value) {
+ int store_failed;
+ Atomic64 old;
+ __asm__ __volatile__(
+ "1:\n"
+ "ldrexd %1, [%2]\n"
+ "strexd %0, %3, [%2]\n"
+ "teq %0, #0\n"
+ "bne 1b"
+ : "=&r" (store_failed), "=&r" (old)
+ : "r" (ptr), "r" (new_value)
+ : "cc", "memory");
+ return old;
+}
+
+inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
+ Atomic64 increment) {
+ int store_failed;
+ Atomic64 res;
+ __asm__ __volatile__(
+ "1:\n"
+ "ldrexd %1, [%2]\n"
+ "adds %Q1, %Q1, %Q3\n"
+ "adc %R1, %R1, %R3\n"
+ "strexd %0, %1, [%2]\n"
+ "teq %0, #0\n"
+ "bne 1b"
+ : "=&r" (store_failed), "=&r"(res)
+ : "r" (ptr), "r"(increment)
+ : "cc", "memory");
+ return res;
+}
+
+inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
+ Atomic64 increment) {
+ int store_failed;
+ Atomic64 res;
+ __asm__ __volatile__(
+ "1:\n"
+ "ldrexd %1, [%2]\n"
+ "adds %Q1, %Q1, %Q3\n"
+ "adc %R1, %R1, %R3\n"
+ "dmb\n"
+ "strexd %0, %1, [%2]\n"
+ "teq %0, #0\n"
+ "bne 1b"
+ : "=&r" (store_failed), "=&r"(res)
+ : "r" (ptr), "r"(increment)
+ : "cc", "memory");
+ return res;
+}
+
+inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
+ int store_failed;
+ Atomic64 dummy;
+ __asm__ __volatile__(
+ "1:\n"
+ // Dummy load to lock cache line.
+ "ldrexd %1, [%3]\n"
+ "strexd %0, %2, [%3]\n"
+ "teq %0, #0\n"
+ "bne 1b"
+ : "=&r" (store_failed), "=&r"(dummy)
+ : "r"(value), "r" (ptr)
+ : "cc", "memory");
+}
+
+inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
+ Atomic64 res;
+ __asm__ __volatile__(
+ "ldrexd %0, [%1]\n"
+ "clrex\n"
+ : "=r" (res)
+ : "r"(ptr), "Q"(*ptr));
+ return res;
+}
+
+#else // BASE_ATOMICOPS_HAS_LDREXD_AND_STREXD
inline void NotImplementedFatalError(const char *function_name) {
fprintf(stderr, "64-bit %s() not implemented on this platform\n",
@@ -201,41 +319,47 @@ inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
NotImplementedFatalError("NoBarrier_Store");
}
-inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
- NotImplementedFatalError("Acquire_Store64");
+inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
+ NotImplementedFatalError("NoBarrier_Load");
+ return 0;
}
-inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
- NotImplementedFatalError("Release_Store");
+#endif // BASE_ATOMICOPS_HAS_LDREXD_AND_STREXD
+
+inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
+ NoBarrier_Store(ptr, value);
+ MemoryBarrier();
}
-inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
- NotImplementedFatalError("NoBarrier_Load");
- return 0;
+inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
+ MemoryBarrier();
+ NoBarrier_Store(ptr, value);
}
inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
- NotImplementedFatalError("Atomic64 Acquire_Load");
- return 0;
+ Atomic64 value = NoBarrier_Load(ptr);
+ MemoryBarrier();
+ return value;
}
inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
- NotImplementedFatalError("Atomic64 Release_Load");
- return 0;
+ MemoryBarrier();
+ return NoBarrier_Load(ptr);
}
inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
- NotImplementedFatalError("Atomic64 Acquire_CompareAndSwap");
- return 0;
+ Atomic64 value = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+ MemoryBarrier();
+ return value;
}
inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
- NotImplementedFatalError("Atomic64 Release_CompareAndSwap");
- return 0;
+ MemoryBarrier();
+ return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
} // namespace subtle ends
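A hedged sketch of what the new LDREXD/STREXD path provides to clients: a 64-bit counter that stays atomic on a 32-bit ARMv7 core. The counter and function names below are illustrative assumptions, not code from this patch.

#include <stddef.h>
#include <stdint.h>
#include "base/atomicops.h"

// A 64-bit byte counter; NoBarrier_AtomicIncrement compiles to an
// ldrexd/strexd retry loop on ARMv7, so updates are never torn.
static base::subtle::Atomic64 g_bytes_allocated = 0;

void RecordAllocation(size_t bytes) {
  base::subtle::NoBarrier_AtomicIncrement(
      &g_bytes_allocated, static_cast<base::subtle::Atomic64>(bytes));
}

int64_t BytesAllocatedSoFar() {
  // NoBarrier_Load reads both 32-bit halves with a single ldrexd, then
  // clrex releases the exclusive monitor without storing anything.
  return base::subtle::NoBarrier_Load(&g_bytes_allocated);
}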
diff --git a/third_party/tcmalloc/chromium/src/base/atomicops-internals-windows.h b/third_party/tcmalloc/chromium/src/base/atomicops-internals-windows.h
index 58782a17..bd42c82 100644
--- a/third_party/tcmalloc/chromium/src/base/atomicops-internals-windows.h
+++ b/third_party/tcmalloc/chromium/src/base/atomicops-internals-windows.h
@@ -55,28 +55,74 @@ typedef int64 Atomic64;
// 32-bit low-level operations on any platform
+extern "C" {
+// We use windows intrinsics when we can (they seem to be supported
+// well on MSVC 8.0 and above). Unfortunately, in some
+// environments, <windows.h> and <intrin.h> have conflicting
+// declarations of some other intrinsics, breaking compilation:
+// http://connect.microsoft.com/VisualStudio/feedback/details/262047
+// Therefore, we simply declare the relevant intrinsics ourself.
+
// MinGW has a bug in the header files where it doesn't indicate the
// first argument is volatile -- they're not up to date. See
// http://readlist.com/lists/lists.sourceforge.net/mingw-users/0/3861.html
// We have to const_cast away the volatile to avoid compiler warnings.
// TODO(csilvers): remove this once MinGW has updated MinGW/include/winbase.h
-#ifdef __MINGW32__
-inline LONG InterlockedCompareExchange(volatile LONG* ptr,
- LONG newval, LONG oldval) {
+#if defined(__MINGW32__)
+inline LONG FastInterlockedCompareExchange(volatile LONG* ptr,
+ LONG newval, LONG oldval) {
return ::InterlockedCompareExchange(const_cast<LONG*>(ptr), newval, oldval);
}
-inline LONG InterlockedExchange(volatile LONG* ptr, LONG newval) {
+inline LONG FastInterlockedExchange(volatile LONG* ptr, LONG newval) {
return ::InterlockedExchange(const_cast<LONG*>(ptr), newval);
}
-inline LONG InterlockedExchangeAdd(volatile LONG* ptr, LONG increment) {
+inline LONG FastInterlockedExchangeAdd(volatile LONG* ptr, LONG increment) {
return ::InterlockedExchangeAdd(const_cast<LONG*>(ptr), increment);
}
+
+#elif _MSC_VER >= 1400 // intrinsics didn't work so well before MSVC 8.0
+// Unfortunately, in some environments, <windows.h> and <intrin.h>
+// have conflicting declarations of some intrinsics, breaking
+// compilation. So we declare the intrinsics we need ourselves. See
+// http://connect.microsoft.com/VisualStudio/feedback/details/262047
+LONG _InterlockedCompareExchange(volatile LONG* ptr, LONG newval, LONG oldval);
+#pragma intrinsic(_InterlockedCompareExchange)
+inline LONG FastInterlockedCompareExchange(volatile LONG* ptr,
+ LONG newval, LONG oldval) {
+ return _InterlockedCompareExchange(ptr, newval, oldval);
+}
+
+LONG _InterlockedExchange(volatile LONG* ptr, LONG newval);
+#pragma intrinsic(_InterlockedExchange)
+inline LONG FastInterlockedExchange(volatile LONG* ptr, LONG newval) {
+ return _InterlockedExchange(ptr, newval);
+}
+
+LONG _InterlockedExchangeAdd(volatile LONG* ptr, LONG increment);
+#pragma intrinsic(_InterlockedExchangeAdd)
+inline LONG FastInterlockedExchangeAdd(volatile LONG* ptr, LONG increment) {
+ return _InterlockedExchangeAdd(ptr, increment);
+}
+
+#else
+inline LONG FastInterlockedCompareExchange(volatile LONG* ptr,
+ LONG newval, LONG oldval) {
+ return ::InterlockedCompareExchange(ptr, newval, oldval);
+}
+inline LONG FastInterlockedExchange(volatile LONG* ptr, LONG newval) {
+ return ::InterlockedExchange(ptr, newval);
+}
+inline LONG FastInterlockedExchangeAdd(volatile LONG* ptr, LONG increment) {
+ return ::InterlockedExchangeAdd(ptr, increment);
+}
+
#endif // ifdef __MINGW32__
+} // extern "C"
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
- LONG result = InterlockedCompareExchange(
+ LONG result = FastInterlockedCompareExchange(
reinterpret_cast<volatile LONG*>(ptr),
static_cast<LONG>(new_value),
static_cast<LONG>(old_value));
@@ -85,7 +131,7 @@ inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
- LONG result = InterlockedExchange(
+ LONG result = FastInterlockedExchange(
reinterpret_cast<volatile LONG*>(ptr),
static_cast<LONG>(new_value));
return static_cast<Atomic32>(result);
@@ -93,7 +139,7 @@ inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
- return InterlockedExchangeAdd(
+ return FastInterlockedExchangeAdd(
reinterpret_cast<volatile LONG*>(ptr),
static_cast<LONG>(increment)) + increment;
}
@@ -173,27 +219,68 @@ inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
COMPILE_ASSERT(sizeof(Atomic64) == sizeof(PVOID), atomic_word_is_atomic);
-// Like for the __MINGW32__ case above, this works around a header
-// error in mingw, where it's missing 'volatile'.
-#ifdef __MINGW64__
-inline PVOID InterlockedCompareExchangePointer(volatile PVOID* ptr,
- PVOID newval, PVOID oldval) {
+// These are the intrinsics needed for 64-bit operations. Similar to the
+// 32-bit case above.
+
+extern "C" {
+#if defined(__MINGW64__)
+inline PVOID FastInterlockedCompareExchangePointer(volatile PVOID* ptr,
+ PVOID newval, PVOID oldval) {
return ::InterlockedCompareExchangePointer(const_cast<PVOID*>(ptr),
newval, oldval);
}
-inline PVOID InterlockedExchangePointer(volatile PVOID* ptr, PVOID newval) {
+inline PVOID FastInterlockedExchangePointer(volatile PVOID* ptr, PVOID newval) {
return ::InterlockedExchangePointer(const_cast<PVOID*>(ptr), newval);
}
-inline LONGLONG InterlockedExchangeAdd64(volatile LONGLONG* ptr,
- LONGLONG increment) {
+inline LONGLONG FastInterlockedExchangeAdd64(volatile LONGLONG* ptr,
+ LONGLONG increment) {
return ::InterlockedExchangeAdd64(const_cast<LONGLONG*>(ptr), increment);
}
+
+#elif _MSC_VER >= 1400 // intrinsics didn't work so well before MSVC 8.0
+// Like above, we need to declare the intrinsics ourselves.
+PVOID _InterlockedCompareExchangePointer(volatile PVOID* ptr,
+ PVOID newval, PVOID oldval);
+#pragma intrinsic(_InterlockedCompareExchangePointer)
+inline PVOID FastInterlockedCompareExchangePointer(volatile PVOID* ptr,
+ PVOID newval, PVOID oldval) {
+ return _InterlockedCompareExchangePointer(const_cast<PVOID*>(ptr),
+ newval, oldval);
+}
+
+PVOID _InterlockedExchangePointer(volatile PVOID* ptr, PVOID newval);
+#pragma intrinsic(_InterlockedExchangePointer)
+inline PVOID FastInterlockedExchangePointer(volatile PVOID* ptr, PVOID newval) {
+ return _InterlockedExchangePointer(const_cast<PVOID*>(ptr), newval);
+}
+
+LONGLONG _InterlockedExchangeAdd64(volatile LONGLONG* ptr, LONGLONG increment);
+#pragma intrinsic(_InterlockedExchangeAdd64)
+inline LONGLONG FastInterlockedExchangeAdd64(volatile LONGLONG* ptr,
+ LONGLONG increment) {
+ return _InterlockedExchangeAdd64(const_cast<LONGLONG*>(ptr), increment);
+}
+
+#else
+inline PVOID FastInterlockedCompareExchangePointer(volatile PVOID* ptr,
+ PVOID newval, PVOID oldval) {
+ return ::InterlockedCompareExchangePointer(ptr, newval, oldval);
+}
+inline PVOID FastInterlockedExchangePointer(volatile PVOID* ptr, PVOID newval) {
+ return ::InterlockedExchangePointer(ptr, newval);
+}
+inline LONGLONG FastInterlockedExchangeAdd64(volatile LONGLONG* ptr,
+ LONGLONG increment) {
+ return ::InterlockedExchangeAdd64(ptr, increment);
+}
+
#endif // ifdef __MINGW64__
+} // extern "C"
inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
- PVOID result = InterlockedCompareExchangePointer(
+ PVOID result = FastInterlockedCompareExchangePointer(
reinterpret_cast<volatile PVOID*>(ptr),
reinterpret_cast<PVOID>(new_value), reinterpret_cast<PVOID>(old_value));
return reinterpret_cast<Atomic64>(result);
@@ -201,7 +288,7 @@ inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
- PVOID result = InterlockedExchangePointer(
+ PVOID result = FastInterlockedExchangePointer(
reinterpret_cast<volatile PVOID*>(ptr),
reinterpret_cast<PVOID>(new_value));
return reinterpret_cast<Atomic64>(result);
@@ -209,7 +296,7 @@ inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
Atomic64 increment) {
- return InterlockedExchangeAdd64(
+ return FastInterlockedExchangeAdd64(
reinterpret_cast<volatile LONGLONG*>(ptr),
static_cast<LONGLONG>(increment)) + increment;
}
@@ -258,7 +345,7 @@ inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
// 64-bit low-level operations on 32-bit platform
// TODO(vchen): The GNU assembly below must be converted to MSVC inline
-// assembly. Then the file should be renamed to ...-x86-mscv.h, probably.
+// assembly. Then the file should be renamed to ...-x86-msvc.h, probably.
inline void NotImplementedFatalError(const char *function_name) {
fprintf(stderr, "64-bit %s() not implemented on this platform\n",
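The "declare the intrinsic yourself" workaround used throughout this file generalizes to other intrinsics; a minimal standalone sketch, assuming MSVC 8.0 or later where _InterlockedIncrement is available as a compiler intrinsic (the Fast* wrapper name follows the patch's convention):

#include <windows.h>

extern "C" {
// Declaring the intrinsic directly avoids including <intrin.h>, which can
// conflict with <windows.h> declarations in some environments.
LONG _InterlockedIncrement(volatile LONG* ptr);
#pragma intrinsic(_InterlockedIncrement)
}

inline LONG FastInterlockedIncrement(volatile LONG* ptr) {
  return _InterlockedIncrement(ptr);
}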
diff --git a/third_party/tcmalloc/chromium/src/base/basictypes.h b/third_party/tcmalloc/chromium/src/base/basictypes.h
index 0f21fca..75b7b5a 100644
--- a/third_party/tcmalloc/chromium/src/base/basictypes.h
+++ b/third_party/tcmalloc/chromium/src/base/basictypes.h
@@ -31,6 +31,7 @@
#define _BASICTYPES_H_
#include <config.h>
+#include <string.h> // for memcpy()
#ifdef HAVE_INTTYPES_H
#include <inttypes.h> // gets us PRId64, etc
#endif
@@ -193,6 +194,28 @@ struct CompileAssert {
(reinterpret_cast<char*>(&reinterpret_cast<strct*>(16)->field) - \
reinterpret_cast<char*>(16))
+// bit_cast<Dest,Source> implements the equivalent of
+// "*reinterpret_cast<Dest*>(&source)".
+//
+// The reinterpret_cast method would produce undefined behavior
+// according to ISO C++ specification section 3.10, paragraph 15.
+// bit_cast<> calls memcpy() which is blessed by the standard,
+// especially by the example in section 3.9.
+//
+// Fortunately memcpy() is very fast. In optimized mode, with a
+// constant size, gcc 2.95.3, gcc 4.0.1, and msvc 7.1 produce inline
+// code with the minimal amount of data movement. On a 32-bit system,
+// memcpy(d,s,4) compiles to one load and one store, and memcpy(d,s,8)
+// compiles to two loads and two stores.
+
+template <class Dest, class Source>
+inline Dest bit_cast(const Source& source) {
+ COMPILE_ASSERT(sizeof(Dest) == sizeof(Source), bitcasting_unequal_sizes);
+ Dest dest;
+ memcpy(&dest, &source, sizeof(dest));
+ return dest;
+}
+
#ifdef HAVE___ATTRIBUTE__
# define ATTRIBUTE_WEAK __attribute__((weak))
# define ATTRIBUTE_NOINLINE __attribute__((noinline))
@@ -309,8 +332,7 @@ class AssignAttributeStartEnd {
#endif // HAVE___ATTRIBUTE__ and __ELF__ or __MACH__
#if defined(HAVE___ATTRIBUTE__) && (defined(__i386__) || defined(__x86_64__))
-# define CACHELINE_SIZE 64
-# define CACHELINE_ALIGNED __attribute__((aligned(CACHELINE_SIZE)))
+# define CACHELINE_ALIGNED __attribute__((aligned(64)))
#else
# define CACHELINE_ALIGNED
#endif // defined(HAVE___ATTRIBUTE__) && (__i386__ || __x86_64__)
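A quick usage illustration of the bit_cast<> added above; the float-to-bits example is an assumption of mine, not part of the patch.

#include <stdio.h>
#include "base/basictypes.h"

int main() {
  // Reinterpret a float's bytes as a uint32 without the undefined behavior
  // of *reinterpret_cast<uint32*>(&f). The COMPILE_ASSERT inside bit_cast
  // rejects mismatched sizes at compile time.
  float f = 1.0f;
  uint32 bits = bit_cast<uint32>(f);
  printf("%#x\n", bits);  // 0x3f800000 on IEEE-754 platforms
  return 0;
}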
diff --git a/third_party/tcmalloc/chromium/src/base/cycleclock.h b/third_party/tcmalloc/chromium/src/base/cycleclock.h
index 6d6822a..0ce1638 100644
--- a/third_party/tcmalloc/chromium/src/base/cycleclock.h
+++ b/third_party/tcmalloc/chromium/src/base/cycleclock.h
@@ -47,21 +47,30 @@
#include "base/basictypes.h" // make sure we get the def for int64
#include "base/arm_instruction_set_select.h"
+// base/sysinfo.h is really big and we don't want to include it unless
+// it is necessary.
+#if defined(__arm__) || defined(__mips__)
+# include "base/sysinfo.h"
+#endif
#if defined(__MACH__) && defined(__APPLE__)
# include <mach/mach_time.h>
#endif
-// For MSVC, we want the __rdtsc intrinsic, declared in <intrin.h>.
-// Unfortunately, in some environments, <windows.h> and <intrin.h> have
-// conflicting declarations of some other intrinsics, breaking compilation.
+// For MSVC, we want to use '_asm rdtsc' when possible (since it works
+// with even ancient MSVC compilers), and when not possible the
+// __rdtsc intrinsic, declared in <intrin.h>. Unfortunately, in some
+// environments, <windows.h> and <intrin.h> have conflicting
+// declarations of some other intrinsics, breaking compilation.
// Therefore, we simply declare __rdtsc ourselves. See also
// http://connect.microsoft.com/VisualStudio/feedback/details/262047
-#if defined(_MSC_VER)
+#if defined(_MSC_VER) && !defined(_M_IX86)
extern "C" uint64 __rdtsc();
#pragma intrinsic(__rdtsc)
#endif
+#if defined(ARMV3) || defined(__mips__)
#ifdef HAVE_SYS_TIME_H
#include <sys/time.h>
#endif
+#endif
// NOTE: only i386 and x86_64 have been well tested.
// PPC, sparc, alpha, and ia64 are based on
@@ -108,6 +117,12 @@ struct CycleClock {
int64 itc;
asm("mov %0 = ar.itc" : "=r" (itc));
return itc;
+#elif defined(_MSC_VER) && defined(_M_IX86)
+ // Older MSVC compilers (like 7.x) don't seem to support the
+ // __rdtsc intrinsic properly, so I prefer to use _asm instead
+ // when I know it will work. Otherwise, I'll use __rdtsc and hope
+ // the code is being compiled with a non-ancient compiler.
+ _asm rdtsc
#elif defined(_MSC_VER)
return __rdtsc();
#elif defined(ARMV3)
@@ -116,11 +131,11 @@ struct CycleClock {
uint32 pmuseren;
uint32 pmcntenset;
// Read the user mode perf monitor counter access permissions.
- asm("mrc p15, 0, %0, c9, c14, 0" : "=r" (pmuseren));
+ asm volatile ("mrc p15, 0, %0, c9, c14, 0" : "=r" (pmuseren));
if (pmuseren & 1) { // Allows reading perfmon counters for user mode code.
- asm("mrc p15, 0, %0, c9, c12, 1" : "=r" (pmcntenset));
+ asm volatile ("mrc p15, 0, %0, c9, c12, 1" : "=r" (pmcntenset));
if (pmcntenset & 0x80000000ul) { // Is it counting?
- asm("mrc p15, 0, %0, c9, c13, 0" : "=r" (pmccntr));
+ asm volatile ("mrc p15, 0, %0, c9, c13, 0" : "=r" (pmccntr));
// The counter is set up to count every 64th cycle
return static_cast<int64>(pmccntr) * 64; // Should optimize to << 6
}
@@ -128,7 +143,15 @@ struct CycleClock {
#endif
struct timeval tv;
gettimeofday(&tv, NULL);
- return static_cast<int64>(tv.tv_sec) * 1000000 + tv.tv_usec;
+ return static_cast<int64>((tv.tv_sec + tv.tv_usec * 0.000001)
+ * CyclesPerSecond());
+#elif defined(__mips__)
+ // mips apparently only allows rdtsc for superusers, so we fall
+ // back to gettimeofday. It's possible clock_gettime would be better.
+ struct timeval tv;
+ gettimeofday(&tv, NULL);
+ return static_cast<int64>((tv.tv_sec + tv.tv_usec * 0.000001)
+ * CyclesPerSecond());
#else
// The soft failover to a generic implementation is automatic only for ARM.
// For other platforms the developer is expected to make an attempt to create
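On the new ARM/MIPS fallback paths, Now() returns gettimeofday() scaled by CyclesPerSecond(), so results stay in cycle units. Below is a sketch of the inverse conversion a caller would do, assuming CyclesPerSecond() from base/sysinfo.h returns the CPU frequency in cycles per second:

#include "base/basictypes.h"
#include "base/cycleclock.h"
#include "base/sysinfo.h"  // for CyclesPerSecond()

// Convert two CycleClock::Now() samples into elapsed wall-clock seconds.
// This simply inverts the tv_sec/tv_usec * CyclesPerSecond() scaling above.
double ElapsedSeconds(int64 start_cycles, int64 end_cycles) {
  return static_cast<double>(end_cycles - start_cycles) / CyclesPerSecond();
}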
diff --git a/third_party/tcmalloc/chromium/src/base/dynamic_annotations.c b/third_party/tcmalloc/chromium/src/base/dynamic_annotations.c
index 1005f90..c8b61be 100644
--- a/third_party/tcmalloc/chromium/src/base/dynamic_annotations.c
+++ b/third_party/tcmalloc/chromium/src/base/dynamic_annotations.c
@@ -50,10 +50,19 @@
# endif
#endif
+/* Compiler-based ThreadSanitizer defines
+ DYNAMIC_ANNOTATIONS_EXTERNAL_IMPL = 1
+ and provides its own definitions of the functions. */
+
+#ifndef DYNAMIC_ANNOTATIONS_EXTERNAL_IMPL
+# define DYNAMIC_ANNOTATIONS_EXTERNAL_IMPL 0
+#endif
+
/* Each function is empty and called (via a macro) only in debug mode.
The arguments are captured by dynamic tools at runtime. */
-#if DYNAMIC_ANNOTATIONS_ENABLED == 1
+#if DYNAMIC_ANNOTATIONS_ENABLED == 1 \
+ && DYNAMIC_ANNOTATIONS_EXTERNAL_IMPL == 0
void AnnotateRWLockCreate(const char *file, int line,
const volatile void *lock){}
@@ -122,7 +131,10 @@ void AnnotateNoOp(const char *file, int line,
const volatile void *arg){}
void AnnotateFlushState(const char *file, int line){}
-#endif /* DYNAMIC_ANNOTATIONS_ENABLED == 1 */
+#endif /* DYNAMIC_ANNOTATIONS_ENABLED == 1
+ && DYNAMIC_ANNOTATIONS_EXTERNAL_IMPL == 0 */
+
+#if DYNAMIC_ANNOTATIONS_EXTERNAL_IMPL == 0
static int GetRunningOnValgrind(void) {
#ifdef RUNNING_ON_VALGRIND
@@ -159,6 +171,8 @@ int RunningOnValgrind(void) {
return local_running_on_valgrind;
}
+#endif /* DYNAMIC_ANNOTATIONS_EXTERNAL_IMPL == 0 */
+
/* See the comments in dynamic_annotations.h */
double ValgrindSlowdown(void) {
/* Same initialization hack as in RunningOnValgrind(). */
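For reference, a small sketch of how callers use RunningOnValgrind(), which this file now defines only when DYNAMIC_ANNOTATIONS_EXTERNAL_IMPL is 0; the harness function is a hypothetical example.

#include "base/dynamic_annotations.h"

// Valgrind-based tools slow execution by an order of magnitude or more,
// so test harnesses commonly scale their workloads down under them.
static int ChooseIterationCount() {
  return RunningOnValgrind() ? 1000 : 1000000;
}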
diff --git a/third_party/tcmalloc/chromium/src/base/dynamic_annotations.h b/third_party/tcmalloc/chromium/src/base/dynamic_annotations.h
index 811bb5e..4669315 100644
--- a/third_party/tcmalloc/chromium/src/base/dynamic_annotations.h
+++ b/third_party/tcmalloc/chromium/src/base/dynamic_annotations.h
@@ -378,9 +378,14 @@
#define ANNOTALYSIS_STATIC_INLINE
#define ANNOTALYSIS_SEMICOLON_OR_EMPTY_BODY ;
+#define ANNOTALYSIS_IGNORE_READS_BEGIN
+#define ANNOTALYSIS_IGNORE_READS_END
+#define ANNOTALYSIS_IGNORE_WRITES_BEGIN
+#define ANNOTALYSIS_IGNORE_WRITES_END
+#define ANNOTALYSIS_UNPROTECTED_READ
-#if defined(__GNUC__) && defined(__SUPPORT_TS_ANNOTATION__) \
- && (!defined(SWIG)) && defined(__SUPPORT_DYN_ANNOTATION__)
+#if defined(__GNUC__) && (!defined(SWIG)) && (!defined(__clang__)) && \
+ defined(__SUPPORT_TS_ANNOTATION__) && defined(__SUPPORT_DYN_ANNOTATION__)
#if DYNAMIC_ANNOTATIONS_ENABLED == 0
#define ANNOTALYSIS_ONLY 1
@@ -389,22 +394,23 @@
#undef ANNOTALYSIS_SEMICOLON_OR_EMPTY_BODY
#define ANNOTALYSIS_SEMICOLON_OR_EMPTY_BODY { (void)file; (void)line; }
#endif
-#define ANNOTALYSIS_IGNORE_READS_BEGIN __attribute__ ((ignore_reads_begin))
-#define ANNOTALYSIS_IGNORE_READS_END __attribute__ ((ignore_reads_end))
-#define ANNOTALYSIS_IGNORE_WRITES_BEGIN __attribute__ ((ignore_writes_begin))
-#define ANNOTALYSIS_IGNORE_WRITES_END __attribute__ ((ignore_writes_end))
-#define ANNOTALYSIS_UNPROTECTED_READ __attribute__ ((unprotected_read))
-
-#else
-
-#define ANNOTALYSIS_IGNORE_READS_BEGIN
-#define ANNOTALYSIS_IGNORE_READS_END
-#define ANNOTALYSIS_IGNORE_WRITES_BEGIN
-#define ANNOTALYSIS_IGNORE_WRITES_END
-#define ANNOTALYSIS_UNPROTECTED_READ
+/* Only emit attributes when annotalysis is enabled. */
+#if defined(__SUPPORT_TS_ANNOTATION__) && defined(__SUPPORT_DYN_ANNOTATION__)
+#undef ANNOTALYSIS_IGNORE_READS_BEGIN
+#define ANNOTALYSIS_IGNORE_READS_BEGIN __attribute__ ((ignore_reads_begin))
+#undef ANNOTALYSIS_IGNORE_READS_END
+#define ANNOTALYSIS_IGNORE_READS_END __attribute__ ((ignore_reads_end))
+#undef ANNOTALYSIS_IGNORE_WRITES_BEGIN
+#define ANNOTALYSIS_IGNORE_WRITES_BEGIN __attribute__ ((ignore_writes_begin))
+#undef ANNOTALYSIS_IGNORE_WRITES_END
+#define ANNOTALYSIS_IGNORE_WRITES_END __attribute__ ((ignore_writes_end))
+#undef ANNOTALYSIS_UNPROTECTED_READ
+#define ANNOTALYSIS_UNPROTECTED_READ __attribute__ ((unprotected_read))
#endif
+#endif // defined(__GNUC__) && (!defined(SWIG)) && (!defined(__clang__))
+
/* Use the macros above rather than using these functions directly. */
#ifdef __cplusplus
extern "C" {
@@ -604,7 +610,7 @@ double ValgrindSlowdown(void);
#undef ANNOTATE_UNPROTECTED_READ
template <class T>
inline T ANNOTATE_UNPROTECTED_READ(const volatile T &x)
- __attribute__ ((unprotected_read)) {
+ ANNOTALYSIS_UNPROTECTED_READ {
ANNOTATE_IGNORE_READS_BEGIN();
T res = x;
ANNOTATE_IGNORE_READS_END();
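A usage sketch of the macro whose attribute handling was just reworked; the racy shutdown flag is an illustrative assumption.

#include "base/dynamic_annotations.h"

// A deliberately racy flag: ANNOTATE_UNPROTECTED_READ tells race detectors
// that this unsynchronized read is intentional and benign. With annotations
// disabled, the macro expands to a plain read.
static volatile bool g_shutting_down = false;

bool ShouldStop() {
  return ANNOTATE_UNPROTECTED_READ(g_shutting_down);
}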
diff --git a/third_party/tcmalloc/chromium/src/base/elf_mem_image.cc b/third_party/tcmalloc/chromium/src/base/elf_mem_image.cc
new file mode 100644
index 0000000..2949343
--- /dev/null
+++ b/third_party/tcmalloc/chromium/src/base/elf_mem_image.cc
@@ -0,0 +1,433 @@
+// Copyright (c) 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// ---
+// Author: Paul Pluzhnikov
+//
+// Allow dynamic symbol lookup in an in-memory Elf image.
+//
+
+#include "base/elf_mem_image.h"
+
+#ifdef HAVE_ELF_MEM_IMAGE // defined in elf_mem_image.h
+
+#include <stddef.h> // for size_t, ptrdiff_t
+#include "base/logging.h"
+
+// From binutils/include/elf/common.h (this doesn't appear to be documented
+// anywhere else).
+//
+// /* This flag appears in a Versym structure. It means that the symbol
+// is hidden, and is only visible with an explicit version number.
+// This is a GNU extension. */
+// #define VERSYM_HIDDEN 0x8000
+//
+// /* This is the mask for the rest of the Versym information. */
+// #define VERSYM_VERSION 0x7fff
+
+#define VERSYM_VERSION 0x7fff
+
+namespace base {
+
+namespace {
+template <int N> class ElfClass {
+ public:
+ static const int kElfClass = -1;
+ static int ElfBind(const ElfW(Sym) *) {
+ CHECK(false); // << "Unexpected word size";
+ return 0;
+ }
+ static int ElfType(const ElfW(Sym) *) {
+ CHECK(false); // << "Unexpected word size";
+ return 0;
+ }
+};
+
+template <> class ElfClass<32> {
+ public:
+ static const int kElfClass = ELFCLASS32;
+ static int ElfBind(const ElfW(Sym) *symbol) {
+ return ELF32_ST_BIND(symbol->st_info);
+ }
+ static int ElfType(const ElfW(Sym) *symbol) {
+ return ELF32_ST_TYPE(symbol->st_info);
+ }
+};
+
+template <> class ElfClass<64> {
+ public:
+ static const int kElfClass = ELFCLASS64;
+ static int ElfBind(const ElfW(Sym) *symbol) {
+ return ELF64_ST_BIND(symbol->st_info);
+ }
+ static int ElfType(const ElfW(Sym) *symbol) {
+ return ELF64_ST_TYPE(symbol->st_info);
+ }
+};
+
+typedef ElfClass<__WORDSIZE> CurrentElfClass;
+
+// Extract an element from one of the ELF tables, cast it to desired type.
+// This is just simple arithmetic and a glorified cast.
+// Callers are responsible for bounds checking.
+template <class T>
+const T* GetTableElement(const ElfW(Ehdr) *ehdr,
+ ElfW(Off) table_offset,
+ ElfW(Word) element_size,
+ size_t index) {
+ return reinterpret_cast<const T*>(reinterpret_cast<const char *>(ehdr)
+ + table_offset
+ + index * element_size);
+}
+} // namespace
+
+const void *const ElfMemImage::kInvalidBase =
+ reinterpret_cast<const void *>(~0L);
+
+ElfMemImage::ElfMemImage(const void *base) {
+ CHECK(base != kInvalidBase);
+ Init(base);
+}
+
+int ElfMemImage::GetNumSymbols() const {
+ if (!hash_) {
+ return 0;
+ }
+ // See http://www.caldera.com/developers/gabi/latest/ch5.dynamic.html#hash
+ return hash_[1];
+}
+
+const ElfW(Sym) *ElfMemImage::GetDynsym(int index) const {
+ CHECK_LT(index, GetNumSymbols());
+ return dynsym_ + index;
+}
+
+const ElfW(Versym) *ElfMemImage::GetVersym(int index) const {
+ CHECK_LT(index, GetNumSymbols());
+ return versym_ + index;
+}
+
+const ElfW(Phdr) *ElfMemImage::GetPhdr(int index) const {
+ CHECK_LT(index, ehdr_->e_phnum);
+ return GetTableElement<ElfW(Phdr)>(ehdr_,
+ ehdr_->e_phoff,
+ ehdr_->e_phentsize,
+ index);
+}
+
+const char *ElfMemImage::GetDynstr(ElfW(Word) offset) const {
+ CHECK_LT(offset, strsize_);
+ return dynstr_ + offset;
+}
+
+const void *ElfMemImage::GetSymAddr(const ElfW(Sym) *sym) const {
+ if (sym->st_shndx == SHN_UNDEF || sym->st_shndx >= SHN_LORESERVE) {
+ // Symbol corresponds to "special" (e.g. SHN_ABS) section.
+ return reinterpret_cast<const void *>(sym->st_value);
+ }
+ CHECK_LT(link_base_, sym->st_value);
+ return GetTableElement<char>(ehdr_, 0, 1, sym->st_value) - link_base_;
+}
+
+const ElfW(Verdef) *ElfMemImage::GetVerdef(int index) const {
+ CHECK_LE(index, verdefnum_);
+ const ElfW(Verdef) *version_definition = verdef_;
+ while (version_definition->vd_ndx < index && version_definition->vd_next) {
+ const char *const version_definition_as_char =
+ reinterpret_cast<const char *>(version_definition);
+ version_definition =
+ reinterpret_cast<const ElfW(Verdef) *>(version_definition_as_char +
+ version_definition->vd_next);
+ }
+ return version_definition->vd_ndx == index ? version_definition : NULL;
+}
+
+const ElfW(Verdaux) *ElfMemImage::GetVerdefAux(
+ const ElfW(Verdef) *verdef) const {
+ return reinterpret_cast<const ElfW(Verdaux) *>(verdef+1);
+}
+
+const char *ElfMemImage::GetVerstr(ElfW(Word) offset) const {
+ CHECK_LT(offset, strsize_);
+ return dynstr_ + offset;
+}
+
+void ElfMemImage::Init(const void *base) {
+ ehdr_ = NULL;
+ dynsym_ = NULL;
+ dynstr_ = NULL;
+ versym_ = NULL;
+ verdef_ = NULL;
+ hash_ = NULL;
+ strsize_ = 0;
+ verdefnum_ = 0;
+ link_base_ = ~0L; // Sentinel: PT_LOAD .p_vaddr can't possibly be this.
+ if (!base) {
+ return;
+ }
+  const uintptr_t base_as_uintptr_t = reinterpret_cast<uintptr_t>(base);
+ // Fake VDSO has low bit set.
+ const bool fake_vdso = ((base_as_uintptr_t & 1) != 0);
+ base = reinterpret_cast<const void *>(base_as_uintptr_t & ~1);
+ const char *const base_as_char = reinterpret_cast<const char *>(base);
+ if (base_as_char[EI_MAG0] != ELFMAG0 || base_as_char[EI_MAG1] != ELFMAG1 ||
+ base_as_char[EI_MAG2] != ELFMAG2 || base_as_char[EI_MAG3] != ELFMAG3) {
+ RAW_DCHECK(false, "no ELF magic"); // at %p", base);
+ return;
+ }
+ int elf_class = base_as_char[EI_CLASS];
+ if (elf_class != CurrentElfClass::kElfClass) {
+ DCHECK_EQ(elf_class, CurrentElfClass::kElfClass);
+ return;
+ }
+ switch (base_as_char[EI_DATA]) {
+ case ELFDATA2LSB: {
+ if (__LITTLE_ENDIAN != __BYTE_ORDER) {
+ DCHECK_EQ(__LITTLE_ENDIAN, __BYTE_ORDER); // << ": wrong byte order";
+ return;
+ }
+ break;
+ }
+ case ELFDATA2MSB: {
+ if (__BIG_ENDIAN != __BYTE_ORDER) {
+ DCHECK_EQ(__BIG_ENDIAN, __BYTE_ORDER); // << ": wrong byte order";
+ return;
+ }
+ break;
+ }
+ default: {
+ RAW_DCHECK(false, "unexpected data encoding"); // << base_as_char[EI_DATA];
+ return;
+ }
+ }
+
+ ehdr_ = reinterpret_cast<const ElfW(Ehdr) *>(base);
+ const ElfW(Phdr) *dynamic_program_header = NULL;
+ for (int i = 0; i < ehdr_->e_phnum; ++i) {
+ const ElfW(Phdr) *const program_header = GetPhdr(i);
+ switch (program_header->p_type) {
+ case PT_LOAD:
+ if (link_base_ == ~0L) {
+ link_base_ = program_header->p_vaddr;
+ }
+ break;
+ case PT_DYNAMIC:
+ dynamic_program_header = program_header;
+ break;
+ }
+ }
+ if (link_base_ == ~0L || !dynamic_program_header) {
+ RAW_DCHECK(~0L != link_base_, "no PT_LOADs in VDSO");
+ RAW_DCHECK(dynamic_program_header, "no PT_DYNAMIC in VDSO");
+    // Mark this image as not present. Cannot recurse infinitely.
+ Init(0);
+ return;
+ }
+ ptrdiff_t relocation =
+ base_as_char - reinterpret_cast<const char *>(link_base_);
+ ElfW(Dyn) *dynamic_entry =
+ reinterpret_cast<ElfW(Dyn) *>(dynamic_program_header->p_vaddr +
+ relocation);
+ for (; dynamic_entry->d_tag != DT_NULL; ++dynamic_entry) {
+ ElfW(Xword) value = dynamic_entry->d_un.d_val;
+ if (fake_vdso) {
+ // A complication: in the real VDSO, dynamic entries are not relocated
+ // (it wasn't loaded by a dynamic loader). But when testing with a
+ // "fake" dlopen()ed vdso library, the loader relocates some (but
+ // not all!) of them before we get here.
+ if (dynamic_entry->d_tag == DT_VERDEF) {
+        // The only dynamic entry (of the ones we care about) that the
+        // libc-2.3.6 loader doesn't relocate.
+ value += relocation;
+ }
+ } else {
+ // Real VDSO. Everything needs to be relocated.
+ value += relocation;
+ }
+ switch (dynamic_entry->d_tag) {
+ case DT_HASH:
+ hash_ = reinterpret_cast<ElfW(Word) *>(value);
+ break;
+ case DT_SYMTAB:
+ dynsym_ = reinterpret_cast<ElfW(Sym) *>(value);
+ break;
+ case DT_STRTAB:
+ dynstr_ = reinterpret_cast<const char *>(value);
+ break;
+ case DT_VERSYM:
+ versym_ = reinterpret_cast<ElfW(Versym) *>(value);
+ break;
+ case DT_VERDEF:
+ verdef_ = reinterpret_cast<ElfW(Verdef) *>(value);
+ break;
+ case DT_VERDEFNUM:
+ verdefnum_ = dynamic_entry->d_un.d_val;
+ break;
+ case DT_STRSZ:
+ strsize_ = dynamic_entry->d_un.d_val;
+ break;
+ default:
+ // Unrecognized entries explicitly ignored.
+ break;
+ }
+ }
+ if (!hash_ || !dynsym_ || !dynstr_ || !versym_ ||
+ !verdef_ || !verdefnum_ || !strsize_) {
+ RAW_DCHECK(hash_, "invalid VDSO (no DT_HASH)");
+ RAW_DCHECK(dynsym_, "invalid VDSO (no DT_SYMTAB)");
+ RAW_DCHECK(dynstr_, "invalid VDSO (no DT_STRTAB)");
+ RAW_DCHECK(versym_, "invalid VDSO (no DT_VERSYM)");
+ RAW_DCHECK(verdef_, "invalid VDSO (no DT_VERDEF)");
+ RAW_DCHECK(verdefnum_, "invalid VDSO (no DT_VERDEFNUM)");
+ RAW_DCHECK(strsize_, "invalid VDSO (no DT_STRSZ)");
+    // Mark this image as not present. Cannot recurse infinitely.
+ Init(0);
+ return;
+ }
+}
+
+bool ElfMemImage::LookupSymbol(const char *name,
+ const char *version,
+ int type,
+ SymbolInfo *info) const {
+ for (SymbolIterator it = begin(); it != end(); ++it) {
+ if (strcmp(it->name, name) == 0 && strcmp(it->version, version) == 0 &&
+ CurrentElfClass::ElfType(it->symbol) == type) {
+ if (info) {
+ *info = *it;
+ }
+ return true;
+ }
+ }
+ return false;
+}
+
+bool ElfMemImage::LookupSymbolByAddress(const void *address,
+ SymbolInfo *info_out) const {
+ for (SymbolIterator it = begin(); it != end(); ++it) {
+ const char *const symbol_start =
+ reinterpret_cast<const char *>(it->address);
+ const char *const symbol_end = symbol_start + it->symbol->st_size;
+ if (symbol_start <= address && address < symbol_end) {
+ if (info_out) {
+ // Client wants to know details for that symbol (the usual case).
+ if (CurrentElfClass::ElfBind(it->symbol) == STB_GLOBAL) {
+ // Strong symbol; just return it.
+ *info_out = *it;
+ return true;
+ } else {
+ // Weak or local. Record it, but keep looking for a strong one.
+ *info_out = *it;
+ }
+ } else {
+ // Client only cares if there is an overlapping symbol.
+ return true;
+ }
+ }
+ }
+ return false;
+}
+
+ElfMemImage::SymbolIterator::SymbolIterator(const void *const image, int index)
+ : index_(index), image_(image) {
+}
+
+const ElfMemImage::SymbolInfo *ElfMemImage::SymbolIterator::operator->() const {
+ return &info_;
+}
+
+const ElfMemImage::SymbolInfo& ElfMemImage::SymbolIterator::operator*() const {
+ return info_;
+}
+
+bool ElfMemImage::SymbolIterator::operator==(const SymbolIterator &rhs) const {
+ return this->image_ == rhs.image_ && this->index_ == rhs.index_;
+}
+
+bool ElfMemImage::SymbolIterator::operator!=(const SymbolIterator &rhs) const {
+ return !(*this == rhs);
+}
+
+ElfMemImage::SymbolIterator &ElfMemImage::SymbolIterator::operator++() {
+ this->Update(1);
+ return *this;
+}
+
+ElfMemImage::SymbolIterator ElfMemImage::begin() const {
+ SymbolIterator it(this, 0);
+ it.Update(0);
+ return it;
+}
+
+ElfMemImage::SymbolIterator ElfMemImage::end() const {
+ return SymbolIterator(this, GetNumSymbols());
+}
+
+void ElfMemImage::SymbolIterator::Update(int increment) {
+ const ElfMemImage *image = reinterpret_cast<const ElfMemImage *>(image_);
+ CHECK(image->IsPresent() || increment == 0);
+ if (!image->IsPresent()) {
+ return;
+ }
+ index_ += increment;
+ if (index_ >= image->GetNumSymbols()) {
+ index_ = image->GetNumSymbols();
+ return;
+ }
+ const ElfW(Sym) *symbol = image->GetDynsym(index_);
+ const ElfW(Versym) *version_symbol = image->GetVersym(index_);
+ CHECK(symbol && version_symbol);
+ const char *const symbol_name = image->GetDynstr(symbol->st_name);
+ const ElfW(Versym) version_index = version_symbol[0] & VERSYM_VERSION;
+ const ElfW(Verdef) *version_definition = NULL;
+ const char *version_name = "";
+ if (symbol->st_shndx == SHN_UNDEF) {
+ // Undefined symbols reference DT_VERNEED, not DT_VERDEF, and
+ // version_index could well be greater than verdefnum_, so calling
+ // GetVerdef(version_index) may trigger assertion.
+ } else {
+ version_definition = image->GetVerdef(version_index);
+ }
+ if (version_definition) {
+ // I am expecting 1 or 2 auxiliary entries: 1 for the version itself,
+ // optional 2nd if the version has a parent.
+ CHECK_LE(1, version_definition->vd_cnt);
+ CHECK_LE(version_definition->vd_cnt, 2);
+ const ElfW(Verdaux) *version_aux = image->GetVerdefAux(version_definition);
+ version_name = image->GetVerstr(version_aux->vda_name);
+ }
+ info_.name = symbol_name;
+ info_.version = version_name;
+ info_.address = image->GetSymAddr(symbol);
+ info_.symbol = symbol;
+}
+
+} // namespace base
+
+#endif // HAVE_ELF_MEM_IMAGE
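A hedged sketch of how a client such as the new vdso_support.cc drives this class; the symbol name and version below are assumptions that hold for the x86-64 Linux VDSO, and obtaining the base address is left to the caller.

#include <stdio.h>
#include "base/elf_mem_image.h"

// Look up a versioned function symbol in the VDSO. STT_FUNC comes from
// <elf.h>, pulled in via the <link.h> include in elf_mem_image.h.
void PrintGetcpuAddress(const void *vdso_base) {
  base::ElfMemImage image(vdso_base);
  if (!image.IsPresent())
    return;
  base::ElfMemImage::SymbolInfo info;
  if (image.LookupSymbol("__vdso_getcpu", "LINUX_2.6", STT_FUNC, &info)) {
    printf("%s %s is at %p\n", info.name, info.version, info.address);
  }
}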
diff --git a/third_party/tcmalloc/chromium/src/base/elf_mem_image.h b/third_party/tcmalloc/chromium/src/base/elf_mem_image.h
new file mode 100644
index 0000000..6f1f097
--- /dev/null
+++ b/third_party/tcmalloc/chromium/src/base/elf_mem_image.h
@@ -0,0 +1,134 @@
+// Copyright (c) 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// ---
+// Author: Paul Pluzhnikov
+//
+// Allow dynamic symbol lookup for in-memory Elf images.
+
+#ifndef BASE_ELF_MEM_IMAGE_H_
+#define BASE_ELF_MEM_IMAGE_H_
+
+#include <config.h>
+#ifdef HAVE_FEATURES_H
+#include <features.h> // for __GLIBC__
+#endif
+
+// Maybe one day we can rewrite this file not to require the elf
+// symbol extensions in glibc, but for right now we need them.
+#if defined(__ELF__) && defined(__GLIBC__) && !defined(__native_client__)
+
+#define HAVE_ELF_MEM_IMAGE 1
+
+#include <stdlib.h>
+#include <link.h> // for ElfW
+
+namespace base {
+
+// An in-memory ELF image (may not exist on disk).
+class ElfMemImage {
+ public:
+ // Sentinel: there could never be an elf image at this address.
+ static const void *const kInvalidBase;
+
+ // Information about a single vdso symbol.
+ // All pointers are into .dynsym, .dynstr, or .text of the VDSO.
+ // Do not free() them or modify through them.
+ struct SymbolInfo {
+ const char *name; // E.g. "__vdso_getcpu"
+ const char *version; // E.g. "LINUX_2.6", could be ""
+ // for unversioned symbol.
+ const void *address; // Relocated symbol address.
+ const ElfW(Sym) *symbol; // Symbol in the dynamic symbol table.
+ };
+
+ // Supports iteration over all dynamic symbols.
+ class SymbolIterator {
+ public:
+ friend class ElfMemImage;
+ const SymbolInfo *operator->() const;
+ const SymbolInfo &operator*() const;
+ SymbolIterator& operator++();
+ bool operator!=(const SymbolIterator &rhs) const;
+ bool operator==(const SymbolIterator &rhs) const;
+ private:
+ SymbolIterator(const void *const image, int index);
+ void Update(int incr);
+ SymbolInfo info_;
+ int index_;
+ const void *const image_;
+ };
+
+
+ explicit ElfMemImage(const void *base);
+ void Init(const void *base);
+ bool IsPresent() const { return ehdr_ != NULL; }
+ const ElfW(Phdr)* GetPhdr(int index) const;
+ const ElfW(Sym)* GetDynsym(int index) const;
+ const ElfW(Versym)* GetVersym(int index) const;
+ const ElfW(Verdef)* GetVerdef(int index) const;
+ const ElfW(Verdaux)* GetVerdefAux(const ElfW(Verdef) *verdef) const;
+ const char* GetDynstr(ElfW(Word) offset) const;
+ const void* GetSymAddr(const ElfW(Sym) *sym) const;
+ const char* GetVerstr(ElfW(Word) offset) const;
+ int GetNumSymbols() const;
+
+ SymbolIterator begin() const;
+ SymbolIterator end() const;
+
+ // Look up versioned dynamic symbol in the image.
+ // Returns false if image is not present, or doesn't contain given
+ // symbol/version/type combination.
+ // If info_out != NULL, additional details are filled in.
+ bool LookupSymbol(const char *name, const char *version,
+ int symbol_type, SymbolInfo *info_out) const;
+
+ // Find info about symbol (if any) which overlaps given address.
+ // Returns true if symbol was found; false if image isn't present
+ // or doesn't have a symbol overlapping given address.
+ // If info_out != NULL, additional details are filled in.
+ bool LookupSymbolByAddress(const void *address, SymbolInfo *info_out) const;
+
+ private:
+ const ElfW(Ehdr) *ehdr_;
+ const ElfW(Sym) *dynsym_;
+ const ElfW(Versym) *versym_;
+ const ElfW(Verdef) *verdef_;
+ const ElfW(Word) *hash_;
+ const char *dynstr_;
+ size_t strsize_;
+ size_t verdefnum_;
+ ElfW(Addr) link_base_; // Link-time base (p_vaddr of first PT_LOAD).
+};
+
+} // namespace base
+
+#endif // __ELF__ and __GLIBC__ and !__native_client__
+
+#endif // BASE_ELF_MEM_IMAGE_H_
diff --git a/third_party/tcmalloc/chromium/src/base/googleinit.h b/third_party/tcmalloc/chromium/src/base/googleinit.h
index 62ad84c..a48375d 100644
--- a/third_party/tcmalloc/chromium/src/base/googleinit.h
+++ b/third_party/tcmalloc/chromium/src/base/googleinit.h
@@ -33,19 +33,55 @@
#ifndef _GOOGLEINIT_H
#define _GOOGLEINIT_H
+#include "base/logging.h"
+
class GoogleInitializer {
public:
- typedef void (*void_function)(void);
- GoogleInitializer(const char* name, void_function f) {
- f();
+ typedef void (*VoidFunction)(void);
+ GoogleInitializer(const char* name, VoidFunction ctor, VoidFunction dtor)
+ : name_(name), destructor_(dtor) {
+ // TODO(dmikurube): Re-enable the commented-out code.
+ // We commented out the following line, since Chromium does not have the
+ // proper includes to log using these macros.
+ //
+    // Commented-out code:
+ // RAW_VLOG(10, "<GoogleModuleObject> constructing: %s\n", name_);
+ //
+    // This googleinit.h is also included from outside third_party/tcmalloc,
+    // e.g. by net/tools/flip_server/balsa_headers.cc.
+    // "base/logging.h" (included above) then resolves to Chromium's
+    // base/logging.h rather than tcmalloc's.
+ if (ctor)
+ ctor();
+ }
+ ~GoogleInitializer() {
+ // TODO(dmikurube): Re-enable the commented-out code.
+ // The same as above. The following line is commented out in Chromium.
+ //
+    // Commented-out code:
+ // RAW_VLOG(10, "<GoogleModuleObject> destroying: %s\n", name_);
+ if (destructor_)
+ destructor_();
}
+
+ private:
+ const char* const name_;
+ const VoidFunction destructor_;
};
#define REGISTER_MODULE_INITIALIZER(name, body) \
namespace { \
static void google_init_module_##name () { body; } \
GoogleInitializer google_initializer_module_##name(#name, \
- google_init_module_##name); \
+ google_init_module_##name, NULL); \
}
+#define REGISTER_MODULE_DESTRUCTOR(name, body) \
+ namespace { \
+ static void google_destruct_module_##name () { body; } \
+ GoogleInitializer google_destructor_module_##name(#name, \
+ NULL, google_destruct_module_##name); \
+ }
+
+
#endif /* _GOOGLEINIT_H */
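The initializer macro keeps its old shape and gains a destructor counterpart; a minimal registration sketch with a made-up module name:

#include "base/googleinit.h"

static void my_tables_setup() { /* allocate lookup tables */ }
static void my_tables_teardown() { /* release lookup tables */ }

// Runs my_tables_setup() during static initialization and
// my_tables_teardown() during static destruction of this translation unit.
REGISTER_MODULE_INITIALIZER(my_tables, my_tables_setup());
REGISTER_MODULE_DESTRUCTOR(my_tables, my_tables_teardown());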
diff --git a/third_party/tcmalloc/chromium/src/base/linux_syscall_support.h b/third_party/tcmalloc/chromium/src/base/linux_syscall_support.h
index f11f823..99dac9e 100644
--- a/third_party/tcmalloc/chromium/src/base/linux_syscall_support.h
+++ b/third_party/tcmalloc/chromium/src/base/linux_syscall_support.h
@@ -69,6 +69,63 @@
* This file defines a few internal symbols that all start with "LSS_".
* Do not access these symbols from outside this file. They are not part
* of the supported API.
+ *
+ * NOTE: This is a stripped down version of the official opensource
+ * version of linux_syscall_support.h, which lives at
+ * http://code.google.com/p/linux-syscall-support/
+ * It includes only the syscalls that are used in perftools, plus a
+ * few extra. Here's the breakdown:
+ * 1) Perftools uses these: grep -rho 'sys_[a-z0-9_A-Z]* *(' src | sort -u
+ * sys__exit(
+ * sys_clone(
+ * sys_close(
+ * sys_fcntl(
+ * sys_fstat(
+ * sys_futex(
+ * sys_futex1(
+ * sys_getcpu(
+ * sys_getdents(
+ * sys_getppid(
+ * sys_gettid(
+ * sys_lseek(
+ * sys_mmap(
+ * sys_mremap(
+ * sys_munmap(
+ * sys_open(
+ * sys_pipe(
+ * sys_prctl(
+ * sys_ptrace(
+ * sys_ptrace_detach(
+ * sys_read(
+ * sys_sched_yield(
+ * sys_sigaction(
+ * sys_sigaltstack(
+ * sys_sigdelset(
+ * sys_sigfillset(
+ * sys_sigprocmask(
+ * sys_socket(
+ * sys_stat(
+ * sys_waitpid(
+ * 2) These are used as subroutines of the above:
+ * sys_getpid -- gettid
+ * sys_kill -- ptrace_detach
+ * sys_restore -- sigaction
+ * sys_restore_rt -- sigaction
+ * sys_socketcall -- socket
+ * sys_wait4 -- waitpid
+ * 3) I left these in even though they're not used. They either
+ * complement the above (write vs read) or are variants (rt_sigaction):
+ * sys_fstat64
+ * sys_getdents64
+ * sys_llseek
+ * sys_mmap2
+ * sys_openat
+ * sys_rt_sigaction
+ * sys_rt_sigprocmask
+ * sys_sigaddset
+ * sys_sigemptyset
+ * sys_stat64
+ * sys_write
*/
#ifndef SYS_LINUX_SYSCALL_SUPPORT_H
#define SYS_LINUX_SYSCALL_SUPPORT_H
@@ -76,7 +133,7 @@
/* We currently only support x86-32, x86-64, ARM, MIPS, and PPC on Linux.
* Porting to other related platforms should not be difficult.
*/
-#if (defined(__i386__) || defined(__x86_64__) || defined(__ARM_ARCH_3__) || \
+#if (defined(__i386__) || defined(__x86_64__) || defined(__arm__) || \
defined(__mips__) || defined(__PPC__)) && defined(__linux)
#ifndef SYS_CPLUSPLUS
@@ -154,36 +211,6 @@ struct kernel_dirent {
char d_name[256];
};
-/* include/linux/uio.h */
-struct kernel_iovec {
- void *iov_base;
- unsigned long iov_len;
-};
-
-/* include/linux/socket.h */
-struct kernel_msghdr {
- void *msg_name;
- int msg_namelen;
- struct kernel_iovec*msg_iov;
- unsigned long msg_iovlen;
- void *msg_control;
- unsigned long msg_controllen;
- unsigned msg_flags;
-};
-
-/* include/asm-generic/poll.h */
-struct kernel_pollfd {
- int fd;
- short events;
- short revents;
-};
-
-/* include/linux/resource.h */
-struct kernel_rlimit {
- unsigned long rlim_cur;
- unsigned long rlim_max;
-};
-
/* include/linux/time.h */
struct kernel_timespec {
long tv_sec;
@@ -217,7 +244,7 @@ struct kernel_rusage {
};
struct siginfo;
-#if defined(__i386__) || defined(__ARM_ARCH_3__) || defined(__PPC__)
+#if defined(__i386__) || defined(__arm__) || defined(__PPC__)
/* include/asm-{arm,i386,mips,ppc}/signal.h */
struct kernel_old_sigaction {
@@ -274,12 +301,6 @@ struct kernel_sigaction {
#endif
};
-/* include/linux/socket.h */
-struct kernel_sockaddr {
- unsigned short sa_family;
- char sa_data[14];
-};
-
/* include/asm-{arm,i386,mips,ppc}/stat.h */
#ifdef __mips__
#if _MIPS_SIM == _MIPS_SIM_ABI64
@@ -354,7 +375,7 @@ struct kernel_stat64 {
#endif
/* include/asm-{arm,i386,mips,x86_64,ppc}/stat.h */
-#if defined(__i386__) || defined(__ARM_ARCH_3__)
+#if defined(__i386__) || defined(__arm__)
struct kernel_stat {
/* The kernel headers suggest that st_dev and st_rdev should be 32bit
* quantities encoding 12bit major and 20bit minor numbers in an interleaved
@@ -449,89 +470,15 @@ struct kernel_stat {
};
#endif
-/* include/asm-{arm,i386,mips,x86_64,ppc}/statfs.h */
-#ifdef __mips__
-#if _MIPS_SIM != _MIPS_SIM_ABI64
-struct kernel_statfs64 {
- unsigned long f_type;
- unsigned long f_bsize;
- unsigned long f_frsize;
- unsigned long __pad;
- unsigned long long f_blocks;
- unsigned long long f_bfree;
- unsigned long long f_files;
- unsigned long long f_ffree;
- unsigned long long f_bavail;
- struct { int val[2]; } f_fsid;
- unsigned long f_namelen;
- unsigned long f_spare[6];
-};
-#endif
-#elif !defined(__x86_64__)
-struct kernel_statfs64 {
- unsigned long f_type;
- unsigned long f_bsize;
- unsigned long long f_blocks;
- unsigned long long f_bfree;
- unsigned long long f_bavail;
- unsigned long long f_files;
- unsigned long long f_ffree;
- struct { int val[2]; } f_fsid;
- unsigned long f_namelen;
- unsigned long f_frsize;
- unsigned long f_spare[5];
-};
-#endif
-
-/* include/asm-{arm,i386,mips,x86_64,ppc,generic}/statfs.h */
-#ifdef __mips__
-struct kernel_statfs {
- long f_type;
- long f_bsize;
- long f_frsize;
- long f_blocks;
- long f_bfree;
- long f_files;
- long f_ffree;
- long f_bavail;
- struct { int val[2]; } f_fsid;
- long f_namelen;
- long f_spare[6];
-};
-#else
-struct kernel_statfs {
- /* x86_64 actually defines all these fields as signed, whereas all other */
- /* platforms define them as unsigned. Leaving them at unsigned should not */
- /* cause any problems. */
- unsigned long f_type;
- unsigned long f_bsize;
- unsigned long f_blocks;
- unsigned long f_bfree;
- unsigned long f_bavail;
- unsigned long f_files;
- unsigned long f_ffree;
- struct { int val[2]; } f_fsid;
- unsigned long f_namelen;
- unsigned long f_frsize;
- unsigned long f_spare[5];
-};
-#endif
-
/* Definitions missing from the standard header files */
#ifndef O_DIRECTORY
-#if defined(__ARM_ARCH_3__)
+#if defined(__arm__)
#define O_DIRECTORY 0040000
#else
#define O_DIRECTORY 0200000
#endif
#endif
-#ifndef NT_PRXFPREG
-#define NT_PRXFPREG 0x46e62b7f
-#endif
-#ifndef PTRACE_GETFPXREGS
-#define PTRACE_GETFPXREGS ((enum __ptrace_request)18)
-#endif
#ifndef PR_GET_DUMPABLE
#define PR_GET_DUMPABLE 3
#endif
@@ -553,46 +500,11 @@ struct kernel_statfs {
#ifndef SA_RESTORER
#define SA_RESTORER 0x04000000
#endif
-#ifndef CPUCLOCK_PROF
-#define CPUCLOCK_PROF 0
-#endif
-#ifndef CPUCLOCK_VIRT
-#define CPUCLOCK_VIRT 1
-#endif
-#ifndef CPUCLOCK_SCHED
-#define CPUCLOCK_SCHED 2
-#endif
-#ifndef CPUCLOCK_PERTHREAD_MASK
-#define CPUCLOCK_PERTHREAD_MASK 4
-#endif
-#ifndef MAKE_PROCESS_CPUCLOCK
-#define MAKE_PROCESS_CPUCLOCK(pid, clock) \
- ((~(int)(pid) << 3) | (int)(clock))
-#endif
-#ifndef MAKE_THREAD_CPUCLOCK
-#define MAKE_THREAD_CPUCLOCK(tid, clock) \
- ((~(int)(tid) << 3) | (int)((clock) | CPUCLOCK_PERTHREAD_MASK))
-#endif
#if defined(__i386__)
-#ifndef __NR_setresuid
-#define __NR_setresuid 164
-#define __NR_setresgid 170
-#endif
#ifndef __NR_rt_sigaction
#define __NR_rt_sigaction 174
#define __NR_rt_sigprocmask 175
-#define __NR_rt_sigpending 176
-#define __NR_rt_sigsuspend 179
-#endif
-#ifndef __NR_pread64
-#define __NR_pread64 180
-#endif
-#ifndef __NR_pwrite64
-#define __NR_pwrite64 181
-#endif
-#ifndef __NR_ugetrlimit
-#define __NR_ugetrlimit 191
#endif
#ifndef __NR_stat64
#define __NR_stat64 195
@@ -600,110 +512,43 @@ struct kernel_statfs {
#ifndef __NR_fstat64
#define __NR_fstat64 197
#endif
-#ifndef __NR_setresuid32
-#define __NR_setresuid32 208
-#define __NR_setresgid32 210
-#endif
-#ifndef __NR_setfsuid32
-#define __NR_setfsuid32 215
-#define __NR_setfsgid32 216
-#endif
#ifndef __NR_getdents64
#define __NR_getdents64 220
#endif
#ifndef __NR_gettid
#define __NR_gettid 224
#endif
-#ifndef __NR_readahead
-#define __NR_readahead 225
-#endif
-#ifndef __NR_setxattr
-#define __NR_setxattr 226
-#endif
-#ifndef __NR_lsetxattr
-#define __NR_lsetxattr 227
-#endif
-#ifndef __NR_getxattr
-#define __NR_getxattr 229
-#endif
-#ifndef __NR_lgetxattr
-#define __NR_lgetxattr 230
-#endif
-#ifndef __NR_listxattr
-#define __NR_listxattr 232
-#endif
-#ifndef __NR_llistxattr
-#define __NR_llistxattr 233
-#endif
#ifndef __NR_futex
#define __NR_futex 240
#endif
-#ifndef __NR_sched_setaffinity
-#define __NR_sched_setaffinity 241
-#define __NR_sched_getaffinity 242
-#endif
-#ifndef __NR_set_tid_address
-#define __NR_set_tid_address 258
-#endif
-#ifndef __NR_clock_gettime
-#define __NR_clock_gettime 265
-#endif
-#ifndef __NR_clock_getres
-#define __NR_clock_getres 266
-#endif
-#ifndef __NR_statfs64
-#define __NR_statfs64 268
-#endif
-#ifndef __NR_fstatfs64
-#define __NR_fstatfs64 269
-#endif
-#ifndef __NR_fadvise64_64
-#define __NR_fadvise64_64 272
-#endif
-#ifndef __NR_ioprio_set
-#define __NR_ioprio_set 289
-#endif
-#ifndef __NR_ioprio_get
-#define __NR_ioprio_get 290
-#endif
#ifndef __NR_openat
#define __NR_openat 295
#endif
-#ifndef __NR_fstatat64
-#define __NR_fstatat64 300
-#endif
-#ifndef __NR_unlinkat
-#define __NR_unlinkat 301
-#endif
-#ifndef __NR_move_pages
-#define __NR_move_pages 317
-#endif
#ifndef __NR_getcpu
#define __NR_getcpu 318
#endif
-#ifndef __NR_fallocate
-#define __NR_fallocate 324
-#endif
/* End of i386 definitions */
-#elif defined(__ARM_ARCH_3__)
-#ifndef __NR_setresuid
-#define __NR_setresuid (__NR_SYSCALL_BASE + 164)
-#define __NR_setresgid (__NR_SYSCALL_BASE + 170)
+#elif defined(__arm__)
+#ifndef __syscall
+#if defined(__thumb__) || defined(__ARM_EABI__)
+#define __SYS_REG(name) register long __sysreg __asm__("r6") = __NR_##name;
+#define __SYS_REG_LIST(regs...) [sysreg] "r" (__sysreg) , ##regs
+#define __syscall(name) "swi\t0"
+#define __syscall_safe(name) \
+ "push {r7}\n" \
+ "mov r7,%[sysreg]\n" \
+ __syscall(name)"\n" \
+ "pop {r7}"
+#else
+#define __SYS_REG(name)
+#define __SYS_REG_LIST(regs...) regs
+#define __syscall(name) "swi\t" __sys1(__NR_##name) ""
+#define __syscall_safe(name) __syscall(name)
+#endif
#endif
#ifndef __NR_rt_sigaction
#define __NR_rt_sigaction (__NR_SYSCALL_BASE + 174)
#define __NR_rt_sigprocmask (__NR_SYSCALL_BASE + 175)
-#define __NR_rt_sigpending (__NR_SYSCALL_BASE + 176)
-#define __NR_rt_sigsuspend (__NR_SYSCALL_BASE + 179)
-#endif
-#ifndef __NR_pread64
-#define __NR_pread64 (__NR_SYSCALL_BASE + 180)
-#endif
-#ifndef __NR_pwrite64
-#define __NR_pwrite64 (__NR_SYSCALL_BASE + 181)
-#endif
-#ifndef __NR_ugetrlimit
-#define __NR_ugetrlimit (__NR_SYSCALL_BASE + 191)
#endif
#ifndef __NR_stat64
#define __NR_stat64 (__NR_SYSCALL_BASE + 195)
@@ -711,172 +556,35 @@ struct kernel_statfs {
#ifndef __NR_fstat64
#define __NR_fstat64 (__NR_SYSCALL_BASE + 197)
#endif
-#ifndef __NR_setresuid32
-#define __NR_setresuid32 (__NR_SYSCALL_BASE + 208)
-#define __NR_setresgid32 (__NR_SYSCALL_BASE + 210)
-#endif
-#ifndef __NR_setfsuid32
-#define __NR_setfsuid32 (__NR_SYSCALL_BASE + 215)
-#define __NR_setfsgid32 (__NR_SYSCALL_BASE + 216)
-#endif
#ifndef __NR_getdents64
#define __NR_getdents64 (__NR_SYSCALL_BASE + 217)
#endif
#ifndef __NR_gettid
#define __NR_gettid (__NR_SYSCALL_BASE + 224)
#endif
-#ifndef __NR_readahead
-#define __NR_readahead (__NR_SYSCALL_BASE + 225)
-#endif
-#ifndef __NR_setxattr
-#define __NR_setxattr (__NR_SYSCALL_BASE + 226)
-#endif
-#ifndef __NR_lsetxattr
-#define __NR_lsetxattr (__NR_SYSCALL_BASE + 227)
-#endif
-#ifndef __NR_getxattr
-#define __NR_getxattr (__NR_SYSCALL_BASE + 229)
-#endif
-#ifndef __NR_lgetxattr
-#define __NR_lgetxattr (__NR_SYSCALL_BASE + 230)
-#endif
-#ifndef __NR_listxattr
-#define __NR_listxattr (__NR_SYSCALL_BASE + 232)
-#endif
-#ifndef __NR_llistxattr
-#define __NR_llistxattr (__NR_SYSCALL_BASE + 233)
-#endif
#ifndef __NR_futex
#define __NR_futex (__NR_SYSCALL_BASE + 240)
#endif
-#ifndef __NR_sched_setaffinity
-#define __NR_sched_setaffinity (__NR_SYSCALL_BASE + 241)
-#define __NR_sched_getaffinity (__NR_SYSCALL_BASE + 242)
-#endif
-#ifndef __NR_set_tid_address
-#define __NR_set_tid_address (__NR_SYSCALL_BASE + 256)
-#endif
-#ifndef __NR_clock_gettime
-#define __NR_clock_gettime (__NR_SYSCALL_BASE + 263)
-#endif
-#ifndef __NR_clock_getres
-#define __NR_clock_getres (__NR_SYSCALL_BASE + 264)
-#endif
-#ifndef __NR_statfs64
-#define __NR_statfs64 (__NR_SYSCALL_BASE + 266)
-#endif
-#ifndef __NR_fstatfs64
-#define __NR_fstatfs64 (__NR_SYSCALL_BASE + 267)
-#endif
-#ifndef __NR_ioprio_set
-#define __NR_ioprio_set (__NR_SYSCALL_BASE + 314)
-#endif
-#ifndef __NR_ioprio_get
-#define __NR_ioprio_get (__NR_SYSCALL_BASE + 315)
-#endif
-#ifndef __NR_move_pages
-#define __NR_move_pages (__NR_SYSCALL_BASE + 344)
-#endif
-#ifndef __NR_getcpu
-#define __NR_getcpu (__NR_SYSCALL_BASE + 345)
-#endif
-/* End of ARM 3 definitions */
+/* End of ARM definitions */
#elif defined(__x86_64__)
-#ifndef __NR_pread64
-#define __NR_pread64 17
-#endif
-#ifndef __NR_pwrite64
-#define __NR_pwrite64 18
-#endif
-#ifndef __NR_setresuid
-#define __NR_setresuid 117
-#define __NR_setresgid 119
-#endif
#ifndef __NR_gettid
#define __NR_gettid 186
#endif
-#ifndef __NR_readahead
-#define __NR_readahead 187
-#endif
-#ifndef __NR_setxattr
-#define __NR_setxattr 188
-#endif
-#ifndef __NR_lsetxattr
-#define __NR_lsetxattr 189
-#endif
-#ifndef __NR_getxattr
-#define __NR_getxattr 191
-#endif
-#ifndef __NR_lgetxattr
-#define __NR_lgetxattr 192
-#endif
-#ifndef __NR_listxattr
-#define __NR_listxattr 194
-#endif
-#ifndef __NR_llistxattr
-#define __NR_llistxattr 195
-#endif
#ifndef __NR_futex
#define __NR_futex 202
#endif
-#ifndef __NR_sched_setaffinity
-#define __NR_sched_setaffinity 203
-#define __NR_sched_getaffinity 204
-#endif
#ifndef __NR_getdents64
#define __NR_getdents64 217
#endif
-#ifndef __NR_set_tid_address
-#define __NR_set_tid_address 218
-#endif
-#ifndef __NR_fadvise64
-#define __NR_fadvise64 221
-#endif
-#ifndef __NR_clock_gettime
-#define __NR_clock_gettime 228
-#endif
-#ifndef __NR_clock_getres
-#define __NR_clock_getres 229
-#endif
-#ifndef __NR_ioprio_set
-#define __NR_ioprio_set 251
-#endif
-#ifndef __NR_ioprio_get
-#define __NR_ioprio_get 252
-#endif
#ifndef __NR_openat
#define __NR_openat 257
#endif
-#ifndef __NR_newfstatat
-#define __NR_newfstatat 262
-#endif
-#ifndef __NR_unlinkat
-#define __NR_unlinkat 263
-#endif
-#ifndef __NR_move_pages
-#define __NR_move_pages 279
-#endif
-#ifndef __NR_fallocate
-#define __NR_fallocate 285
-#endif
/* End of x86-64 definitions */
#elif defined(__mips__)
#if _MIPS_SIM == _MIPS_SIM_ABI32
-#ifndef __NR_setresuid
-#define __NR_setresuid (__NR_Linux + 185)
-#define __NR_setresgid (__NR_Linux + 190)
-#endif
#ifndef __NR_rt_sigaction
#define __NR_rt_sigaction (__NR_Linux + 194)
#define __NR_rt_sigprocmask (__NR_Linux + 195)
-#define __NR_rt_sigpending (__NR_Linux + 196)
-#define __NR_rt_sigsuspend (__NR_Linux + 199)
-#endif
-#ifndef __NR_pread64
-#define __NR_pread64 (__NR_Linux + 200)
-#endif
-#ifndef __NR_pwrite64
-#define __NR_pwrite64 (__NR_Linux + 201)
#endif
#ifndef __NR_stat64
#define __NR_stat64 (__NR_Linux + 213)
@@ -890,245 +598,59 @@ struct kernel_statfs {
#ifndef __NR_gettid
#define __NR_gettid (__NR_Linux + 222)
#endif
-#ifndef __NR_readahead
-#define __NR_readahead (__NR_Linux + 223)
-#endif
-#ifndef __NR_setxattr
-#define __NR_setxattr (__NR_Linux + 224)
-#endif
-#ifndef __NR_lsetxattr
-#define __NR_lsetxattr (__NR_Linux + 225)
-#endif
-#ifndef __NR_getxattr
-#define __NR_getxattr (__NR_Linux + 227)
-#endif
-#ifndef __NR_lgetxattr
-#define __NR_lgetxattr (__NR_Linux + 228)
-#endif
-#ifndef __NR_listxattr
-#define __NR_listxattr (__NR_Linux + 230)
-#endif
-#ifndef __NR_llistxattr
-#define __NR_llistxattr (__NR_Linux + 231)
-#endif
#ifndef __NR_futex
#define __NR_futex (__NR_Linux + 238)
#endif
-#ifndef __NR_sched_setaffinity
-#define __NR_sched_setaffinity (__NR_Linux + 239)
-#define __NR_sched_getaffinity (__NR_Linux + 240)
-#endif
-#ifndef __NR_set_tid_address
-#define __NR_set_tid_address (__NR_Linux + 252)
-#endif
-#ifndef __NR_statfs64
-#define __NR_statfs64 (__NR_Linux + 255)
-#endif
-#ifndef __NR_fstatfs64
-#define __NR_fstatfs64 (__NR_Linux + 256)
-#endif
-#ifndef __NR_clock_gettime
-#define __NR_clock_gettime (__NR_Linux + 263)
-#endif
-#ifndef __NR_clock_getres
-#define __NR_clock_getres (__NR_Linux + 264)
-#endif
#ifndef __NR_openat
#define __NR_openat (__NR_Linux + 288)
#endif
#ifndef __NR_fstatat
#define __NR_fstatat (__NR_Linux + 293)
#endif
-#ifndef __NR_unlinkat
-#define __NR_unlinkat (__NR_Linux + 294)
-#endif
-#ifndef __NR_move_pages
-#define __NR_move_pages (__NR_Linux + 308)
-#endif
#ifndef __NR_getcpu
#define __NR_getcpu (__NR_Linux + 312)
#endif
-#ifndef __NR_ioprio_set
-#define __NR_ioprio_set (__NR_Linux + 314)
-#endif
-#ifndef __NR_ioprio_get
-#define __NR_ioprio_get (__NR_Linux + 315)
-#endif
/* End of MIPS (old 32bit API) definitions */
#elif _MIPS_SIM == _MIPS_SIM_ABI64
-#ifndef __NR_pread64
-#define __NR_pread64 (__NR_Linux + 16)
-#endif
-#ifndef __NR_pwrite64
-#define __NR_pwrite64 (__NR_Linux + 17)
-#endif
-#ifndef __NR_setresuid
-#define __NR_setresuid (__NR_Linux + 115)
-#define __NR_setresgid (__NR_Linux + 117)
-#endif
#ifndef __NR_gettid
#define __NR_gettid (__NR_Linux + 178)
#endif
-#ifndef __NR_readahead
-#define __NR_readahead (__NR_Linux + 179)
-#endif
-#ifndef __NR_setxattr
-#define __NR_setxattr (__NR_Linux + 180)
-#endif
-#ifndef __NR_lsetxattr
-#define __NR_lsetxattr (__NR_Linux + 181)
-#endif
-#ifndef __NR_getxattr
-#define __NR_getxattr (__NR_Linux + 183)
-#endif
-#ifndef __NR_lgetxattr
-#define __NR_lgetxattr (__NR_Linux + 184)
-#endif
-#ifndef __NR_listxattr
-#define __NR_listxattr (__NR_Linux + 186)
-#endif
-#ifndef __NR_llistxattr
-#define __NR_llistxattr (__NR_Linux + 187)
-#endif
#ifndef __NR_futex
#define __NR_futex (__NR_Linux + 194)
#endif
-#ifndef __NR_sched_setaffinity
-#define __NR_sched_setaffinity (__NR_Linux + 195)
-#define __NR_sched_getaffinity (__NR_Linux + 196)
-#endif
-#ifndef __NR_set_tid_address
-#define __NR_set_tid_address (__NR_Linux + 212)
-#endif
-#ifndef __NR_clock_gettime
-#define __NR_clock_gettime (__NR_Linux + 222)
-#endif
-#ifndef __NR_clock_getres
-#define __NR_clock_getres (__NR_Linux + 223)
-#endif
#ifndef __NR_openat
#define __NR_openat (__NR_Linux + 247)
#endif
#ifndef __NR_fstatat
#define __NR_fstatat (__NR_Linux + 252)
#endif
-#ifndef __NR_unlinkat
-#define __NR_unlinkat (__NR_Linux + 253)
-#endif
-#ifndef __NR_move_pages
-#define __NR_move_pages (__NR_Linux + 267)
-#endif
#ifndef __NR_getcpu
#define __NR_getcpu (__NR_Linux + 271)
#endif
-#ifndef __NR_ioprio_set
-#define __NR_ioprio_set (__NR_Linux + 273)
-#endif
-#ifndef __NR_ioprio_get
-#define __NR_ioprio_get (__NR_Linux + 274)
-#endif
/* End of MIPS (64bit API) definitions */
#else
-#ifndef __NR_setresuid
-#define __NR_setresuid (__NR_Linux + 115)
-#define __NR_setresgid (__NR_Linux + 117)
-#endif
#ifndef __NR_gettid
#define __NR_gettid (__NR_Linux + 178)
#endif
-#ifndef __NR_readahead
-#define __NR_readahead (__NR_Linux + 179)
-#endif
-#ifndef __NR_setxattr
-#define __NR_setxattr (__NR_Linux + 180)
-#endif
-#ifndef __NR_lsetxattr
-#define __NR_lsetxattr (__NR_Linux + 181)
-#endif
-#ifndef __NR_getxattr
-#define __NR_getxattr (__NR_Linux + 183)
-#endif
-#ifndef __NR_lgetxattr
-#define __NR_lgetxattr (__NR_Linux + 184)
-#endif
-#ifndef __NR_listxattr
-#define __NR_listxattr (__NR_Linux + 186)
-#endif
-#ifndef __NR_llistxattr
-#define __NR_llistxattr (__NR_Linux + 187)
-#endif
#ifndef __NR_futex
#define __NR_futex (__NR_Linux + 194)
#endif
-#ifndef __NR_sched_setaffinity
-#define __NR_sched_setaffinity (__NR_Linux + 195)
-#define __NR_sched_getaffinity (__NR_Linux + 196)
-#endif
-#ifndef __NR_set_tid_address
-#define __NR_set_tid_address (__NR_Linux + 213)
-#endif
-#ifndef __NR_statfs64
-#define __NR_statfs64 (__NR_Linux + 217)
-#endif
-#ifndef __NR_fstatfs64
-#define __NR_fstatfs64 (__NR_Linux + 218)
-#endif
-#ifndef __NR_clock_gettime
-#define __NR_clock_gettime (__NR_Linux + 226)
-#endif
-#ifndef __NR_clock_getres
-#define __NR_clock_getres (__NR_Linux + 227)
-#endif
#ifndef __NR_openat
#define __NR_openat (__NR_Linux + 251)
#endif
#ifndef __NR_fstatat
#define __NR_fstatat (__NR_Linux + 256)
#endif
-#ifndef __NR_unlinkat
-#define __NR_unlinkat (__NR_Linux + 257)
-#endif
-#ifndef __NR_move_pages
-#define __NR_move_pages (__NR_Linux + 271)
-#endif
#ifndef __NR_getcpu
#define __NR_getcpu (__NR_Linux + 275)
#endif
-#ifndef __NR_ioprio_set
-#define __NR_ioprio_set (__NR_Linux + 277)
-#endif
-#ifndef __NR_ioprio_get
-#define __NR_ioprio_get (__NR_Linux + 278)
-#endif
/* End of MIPS (new 32bit API) definitions */
#endif
/* End of MIPS definitions */
#elif defined(__PPC__)
-#ifndef __NR_setfsuid
-#define __NR_setfsuid 138
-#define __NR_setfsgid 139
-#endif
-#ifndef __NR_setresuid
-#define __NR_setresuid 164
-#define __NR_setresgid 169
-#endif
#ifndef __NR_rt_sigaction
#define __NR_rt_sigaction 173
#define __NR_rt_sigprocmask 174
-#define __NR_rt_sigpending 175
-#define __NR_rt_sigsuspend 178
-#endif
-#ifndef __NR_pread64
-#define __NR_pread64 179
-#endif
-#ifndef __NR_pwrite64
-#define __NR_pwrite64 180
-#endif
-#ifndef __NR_ugetrlimit
-#define __NR_ugetrlimit 190
-#endif
-#ifndef __NR_readahead
-#define __NR_readahead 191
#endif
#ifndef __NR_stat64
#define __NR_stat64 195
@@ -1142,67 +664,12 @@ struct kernel_statfs {
#ifndef __NR_gettid
#define __NR_gettid 207
#endif
-#ifndef __NR_setxattr
-#define __NR_setxattr 209
-#endif
-#ifndef __NR_lsetxattr
-#define __NR_lsetxattr 210
-#endif
-#ifndef __NR_getxattr
-#define __NR_getxattr 212
-#endif
-#ifndef __NR_lgetxattr
-#define __NR_lgetxattr 213
-#endif
-#ifndef __NR_listxattr
-#define __NR_listxattr 215
-#endif
-#ifndef __NR_llistxattr
-#define __NR_llistxattr 216
-#endif
#ifndef __NR_futex
#define __NR_futex 221
#endif
-#ifndef __NR_sched_setaffinity
-#define __NR_sched_setaffinity 222
-#define __NR_sched_getaffinity 223
-#endif
-#ifndef __NR_set_tid_address
-#define __NR_set_tid_address 232
-#endif
-#ifndef __NR_clock_gettime
-#define __NR_clock_gettime 246
-#endif
-#ifndef __NR_clock_getres
-#define __NR_clock_getres 247
-#endif
-#ifndef __NR_statfs64
-#define __NR_statfs64 252
-#endif
-#ifndef __NR_fstatfs64
-#define __NR_fstatfs64 253
-#endif
-#ifndef __NR_fadvise64_64
-#define __NR_fadvise64_64 254
-#endif
-#ifndef __NR_ioprio_set
-#define __NR_ioprio_set 273
-#endif
-#ifndef __NR_ioprio_get
-#define __NR_ioprio_get 274
-#endif
#ifndef __NR_openat
#define __NR_openat 286
#endif
-#ifndef __NR_fstatat64
-#define __NR_fstatat64 291
-#endif
-#ifndef __NR_unlinkat
-#define __NR_unlinkat 292
-#endif
-#ifndef __NR_move_pages
-#define __NR_move_pages 301
-#endif
#ifndef __NR_getcpu
#define __NR_getcpu 302
#endif
@@ -1269,7 +736,7 @@ struct kernel_statfs {
#endif
#undef LSS_RETURN
- #if (defined(__i386__) || defined(__x86_64__) || defined(__ARM_ARCH_3__))
+ #if (defined(__i386__) || defined(__x86_64__) || defined(__arm__))
/* Failing system calls return a negative result in the range of
* -1..-4095. These are "errno" values with the sign inverted.
*/
@@ -1307,6 +774,15 @@ struct kernel_statfs {
} while (0)
#endif
#if defined(__i386__)
+ #if defined(NO_FRAME_POINTER) && (100 * __GNUC__ + __GNUC_MINOR__ >= 404)
+ /* This only works for GCC-4.4 and above -- the first version to use
+ .cfi directives for dwarf unwind info. */
+ #define CFI_ADJUST_CFA_OFFSET(adjust) \
+ ".cfi_adjust_cfa_offset " #adjust "\n"
+ #else
+ #define CFI_ADJUST_CFA_OFFSET(adjust) /**/
+ #endif
+
/* In PIC mode (e.g. when building shared libraries), gcc for i386
* reserves ebx. Unfortunately, most distribution ship with implementations
* of _syscallX() which clobber ebx.
@@ -1319,11 +795,13 @@ struct kernel_statfs {
#define LSS_BODY(type,args...) \
long __res; \
__asm__ __volatile__("push %%ebx\n" \
+ CFI_ADJUST_CFA_OFFSET(4) \
"movl %2,%%ebx\n" \
"int $0x80\n" \
- "pop %%ebx" \
+ "pop %%ebx\n" \
+ CFI_ADJUST_CFA_OFFSET(-4) \
args \
- : "memory"); \
+ : "esp", "memory"); \
LSS_RETURN(type,__res)
#undef _syscall0
#define _syscall0(type,name) \
@@ -1380,7 +858,7 @@ struct kernel_statfs {
: "i" (__NR_##name), "ri" ((long)(arg1)), \
"c" ((long)(arg2)), "d" ((long)(arg3)), \
"S" ((long)(arg4)), "D" ((long)(arg5)) \
- : "memory"); \
+ : "esp", "memory"); \
LSS_RETURN(type,__res); \
}
#undef _syscall6
@@ -1402,7 +880,7 @@ struct kernel_statfs {
: "i" (__NR_##name), "0" ((long)(&__s)), \
"c" ((long)(arg2)), "d" ((long)(arg3)), \
"S" ((long)(arg4)), "D" ((long)(arg5)) \
- : "memory"); \
+ : "esp", "memory"); \
LSS_RETURN(type,__res); \
}
LSS_INLINE int LSS_NAME(clone)(int (*fn)(void *), void *child_stack,
@@ -1488,36 +966,10 @@ struct kernel_statfs {
: "0"(-EINVAL), "i"(__NR_clone),
"m"(fn), "m"(child_stack), "m"(flags), "m"(arg),
"m"(parent_tidptr), "m"(newtls), "m"(child_tidptr)
- : "memory", "ecx", "edx", "esi", "edi");
+ : "esp", "memory", "ecx", "edx", "esi", "edi");
LSS_RETURN(int, __res);
}
- #define __NR__fadvise64_64 __NR_fadvise64_64
- LSS_INLINE _syscall6(int, _fadvise64_64, int, fd,
- unsigned, offset_lo, unsigned, offset_hi,
- unsigned, len_lo, unsigned, len_hi,
- int, advice)
-
- LSS_INLINE int LSS_NAME(fadvise64)(int fd, loff_t offset,
- loff_t len, int advice) {
- return LSS_NAME(_fadvise64_64)(fd,
- (unsigned)offset, (unsigned)(offset >>32),
- (unsigned)len, (unsigned)(len >> 32),
- advice);
- }
-
- #define __NR__fallocate __NR_fallocate
- LSS_INLINE _syscall6(int, _fallocate, int, fd,
- int, mode,
- unsigned, offset_lo, unsigned, offset_hi,
- unsigned, len_lo, unsigned, len_hi)
-
- LSS_INLINE int LSS_NAME(fallocate)(int fd, int mode,
- loff_t offset, loff_t len) {
- union { loff_t off; unsigned w[2]; } o = { offset }, l = { len };
- return LSS_NAME(_fallocate)(fd, mode, o.w[0], o.w[1], l.w[0], l.w[1]);
- }
-
LSS_INLINE void (*LSS_NAME(restore_rt)(void))(void) {
/* On i386, the kernel does not know how to return from a signal
* handler. Instead, it relies on user space to provide a
@@ -1596,7 +1048,7 @@ struct kernel_statfs {
__asm__ __volatile__("movq %5,%%r10; syscall" : \
"=a" (__res) : "0" (__NR_##name), \
"D" ((long)(arg1)), "S" ((long)(arg2)), "d" ((long)(arg3)), \
- "g" ((long)(arg4)) : "r10", "r11", "rcx", "memory"); \
+ "r" ((long)(arg4)) : "r10", "r11", "rcx", "memory"); \
LSS_RETURN(type, __res); \
}
#undef _syscall5
@@ -1608,7 +1060,7 @@ struct kernel_statfs {
__asm__ __volatile__("movq %5,%%r10; movq %6,%%r8; syscall" : \
"=a" (__res) : "0" (__NR_##name), \
"D" ((long)(arg1)), "S" ((long)(arg2)), "d" ((long)(arg3)), \
- "g" ((long)(arg4)), "g" ((long)(arg5)) : \
+ "r" ((long)(arg4)), "r" ((long)(arg5)) : \
"r8", "r10", "r11", "rcx", "memory"); \
LSS_RETURN(type, __res); \
}
@@ -1622,7 +1074,7 @@ struct kernel_statfs {
"syscall" : \
"=a" (__res) : "0" (__NR_##name), \
"D" ((long)(arg1)), "S" ((long)(arg2)), "d" ((long)(arg3)), \
- "g" ((long)(arg4)), "g" ((long)(arg5)), "g" ((long)(arg6)) : \
+ "r" ((long)(arg4)), "r" ((long)(arg5)), "r" ((long)(arg6)) : \
"r8", "r9", "r10", "r11", "rcx", "memory"); \
LSS_RETURN(type, __res); \
}
@@ -1631,8 +1083,6 @@ struct kernel_statfs {
void *newtls, int *child_tidptr) {
long __res;
{
- register void *__tls __asm__("r8") = newtls;
- register int *__ctid __asm__("r10") = child_tidptr;
__asm__ __volatile__(/* if (fn == NULL)
* return -EINVAL;
*/
@@ -1665,6 +1115,8 @@ struct kernel_statfs {
* %r10 = child_tidptr)
*/
"movq %2,%%rax\n"
+ "movq %9,%%r8\n"
+ "movq %10,%%r10\n"
"syscall\n"
/* if (%rax != 0)
@@ -1695,13 +1147,11 @@ struct kernel_statfs {
: "=a" (__res)
: "0"(-EINVAL), "i"(__NR_clone), "i"(__NR_exit),
"r"(fn), "S"(child_stack), "D"(flags), "r"(arg),
- "d"(parent_tidptr), "r"(__tls), "r"(__ctid)
- : "memory", "r11", "rcx");
+ "d"(parent_tidptr), "g"(newtls), "g"(child_tidptr)
+ : "rsp", "memory", "r8", "r10", "r11", "rcx");
}
LSS_RETURN(int, __res);
}
- LSS_INLINE _syscall4(int, fadvise64, int, fd, loff_t, offset, loff_t, len,
- int, advice)
LSS_INLINE void (*LSS_NAME(restore_rt)(void))(void) {
/* On x86-64, the kernel does not know how to return from
@@ -1721,7 +1171,7 @@ struct kernel_statfs {
: "i" (__NR_rt_sigreturn));
return res;
}
- #elif defined(__ARM_ARCH_3__)
+ #elif defined(__arm__)
/* Most definitions of _syscallX() neglect to mark "memory" as being
* clobbered. This causes problems with compilers, that do a better job
* at optimizing across __asm__ calls.
@@ -1729,12 +1179,26 @@ struct kernel_statfs {
*/
#undef LSS_REG
#define LSS_REG(r,a) register long __r##r __asm__("r"#r) = (long)a
+
+ /* r0..r3 are scratch registers and not preserved across function
+ * calls. We need to first evaluate the first 4 syscall arguments
+ * and store them on stack. They must be loaded into r0..r3 after
+ * all function calls to avoid r0..r3 being clobbered.
+ */
+ #undef LSS_SAVE_ARG
+ #define LSS_SAVE_ARG(r,a) long __tmp##r = (long)a
+ #undef LSS_LOAD_ARG
+ #define LSS_LOAD_ARG(r) register long __r##r __asm__("r"#r) = __tmp##r
+
#undef LSS_BODY
- #define LSS_BODY(type,name,args...) \
+ #define LSS_BODY(type, name, args...) \
register long __res_r0 __asm__("r0"); \
long __res; \
- __asm__ __volatile__ (__syscall(name) \
- : "=r"(__res_r0) : args : "lr", "memory"); \
+ __SYS_REG(name) \
+ __asm__ __volatile__ (__syscall_safe(name) \
+ : "=r"(__res_r0) \
+ : __SYS_REG_LIST(args) \
+ : "lr", "memory"); \
__res = __res_r0; \
LSS_RETURN(type, __res)
#undef _syscall0
@@ -1745,77 +1209,126 @@ struct kernel_statfs {
#undef _syscall1
#define _syscall1(type, name, type1, arg1) \
type LSS_NAME(name)(type1 arg1) { \
- LSS_REG(0, arg1); LSS_BODY(type, name, "r"(__r0)); \
+      /* There is no need to use a volatile temp. */ \
+ LSS_REG(0, arg1); \
+ LSS_BODY(type, name, "r"(__r0)); \
}
#undef _syscall2
#define _syscall2(type, name, type1, arg1, type2, arg2) \
type LSS_NAME(name)(type1 arg1, type2 arg2) { \
- LSS_REG(0, arg1); LSS_REG(1, arg2); \
+ LSS_SAVE_ARG(0, arg1); \
+ LSS_SAVE_ARG(1, arg2); \
+ LSS_LOAD_ARG(0); \
+ LSS_LOAD_ARG(1); \
LSS_BODY(type, name, "r"(__r0), "r"(__r1)); \
}
#undef _syscall3
#define _syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
type LSS_NAME(name)(type1 arg1, type2 arg2, type3 arg3) { \
- LSS_REG(0, arg1); LSS_REG(1, arg2); LSS_REG(2, arg3); \
+ LSS_SAVE_ARG(0, arg1); \
+ LSS_SAVE_ARG(1, arg2); \
+ LSS_SAVE_ARG(2, arg3); \
+ LSS_LOAD_ARG(0); \
+ LSS_LOAD_ARG(1); \
+ LSS_LOAD_ARG(2); \
LSS_BODY(type, name, "r"(__r0), "r"(__r1), "r"(__r2)); \
}
#undef _syscall4
- #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
+ #define _syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
+ type4, arg4) \
type LSS_NAME(name)(type1 arg1, type2 arg2, type3 arg3, type4 arg4) { \
- LSS_REG(0, arg1); LSS_REG(1, arg2); LSS_REG(2, arg3); \
- LSS_REG(3, arg4); \
+ LSS_SAVE_ARG(0, arg1); \
+ LSS_SAVE_ARG(1, arg2); \
+ LSS_SAVE_ARG(2, arg3); \
+ LSS_SAVE_ARG(3, arg4); \
+ LSS_LOAD_ARG(0); \
+ LSS_LOAD_ARG(1); \
+ LSS_LOAD_ARG(2); \
+ LSS_LOAD_ARG(3); \
LSS_BODY(type, name, "r"(__r0), "r"(__r1), "r"(__r2), "r"(__r3)); \
}
#undef _syscall5
- #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
- type5,arg5) \
+ #define _syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
+ type4, arg4, type5, arg5) \
type LSS_NAME(name)(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
type5 arg5) { \
- LSS_REG(0, arg1); LSS_REG(1, arg2); LSS_REG(2, arg3); \
- LSS_REG(3, arg4); LSS_REG(4, arg5); \
+ LSS_SAVE_ARG(0, arg1); \
+ LSS_SAVE_ARG(1, arg2); \
+ LSS_SAVE_ARG(2, arg3); \
+ LSS_SAVE_ARG(3, arg4); \
+ LSS_REG(4, arg5); \
+ LSS_LOAD_ARG(0); \
+ LSS_LOAD_ARG(1); \
+ LSS_LOAD_ARG(2); \
+ LSS_LOAD_ARG(3); \
LSS_BODY(type, name, "r"(__r0), "r"(__r1), "r"(__r2), "r"(__r3), \
"r"(__r4)); \
}
#undef _syscall6
- #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
- type5,arg5,type6,arg6) \
+ #define _syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
+ type4, arg4, type5, arg5, type6, arg6) \
type LSS_NAME(name)(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
type5 arg5, type6 arg6) { \
- LSS_REG(0, arg1); LSS_REG(1, arg2); LSS_REG(2, arg3); \
- LSS_REG(3, arg4); LSS_REG(4, arg5); LSS_REG(5, arg6); \
+ LSS_SAVE_ARG(0, arg1); \
+ LSS_SAVE_ARG(1, arg2); \
+ LSS_SAVE_ARG(2, arg3); \
+ LSS_SAVE_ARG(3, arg4); \
+ LSS_REG(4, arg5); \
+ LSS_REG(5, arg6); \
+ LSS_LOAD_ARG(0); \
+ LSS_LOAD_ARG(1); \
+ LSS_LOAD_ARG(2); \
+ LSS_LOAD_ARG(3); \
LSS_BODY(type, name, "r"(__r0), "r"(__r1), "r"(__r2), "r"(__r3), \
"r"(__r4), "r"(__r5)); \
}
LSS_INLINE int LSS_NAME(clone)(int (*fn)(void *), void *child_stack,
int flags, void *arg, int *parent_tidptr,
void *newtls, int *child_tidptr) {
- long __res;
+ register long __res __asm__("r5");
{
- register int __flags __asm__("r0") = flags;
- register void *__stack __asm__("r1") = child_stack;
- register void *__ptid __asm__("r2") = parent_tidptr;
- register void *__tls __asm__("r3") = newtls;
- register int *__ctid __asm__("r4") = child_tidptr;
- __asm__ __volatile__(/* if (fn == NULL || child_stack == NULL)
- * return -EINVAL;
- */
- "cmp %2,#0\n"
- "cmpne %3,#0\n"
- "moveq %0,%1\n"
- "beq 1f\n"
+ if (fn == NULL || child_stack == NULL) {
+ __res = -EINVAL;
+ goto clone_exit;
+ }
- /* Push "arg" and "fn" onto the stack that will be
- * used by the child.
- */
- "str %5,[%3,#-4]!\n"
- "str %2,[%3,#-4]!\n"
+      /* Stash the first 4 arguments on the stack, because we can only
+       * load them into r0..r3 after all function calls.
+       */
+ int tmp_flags = flags;
+ int * tmp_stack = (int*) child_stack;
+ void * tmp_ptid = parent_tidptr;
+ void * tmp_tls = newtls;
+
+ register int *__ctid __asm__("r4") = child_tidptr;
- /* %r0 = syscall(%r0 = flags,
+ /* Push "arg" and "fn" onto the stack that will be
+ * used by the child.
+ */
+ *(--tmp_stack) = (int) arg;
+ *(--tmp_stack) = (int) fn;
+
+ /* We must load r0..r3 last after all possible function calls. */
+ register int __flags __asm__("r0") = tmp_flags;
+ register void *__stack __asm__("r1") = tmp_stack;
+ register void *__ptid __asm__("r2") = tmp_ptid;
+ register void *__tls __asm__("r3") = tmp_tls;
+
+ /* %r0 = syscall(%r0 = flags,
+ * %r1 = child_stack,
+ * %r2 = parent_tidptr,
+ * %r3 = newtls,
+ * %r4 = child_tidptr)
+ */
+ __SYS_REG(clone)
+ __asm__ __volatile__(/* %r0 = syscall(%r0 = flags,
* %r1 = child_stack,
* %r2 = parent_tidptr,
* %r3 = newtls,
* %r4 = child_tidptr)
*/
+ "push {r7}\n"
+ "mov r7,%1\n"
__syscall(clone)"\n"
/* if (%r0 != 0)
@@ -1830,30 +1343,48 @@ struct kernel_statfs {
"mov lr,pc\n"
"ldr pc,[sp]\n"
- /* Call _exit(%r0).
+ /* Call _exit(%r0), which never returns. We only
+ * need to set r7 for EABI syscall ABI but we do
+ * this always to simplify code sharing between
+ * old and new syscall ABIs.
*/
+ "mov r7,%2\n"
__syscall(exit)"\n"
- "1:\n"
+
+ /* Pop r7 from the stack only in the parent.
+ */
+ "1: pop {r7}\n"
: "=r" (__res)
- : "i"(-EINVAL),
- "r"(fn), "r"(__stack), "r"(__flags), "r"(arg),
+ : "r"(__sysreg),
+ "i"(__NR_exit), "r"(__stack), "r"(__flags),
"r"(__ptid), "r"(__tls), "r"(__ctid)
- : "lr", "memory");
+ : "cc", "lr", "memory");
}
+ clone_exit:
LSS_RETURN(int, __res);
}
#elif defined(__mips__)
#undef LSS_REG
#define LSS_REG(r,a) register unsigned long __r##r __asm__("$"#r) = \
(unsigned long)(a)
+
+ #if _MIPS_SIM == _MIPS_SIM_ABI32
+ // See http://sources.redhat.com/ml/libc-alpha/2004-10/msg00050.html
+ // or http://www.linux-mips.org/archives/linux-mips/2004-10/msg00142.html
+ #define MIPS_SYSCALL_CLOBBERS "$1", "$3", "$8", "$9", "$10", "$11", "$12",\
+ "$13", "$14", "$15", "$24", "$25", "memory"
+ #else
+ #define MIPS_SYSCALL_CLOBBERS "$1", "$3", "$10", "$11", "$12", "$13", \
+ "$14", "$15", "$24", "$25", "memory"
+ #endif
+
#undef LSS_BODY
#define LSS_BODY(type,name,r7,...) \
register unsigned long __v0 __asm__("$2") = __NR_##name; \
__asm__ __volatile__ ("syscall\n" \
: "=&r"(__v0), r7 (__r7) \
: "0"(__v0), ##__VA_ARGS__ \
- : "$8", "$9", "$10", "$11", "$12", \
- "$13", "$14", "$15", "$24", "memory"); \
+ : MIPS_SYSCALL_CLOBBERS); \
LSS_RETURN(type, __v0, __r7)
#undef _syscall0
#define _syscall0(type, name) \
@@ -1911,8 +1442,7 @@ struct kernel_statfs {
: "=&r"(__v0), "+r" (__r7) \
: "i" (__NR_##name), "r"(__r4), "r"(__r5), \
"r"(__r6), "m" ((unsigned long)arg5) \
- : "$8", "$9", "$10", "$11", "$12", \
- "$13", "$14", "$15", "$24", "memory"); \
+ : MIPS_SYSCALL_CLOBBERS); \
LSS_RETURN(type, __v0, __r7); \
}
#else
@@ -1952,8 +1482,7 @@ struct kernel_statfs {
: "i" (__NR_##name), "r"(__r4), "r"(__r5), \
"r"(__r6), "r" ((unsigned long)arg5), \
"r" ((unsigned long)arg6) \
- : "$8", "$9", "$10", "$11", "$12", \
- "$13", "$14", "$15", "$24", "memory"); \
+ : MIPS_SYSCALL_CLOBBERS); \
LSS_RETURN(type, __v0, __r7); \
}
#else
@@ -2249,173 +1778,68 @@ struct kernel_statfs {
#define __NR__exit __NR_exit
#define __NR__gettid __NR_gettid
#define __NR__mremap __NR_mremap
- LSS_INLINE _syscall1(int, chdir, const char *,p)
LSS_INLINE _syscall1(int, close, int, f)
- LSS_INLINE _syscall2(int, clock_getres, int, c,
- struct kernel_timespec*, t)
- LSS_INLINE _syscall2(int, clock_gettime, int, c,
- struct kernel_timespec*, t)
- LSS_INLINE _syscall1(int, dup, int, f)
- LSS_INLINE _syscall2(int, dup2, int, s,
- int, d)
- LSS_INLINE _syscall3(int, execve, const char*, f,
- const char*const*,a,const char*const*, e)
LSS_INLINE _syscall1(int, _exit, int, e)
LSS_INLINE _syscall3(int, fcntl, int, f,
int, c, long, a)
- LSS_INLINE _syscall0(pid_t, fork)
LSS_INLINE _syscall2(int, fstat, int, f,
struct kernel_stat*, b)
- LSS_INLINE _syscall2(int, fstatfs, int, f,
- struct kernel_statfs*, b)
LSS_INLINE _syscall4(int, futex, int*, a,
int, o, int, v,
struct kernel_timespec*, t)
LSS_INLINE _syscall3(int, getdents, int, f,
struct kernel_dirent*, d, int, c)
+#ifdef __NR_getdents64
LSS_INLINE _syscall3(int, getdents64, int, f,
struct kernel_dirent64*, d, int, c)
- LSS_INLINE _syscall0(gid_t, getegid)
- LSS_INLINE _syscall0(uid_t, geteuid)
- LSS_INLINE _syscall0(pid_t, getpgrp)
+#endif
LSS_INLINE _syscall0(pid_t, getpid)
LSS_INLINE _syscall0(pid_t, getppid)
- LSS_INLINE _syscall2(int, getpriority, int, a,
- int, b)
- LSS_INLINE _syscall2(int, getrlimit, int, r,
- struct kernel_rlimit*, l)
- LSS_INLINE _syscall1(pid_t, getsid, pid_t, p)
LSS_INLINE _syscall0(pid_t, _gettid)
- LSS_INLINE _syscall5(int, setxattr, const char *,p,
- const char *, n, const void *,v,
- size_t, s, int, f)
- LSS_INLINE _syscall5(int, lsetxattr, const char *,p,
- const char *, n, const void *,v,
- size_t, s, int, f)
- LSS_INLINE _syscall4(ssize_t, getxattr, const char *,p,
- const char *, n, void *, v, size_t, s)
- LSS_INLINE _syscall4(ssize_t, lgetxattr, const char *,p,
- const char *, n, void *, v, size_t, s)
- LSS_INLINE _syscall3(ssize_t, listxattr, const char *,p,
- char *, l, size_t, s)
- LSS_INLINE _syscall3(ssize_t, llistxattr, const char *,p,
- char *, l, size_t, s)
- LSS_INLINE _syscall2(int, ioprio_get, int, which,
- int, who)
- LSS_INLINE _syscall3(int, ioprio_set, int, which,
- int, who, int, ioprio)
LSS_INLINE _syscall2(int, kill, pid_t, p,
int, s)
LSS_INLINE _syscall3(off_t, lseek, int, f,
off_t, o, int, w)
LSS_INLINE _syscall2(int, munmap, void*, s,
size_t, l)
- LSS_INLINE _syscall6(long, move_pages, pid_t, p,
- unsigned long, n, void **,g, int *, d,
- int *, s, int, f)
LSS_INLINE _syscall5(void*, _mremap, void*, o,
size_t, os, size_t, ns,
unsigned long, f, void *, a)
LSS_INLINE _syscall3(int, open, const char*, p,
int, f, int, m)
- LSS_INLINE _syscall3(int, poll, struct kernel_pollfd*, u,
- unsigned int, n, int, t)
LSS_INLINE _syscall2(int, prctl, int, o,
long, a)
LSS_INLINE _syscall4(long, ptrace, int, r,
pid_t, p, void *, a, void *, d)
LSS_INLINE _syscall3(ssize_t, read, int, f,
void *, b, size_t, c)
- LSS_INLINE _syscall3(int, readlink, const char*, p,
- char*, b, size_t, s)
LSS_INLINE _syscall4(int, rt_sigaction, int, s,
const struct kernel_sigaction*, a,
struct kernel_sigaction*, o, size_t, c)
- LSS_INLINE _syscall2(int, rt_sigpending, struct kernel_sigset_t *, s,
- size_t, c)
LSS_INLINE _syscall4(int, rt_sigprocmask, int, h,
const struct kernel_sigset_t*, s,
struct kernel_sigset_t*, o, size_t, c);
- LSS_INLINE _syscall2(int, rt_sigsuspend,
- const struct kernel_sigset_t*, s, size_t, c);
- LSS_INLINE _syscall3(int, sched_getaffinity,pid_t, p,
- unsigned int, l, unsigned long *, m)
- LSS_INLINE _syscall3(int, sched_setaffinity,pid_t, p,
- unsigned int, l, unsigned long *, m)
LSS_INLINE _syscall0(int, sched_yield)
- LSS_INLINE _syscall1(long, set_tid_address, int *, t)
- LSS_INLINE _syscall1(int, setfsgid, gid_t, g)
- LSS_INLINE _syscall1(int, setfsuid, uid_t, u)
- LSS_INLINE _syscall1(int, setuid, uid_t, u)
- LSS_INLINE _syscall1(int, setgid, gid_t, g)
- LSS_INLINE _syscall2(int, setpgid, pid_t, p,
- pid_t, g)
- LSS_INLINE _syscall3(int, setpriority, int, a,
- int, b, int, p)
- LSS_INLINE _syscall3(int, setresgid, gid_t, r,
- gid_t, e, gid_t, s)
- LSS_INLINE _syscall3(int, setresuid, uid_t, r,
- uid_t, e, uid_t, s)
- LSS_INLINE _syscall2(int, setrlimit, int, r,
- const struct kernel_rlimit*, l)
- LSS_INLINE _syscall0(pid_t, setsid)
LSS_INLINE _syscall2(int, sigaltstack, const stack_t*, s,
const stack_t*, o)
LSS_INLINE _syscall2(int, stat, const char*, f,
struct kernel_stat*, b)
- LSS_INLINE _syscall2(int, statfs, const char*, f,
- struct kernel_statfs*, b)
LSS_INLINE _syscall3(ssize_t, write, int, f,
const void *, b, size_t, c)
- LSS_INLINE _syscall3(ssize_t, writev, int, f,
- const struct kernel_iovec*, v, size_t, c)
#if defined(__NR_getcpu)
LSS_INLINE _syscall3(long, getcpu, unsigned *, cpu,
unsigned *, node, void *, unused);
#endif
#if defined(__x86_64__) || \
(defined(__mips__) && _MIPS_SIM != _MIPS_SIM_ABI32)
- LSS_INLINE _syscall3(int, recvmsg, int, s,
- struct kernel_msghdr*, m, int, f)
- LSS_INLINE _syscall3(int, sendmsg, int, s,
- const struct kernel_msghdr*, m, int, f)
- LSS_INLINE _syscall6(int, sendto, int, s,
- const void*, m, size_t, l,
- int, f,
- const struct kernel_sockaddr*, a, int, t)
- LSS_INLINE _syscall2(int, shutdown, int, s,
- int, h)
LSS_INLINE _syscall3(int, socket, int, d,
int, t, int, p)
- LSS_INLINE _syscall4(int, socketpair, int, d,
- int, t, int, p, int*, s)
#endif
#if defined(__x86_64__)
- LSS_INLINE _syscall4(int, fallocate, int, fd, int, mode,
- loff_t, offset, loff_t, len)
LSS_INLINE _syscall6(void*, mmap, void*, s,
size_t, l, int, p,
int, f, int, d,
__off64_t, o)
- LSS_INLINE _syscall4(int, newfstatat, int, d,
- const char *, p,
- struct kernel_stat*, b, int, f)
-
- LSS_INLINE int LSS_NAME(setfsgid32)(gid_t gid) {
- return LSS_NAME(setfsgid)(gid);
- }
-
- LSS_INLINE int LSS_NAME(setfsuid32)(uid_t uid) {
- return LSS_NAME(setfsuid)(uid);
- }
-
- LSS_INLINE int LSS_NAME(setresgid32)(gid_t rgid, gid_t egid, gid_t sgid) {
- return LSS_NAME(setresgid)(rgid, egid, sgid);
- }
-
- LSS_INLINE int LSS_NAME(setresuid32)(uid_t ruid, uid_t euid, uid_t suid) {
- return LSS_NAME(setresuid)(ruid, euid, suid);
- }
LSS_INLINE int LSS_NAME(sigaction)(int signum,
const struct kernel_sigaction *act,
@@ -2437,114 +1861,35 @@ struct kernel_statfs {
}
}
- LSS_INLINE int LSS_NAME(sigpending)(struct kernel_sigset_t *set) {
- return LSS_NAME(rt_sigpending)(set, (KERNEL_NSIG+7)/8);
- }
-
LSS_INLINE int LSS_NAME(sigprocmask)(int how,
const struct kernel_sigset_t *set,
struct kernel_sigset_t *oldset) {
return LSS_NAME(rt_sigprocmask)(how, set, oldset, (KERNEL_NSIG+7)/8);
}
-
- LSS_INLINE int LSS_NAME(sigsuspend)(const struct kernel_sigset_t *set) {
- return LSS_NAME(rt_sigsuspend)(set, (KERNEL_NSIG+7)/8);
- }
#endif
- #if defined(__x86_64__) || defined(__ARM_ARCH_3__) || \
+ #if defined(__x86_64__) || \
+ defined(__arm__) || \
(defined(__mips__) && _MIPS_SIM != _MIPS_SIM_ABI32)
LSS_INLINE _syscall4(pid_t, wait4, pid_t, p,
int*, s, int, o,
- struct kernel_rusage*, r)
-
+ struct kernel_rusage*, r)
LSS_INLINE pid_t LSS_NAME(waitpid)(pid_t pid, int *status, int options){
return LSS_NAME(wait4)(pid, status, options, 0);
}
- #endif
- #if defined(__i386__) || defined(__x86_64__)
+ #endif
+ #if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
LSS_INLINE _syscall4(int, openat, int, d, const char *, p, int, f, int, m)
- LSS_INLINE _syscall3(int, unlinkat, int, d, const char *, p, int, f)
- #endif
- #if defined(__i386__) || defined(__ARM_ARCH_3__)
- #define __NR__setfsgid32 __NR_setfsgid32
- #define __NR__setfsuid32 __NR_setfsuid32
- #define __NR__setresgid32 __NR_setresgid32
- #define __NR__setresuid32 __NR_setresuid32
- LSS_INLINE _syscall2(int, ugetrlimit, int, r,
- struct kernel_rlimit*, l)
- LSS_INLINE _syscall1(int, _setfsgid32, gid_t, f)
- LSS_INLINE _syscall1(int, _setfsuid32, uid_t, f)
- LSS_INLINE _syscall3(int, _setresgid32, gid_t, r,
- gid_t, e, gid_t, s)
- LSS_INLINE _syscall3(int, _setresuid32, uid_t, r,
- uid_t, e, uid_t, s)
-
- LSS_INLINE int LSS_NAME(setfsgid32)(gid_t gid) {
- int rc;
- if ((rc = LSS_NAME(_setfsgid32)(gid)) < 0 &&
- LSS_ERRNO == ENOSYS) {
- if ((unsigned int)gid & ~0xFFFFu) {
- rc = EINVAL;
- } else {
- rc = LSS_NAME(setfsgid)(gid);
- }
- }
- return rc;
- }
-
- LSS_INLINE int LSS_NAME(setfsuid32)(uid_t uid) {
- int rc;
- if ((rc = LSS_NAME(_setfsuid32)(uid)) < 0 &&
- LSS_ERRNO == ENOSYS) {
- if ((unsigned int)uid & ~0xFFFFu) {
- rc = EINVAL;
- } else {
- rc = LSS_NAME(setfsuid)(uid);
- }
- }
- return rc;
- }
-
- LSS_INLINE int LSS_NAME(setresgid32)(gid_t rgid, gid_t egid, gid_t sgid) {
- int rc;
- if ((rc = LSS_NAME(_setresgid32)(rgid, egid, sgid)) < 0 &&
- LSS_ERRNO == ENOSYS) {
- if ((unsigned int)rgid & ~0xFFFFu ||
- (unsigned int)egid & ~0xFFFFu ||
- (unsigned int)sgid & ~0xFFFFu) {
- rc = EINVAL;
- } else {
- rc = LSS_NAME(setresgid)(rgid, egid, sgid);
- }
- }
- return rc;
- }
-
- LSS_INLINE int LSS_NAME(setresuid32)(uid_t ruid, uid_t euid, uid_t suid) {
- int rc;
- if ((rc = LSS_NAME(_setresuid32)(ruid, euid, suid)) < 0 &&
- LSS_ERRNO == ENOSYS) {
- if ((unsigned int)ruid & ~0xFFFFu ||
- (unsigned int)euid & ~0xFFFFu ||
- (unsigned int)suid & ~0xFFFFu) {
- rc = EINVAL;
- } else {
- rc = LSS_NAME(setresuid)(ruid, euid, suid);
- }
- }
- return rc;
- }
#endif
LSS_INLINE int LSS_NAME(sigemptyset)(struct kernel_sigset_t *set) {
memset(&set->sig, 0, sizeof(set->sig));
return 0;
}
-
+
LSS_INLINE int LSS_NAME(sigfillset)(struct kernel_sigset_t *set) {
memset(&set->sig, -1, sizeof(set->sig));
return 0;
}
-
+
LSS_INLINE int LSS_NAME(sigaddset)(struct kernel_sigset_t *set,
int signum) {
if (signum < 1 || signum > (int)(8*sizeof(set->sig))) {
@@ -2556,7 +1901,7 @@ struct kernel_statfs {
return 0;
}
}
-
+
LSS_INLINE int LSS_NAME(sigdelset)(struct kernel_sigset_t *set,
int signum) {
if (signum < 1 || signum > (int)(8*sizeof(set->sig))) {
@@ -2568,30 +1913,26 @@ struct kernel_statfs {
return 0;
}
}
-
- LSS_INLINE int LSS_NAME(sigismember)(struct kernel_sigset_t *set,
- int signum) {
- if (signum < 1 || signum > (int)(8*sizeof(set->sig))) {
- LSS_ERRNO = EINVAL;
- return -1;
- } else {
- return !!(set->sig[(signum - 1)/(8*sizeof(set->sig[0]))] &
- (1UL << ((signum - 1) % (8*sizeof(set->sig[0])))));
- }
- }
- #if defined(__i386__) || defined(__ARM_ARCH_3__) || \
+
+ #if defined(__i386__) || \
+ defined(__arm__) || \
(defined(__mips__) && _MIPS_SIM == _MIPS_SIM_ABI32) || defined(__PPC__)
#define __NR__sigaction __NR_sigaction
- #define __NR__sigpending __NR_sigpending
#define __NR__sigprocmask __NR_sigprocmask
- #define __NR__sigsuspend __NR_sigsuspend
- #define __NR__socketcall __NR_socketcall
LSS_INLINE _syscall2(int, fstat64, int, f,
struct kernel_stat64 *, b)
LSS_INLINE _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
loff_t *, res, uint, wh)
+#ifdef __PPC64__
+ LSS_INLINE _syscall6(void*, mmap, void*, s,
+ size_t, l, int, p,
+ int, f, int, d,
+ off_t, o)
+#else
+ #ifndef __ARM_EABI__
+ /* Not available on ARM EABI Linux. */
LSS_INLINE _syscall1(void*, mmap, void*, a)
-#ifndef __PPC64__
+ #endif
LSS_INLINE _syscall6(void*, mmap2, void*, s,
size_t, l, int, p,
int, f, int, d,
@@ -2600,17 +1941,9 @@ struct kernel_statfs {
LSS_INLINE _syscall3(int, _sigaction, int, s,
const struct kernel_old_sigaction*, a,
struct kernel_old_sigaction*, o)
- LSS_INLINE _syscall1(int, _sigpending, unsigned long*, s)
LSS_INLINE _syscall3(int, _sigprocmask, int, h,
const unsigned long*, s,
unsigned long*, o)
- #ifdef __PPC__
- LSS_INLINE _syscall1(int, _sigsuspend, unsigned long, s)
- #else
- LSS_INLINE _syscall3(int, _sigsuspend, const void*, a,
- int, b,
- unsigned long, s)
- #endif
LSS_INLINE _syscall2(int, stat64, const char *, p,
struct kernel_stat64 *, b)
@@ -2676,17 +2009,6 @@ struct kernel_statfs {
return rc;
}
- LSS_INLINE int LSS_NAME(sigpending)(struct kernel_sigset_t *set) {
- int old_errno = LSS_ERRNO;
- int rc = LSS_NAME(rt_sigpending)(set, (KERNEL_NSIG+7)/8);
- if (rc < 0 && LSS_ERRNO == ENOSYS) {
- LSS_ERRNO = old_errno;
- LSS_NAME(sigemptyset)(set);
- rc = LSS_NAME(_sigpending)(&set->sig[0]);
- }
- return rc;
- }
-
LSS_INLINE int LSS_NAME(sigprocmask)(int how,
const struct kernel_sigset_t *set,
struct kernel_sigset_t *oldset) {
@@ -2703,20 +2025,6 @@ struct kernel_statfs {
}
return rc;
}
-
- LSS_INLINE int LSS_NAME(sigsuspend)(const struct kernel_sigset_t *set) {
- int olderrno = LSS_ERRNO;
- int rc = LSS_NAME(rt_sigsuspend)(set, (KERNEL_NSIG+7)/8);
- if (rc < 0 && LSS_ERRNO == ENOSYS) {
- LSS_ERRNO = olderrno;
- rc = LSS_NAME(_sigsuspend)(
- #ifndef __PPC__
- set, 0,
- #endif
- set->sig[0]);
- }
- return rc;
- }
#endif
#if defined(__PPC__)
#undef LSS_SC_LOADARGS_0
@@ -2773,90 +2081,31 @@ struct kernel_statfs {
} \
LSS_RETURN(type, __sc_ret, __sc_err)
- LSS_INLINE ssize_t LSS_NAME(recvmsg)(int s,struct kernel_msghdr *msg,
- int flags){
- LSS_SC_BODY(3, ssize_t, 17, s, msg, flags);
- }
-
- LSS_INLINE ssize_t LSS_NAME(sendmsg)(int s,
- const struct kernel_msghdr *msg,
- int flags) {
- LSS_SC_BODY(3, ssize_t, 16, s, msg, flags);
- }
-
- // TODO(csilvers): why is this ifdef'ed out?
-#if 0
- LSS_INLINE ssize_t LSS_NAME(sendto)(int s, const void *buf, size_t len,
- int flags,
- const struct kernel_sockaddr *to,
- unsigned int tolen) {
- LSS_BODY(6, ssize_t, 11, s, buf, len, flags, to, tolen);
- }
-#endif
-
- LSS_INLINE int LSS_NAME(shutdown)(int s, int how) {
- LSS_SC_BODY(2, int, 13, s, how);
- }
-
LSS_INLINE int LSS_NAME(socket)(int domain, int type, int protocol) {
LSS_SC_BODY(3, int, 1, domain, type, protocol);
}
-
- LSS_INLINE int LSS_NAME(socketpair)(int d, int type, int protocol,
- int sv[2]) {
- LSS_SC_BODY(4, int, 8, d, type, protocol, sv);
- }
#endif
- #if defined(__i386__) || defined(__ARM_ARCH_3__) || \
+ #if defined(__i386__) || \
+ (defined(__arm__) && !defined(__ARM_EABI__)) || \
(defined(__mips__) && _MIPS_SIM == _MIPS_SIM_ABI32)
- #define __NR__socketcall __NR_socketcall
- LSS_INLINE _syscall2(int, _socketcall, int, c,
- va_list, a)
-
- LSS_INLINE int LSS_NAME(socketcall)(int op, ...) {
- int rc;
- va_list ap;
- va_start(ap, op);
- rc = LSS_NAME(_socketcall)(op, ap);
- va_end(ap);
- return rc;
- }
-
- LSS_INLINE ssize_t LSS_NAME(recvmsg)(int s,struct kernel_msghdr *msg,
- int flags){
- return (ssize_t)LSS_NAME(socketcall)(17, s, msg, flags);
- }
- LSS_INLINE ssize_t LSS_NAME(sendmsg)(int s,
- const struct kernel_msghdr *msg,
- int flags) {
- return (ssize_t)LSS_NAME(socketcall)(16, s, msg, flags);
- }
-
- LSS_INLINE ssize_t LSS_NAME(sendto)(int s, const void *buf, size_t len,
- int flags,
- const struct kernel_sockaddr *to,
- unsigned int tolen) {
- return (ssize_t)LSS_NAME(socketcall)(11, s, buf, len, flags, to, tolen);
- }
-
- LSS_INLINE int LSS_NAME(shutdown)(int s, int how) {
- return LSS_NAME(socketcall)(13, s, how);
- }
+ /* See sys_socketcall in net/socket.c in kernel source.
+ * It de-multiplexes on its first arg and unpacks the arglist
+ * array in its second arg.
+ */
+ LSS_INLINE _syscall2(long, socketcall, int, c, unsigned long*, a)
LSS_INLINE int LSS_NAME(socket)(int domain, int type, int protocol) {
- return LSS_NAME(socketcall)(1, domain, type, protocol);
+ unsigned long args[3] = {
+ (unsigned long) domain,
+ (unsigned long) type,
+ (unsigned long) protocol
+ };
+ return LSS_NAME(socketcall)(1, args);
}
-
- LSS_INLINE int LSS_NAME(socketpair)(int d, int type, int protocol,
- int sv[2]) {
- return LSS_NAME(socketcall)(8, d, type, protocol, sv);
- }
- #endif
- #if defined(__i386__) || defined(__PPC__)
- LSS_INLINE _syscall4(int, fstatat64, int, d,
- const char *, p,
- struct kernel_stat64 *, b, int, f)
+ #elif defined(__ARM_EABI__)
+ LSS_INLINE _syscall3(int, socket, int, d,
+ int, t, int, p)
#endif
#if defined(__i386__) || defined(__PPC__) || \
(defined(__mips__) && _MIPS_SIM == _MIPS_SIM_ABI32)
@@ -2888,28 +2137,6 @@ struct kernel_statfs {
#else
LSS_INLINE _syscall1(int, pipe, int *, p)
#endif
- /* TODO(csilvers): see if ppc can/should support this as well */
- #if defined(__i386__) || defined(__ARM_ARCH_3__) || \
- (defined(__mips__) && _MIPS_SIM != _MIPS_SIM_ABI64)
- #define __NR__statfs64 __NR_statfs64
- #define __NR__fstatfs64 __NR_fstatfs64
- LSS_INLINE _syscall3(int, _statfs64, const char*, p,
- size_t, s,struct kernel_statfs64*, b)
- LSS_INLINE _syscall3(int, _fstatfs64, int, f,
- size_t, s,struct kernel_statfs64*, b)
- LSS_INLINE int LSS_NAME(statfs64)(const char *p,
- struct kernel_statfs64 *b) {
- return LSS_NAME(_statfs64)(p, sizeof(*b), b);
- }
- LSS_INLINE int LSS_NAME(fstatfs64)(int f,struct kernel_statfs64 *b) {
- return LSS_NAME(_fstatfs64)(f, sizeof(*b), b);
- }
- #endif
-
- LSS_INLINE int LSS_NAME(execv)(const char *path, const char *const argv[]) {
- extern char **environ;
- return LSS_NAME(execve)(path, argv, (const char *const *)environ);
- }
LSS_INLINE pid_t LSS_NAME(gettid)() {
pid_t tid = LSS_NAME(_gettid)();
@@ -2946,72 +2173,6 @@ struct kernel_statfs {
LSS_ERRNO = err;
return rc;
}
-
- LSS_INLINE int LSS_NAME(raise)(int sig) {
- return LSS_NAME(kill)(LSS_NAME(getpid)(), sig);
- }
-
- LSS_INLINE int LSS_NAME(setpgrp)() {
- return LSS_NAME(setpgid)(0, 0);
- }
-
- LSS_INLINE int LSS_NAME(sysconf)(int name) {
- extern int __getpagesize(void);
- switch (name) {
- case _SC_OPEN_MAX: {
- struct kernel_rlimit limit;
- return LSS_NAME(getrlimit)(RLIMIT_NOFILE, &limit) < 0
- ? 8192 : limit.rlim_cur;
- }
- case _SC_PAGESIZE:
- return __getpagesize();
- default:
- errno = ENOSYS;
- return -1;
- }
- }
- #if defined(__x86_64__) || \
- (defined(__mips__) && _MIPS_SIM == _MIPS_SIM_ABI64)
- LSS_INLINE _syscall4(ssize_t, pread64, int, f,
- void *, b, size_t, c,
- loff_t, o)
- LSS_INLINE _syscall4(ssize_t, pwrite64, int, f,
- const void *, b, size_t, c,
- loff_t, o)
- LSS_INLINE _syscall3(int, readahead, int, f,
- loff_t, o, unsigned, c)
- #else
- #define __NR__pread64 __NR_pread64
- #define __NR__pwrite64 __NR_pwrite64
- #define __NR__readahead __NR_readahead
- LSS_INLINE _syscall5(ssize_t, _pread64, int, f,
- void *, b, size_t, c, unsigned, o1,
- unsigned, o2)
- LSS_INLINE _syscall5(ssize_t, _pwrite64, int, f,
- const void *, b, size_t, c, unsigned, o1,
- long, o2)
- LSS_INLINE _syscall4(int, _readahead, int, f,
- unsigned, o1, unsigned, o2, size_t, c);
- /* We force 64bit-wide parameters onto the stack, then access each
- * 32-bit component individually. This guarantees that we build the
- * correct parameters independent of the native byte-order of the
- * underlying architecture.
- */
- LSS_INLINE ssize_t LSS_NAME(pread64)(int fd, void *buf, size_t count,
- loff_t off) {
- union { loff_t off; unsigned arg[2]; } o = { off };
- return LSS_NAME(_pread64)(fd, buf, count, o.arg[0], o.arg[1]);
- }
- LSS_INLINE ssize_t LSS_NAME(pwrite64)(int fd, const void *buf,
- size_t count, loff_t off) {
- union { loff_t off; unsigned arg[2]; } o = { off };
- return LSS_NAME(_pwrite64)(fd, buf, count, o.arg[0], o.arg[1]);
- }
- LSS_INLINE int LSS_NAME(readahead)(int fd, loff_t off, int len) {
- union { loff_t off; unsigned arg[2]; } o = { off };
- return LSS_NAME(_readahead)(fd, o.arg[0], o.arg[1], len);
- }
- #endif
#endif
#if defined(__cplusplus) && !defined(SYS_CPLUSPLUS)
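
One note on the sys_socketcall rewrite above: the kernel's sys_socketcall() expects a pointer to an unsigned long array as its second argument, which is why the old va_list-based wrapper was replaced by explicit argument packing. A hedged sketch of how another multiplexed operation would be expressed in the same style; the helper and its use of operation code 8 (SYS_SOCKETPAIR) mirror the removed socketpair wrapper and are illustrative only:

    /* Assumes the LSS_NAME(socketcall) wrapper defined in the hunk above. */
    static inline int my_socketpair(int d, int type, int protocol, int sv[2]) {
      unsigned long args[4] = {
        (unsigned long) d,
        (unsigned long) type,
        (unsigned long) protocol,
        (unsigned long) sv    /* pointer passed through an integer slot */
      };
      return LSS_NAME(socketcall)(8, args);  /* 8 == SYS_SOCKETPAIR */
    }
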
diff --git a/third_party/tcmalloc/chromium/src/base/logging.h b/third_party/tcmalloc/chromium/src/base/logging.h
index b24a030..d06d6a6 100644
--- a/third_party/tcmalloc/chromium/src/base/logging.h
+++ b/third_party/tcmalloc/chromium/src/base/logging.h
@@ -85,7 +85,7 @@ DECLARE_int32(verbose);
if (!(condition)) { \
WRITE_TO_STDERR("Check failed: " #condition "\n", \
sizeof("Check failed: " #condition "\n")-1); \
- exit(1); \
+ abort(); \
} \
} while (0)
@@ -95,7 +95,7 @@ DECLARE_int32(verbose);
if (!(condition)) { \
WRITE_TO_STDERR("Check failed: " #condition ": " message "\n", \
sizeof("Check failed: " #condition ": " message "\n")-1);\
- exit(1); \
+ abort(); \
} \
} while (0)
@@ -118,7 +118,7 @@ enum { DEBUG_MODE = 1 };
sizeof("Check failed: " #condition ": ")-1); \
WRITE_TO_STDERR(strerror(err_no), strlen(strerror(err_no))); \
WRITE_TO_STDERR("\n", sizeof("\n")-1); \
- exit(1); \
+ abort(); \
} \
} while (0)
@@ -135,7 +135,7 @@ enum { DEBUG_MODE = 1 };
do { \
if (!((val1) op (val2))) { \
fprintf(stderr, "Check failed: %s %s %s\n", #val1, #op, #val2); \
- exit(1); \
+ abort(); \
} \
} while (0)
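
The effect of the exit(1) -> abort() change above is that a failed check now raises SIGABRT, so it shows up as a real crash (usually with a core or crash dump) rather than a quiet clean exit. A hedged illustration using the CHECK macro this header defines:

    #include "base/logging.h"

    int main() {
      CHECK(2 + 2 == 4);  // passes silently
      CHECK(1 == 2);      // writes "Check failed: ..." then calls abort()
      return 0;           // never reached
    }
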
diff --git a/third_party/tcmalloc/chromium/src/base/low_level_alloc.cc b/third_party/tcmalloc/chromium/src/base/low_level_alloc.cc
index 532c594..c043cb6 100644
--- a/third_party/tcmalloc/chromium/src/base/low_level_alloc.cc
+++ b/third_party/tcmalloc/chromium/src/base/low_level_alloc.cc
@@ -38,7 +38,7 @@
#include "base/spinlock.h"
#include "base/logging.h"
#include "malloc_hook-inl.h"
-#include <google/malloc_hook.h>
+#include <gperftools/malloc_hook.h>
#include <errno.h>
#ifdef HAVE_UNISTD_H
#include <unistd.h>
@@ -233,7 +233,7 @@ namespace {
this->arena_->mu.Lock();
}
~ArenaLock() { RAW_CHECK(this->left_, "haven't left Arena region"); }
- void Leave() UNLOCK_FUNCTION() {
+ void Leave() /*UNLOCK_FUNCTION()*/ {
this->arena_->mu.Unlock();
#if 0
if (this->mask_valid_) {
diff --git a/third_party/tcmalloc/chromium/src/base/spinlock_internal.cc b/third_party/tcmalloc/chromium/src/base/spinlock_internal.cc
index b5b6ca4..b9fadde 100644
--- a/third_party/tcmalloc/chromium/src/base/spinlock_internal.cc
+++ b/third_party/tcmalloc/chromium/src/base/spinlock_internal.cc
@@ -42,6 +42,9 @@
#include "base/spinlock_internal.h"
+// forward declaration for use by spinlock_*-inl.h
+namespace base { namespace internal { static int SuggestedDelayNS(int loop); }}
+
#if defined(_WIN32)
#include "base/spinlock_win32-inl.h"
#elif defined(__linux__)
@@ -73,5 +76,27 @@ int32 SpinLockWait(volatile Atomic32 *w, int n,
return v;
}
+// Return a suggested delay in nanoseconds for iteration number "loop".
+static int SuggestedDelayNS(int loop) {
+ // Weak pseudo-random number generator to get some spread between threads
+ // when many are spinning.
+ static base::subtle::Atomic64 rand;
+ uint64 r = base::subtle::NoBarrier_Load(&rand);
+ r = 0x5deece66dLL * r + 0xb; // numbers from nrand48()
+ base::subtle::NoBarrier_Store(&rand, r);
+
+ r <<= 16; // 48-bit random number now in top 48 bits.
+ if (loop < 0 || loop > 32) { // limit loop to 0..32
+ loop = 32;
+ }
+ // loop>>3 cannot exceed 4 because loop cannot exceed 32.
+ // Select top 20..24 bits of lower 48 bits,
+ // giving approximately 0ms to 16ms.
+ // Mean is exponential in loop for first 32 iterations, then 8ms.
+ // The futex path multiplies this by 16, since we expect explicit wakeups
+ // almost always on that path.
+ return r >> (44 - (loop >> 3));
+}
+
} // namespace internal
} // namespace base
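
The new SuggestedDelayNS() above mixes a weak 48-bit linear congruential generator (the nrand48() constants) with a shift that widens every eight iterations, so the mean delay grows roughly exponentially from about 0.5 ms up to the 8 ms cap described in the comments. A single-threaded sketch that reproduces the curve, using a plain uint64_t where the original needs Atomic64:

  #include <stdint.h>
  #include <stdio.h>

  // Same recurrence as SuggestedDelayNS(), minus the atomics: a weak
  // 48-bit LCG (nrand48() constants) spread across backoff buckets.
  static uint64_t rng = 1;

  static int SuggestedDelayNS(int loop) {
    uint64_t r = rng = 0x5deece66dULL * rng + 0xb;
    r <<= 16;                          // random bits now in the top 48
    if (loop < 0 || loop > 32) loop = 32;
    return static_cast<int>(r >> (44 - (loop >> 3)));  // top 20..24 bits
  }

  int main() {
    for (int loop = 0; loop <= 32; loop += 8)
      printf("loop=%2d delay=%9d ns\n", loop, SuggestedDelayNS(loop));
    return 0;
  }
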
diff --git a/third_party/tcmalloc/chromium/src/base/spinlock_linux-inl.h b/third_party/tcmalloc/chromium/src/base/spinlock_linux-inl.h
index dc2c6ba..c190d52 100644
--- a/third_party/tcmalloc/chromium/src/base/spinlock_linux-inl.h
+++ b/third_party/tcmalloc/chromium/src/base/spinlock_linux-inl.h
@@ -31,6 +31,7 @@
* This file is a Linux-specific part of spinlock_internal.cc
*/
+#include <errno.h>
#include <sched.h>
#include <time.h>
#include <limits.h>
@@ -86,12 +87,12 @@ void SpinLockDelay(volatile Atomic32 *w, int32 value, int loop) {
struct timespec tm;
tm.tv_sec = 0;
if (have_futex) {
- tm.tv_nsec = 1000000; // 1ms; really we're trying to sleep for one
- // kernel clock tick
+ tm.tv_nsec = base::internal::SuggestedDelayNS(loop);
} else {
tm.tv_nsec = 2000001; // above 2ms so linux 2.4 doesn't spin
}
if (have_futex) {
+ tm.tv_nsec *= 16; // increase the delay; we expect explicit wakeups
syscall(__NR_futex, reinterpret_cast<int *>(const_cast<Atomic32 *>(w)),
FUTEX_WAIT | futex_private_flag,
value, reinterpret_cast<struct kernel_timespec *>(&tm));
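
On the futex path the computed delay is multiplied by 16 because waiters are normally released early by an explicit FUTEX_WAKE; the timeout is only a backstop. A minimal sketch of a timed FUTEX_WAIT on Linux (assumes <linux/futex.h>; there is no waker here, so it always times out):

  #include <errno.h>
  #include <linux/futex.h>
  #include <stdio.h>
  #include <sys/syscall.h>
  #include <time.h>
  #include <unistd.h>

  static int futex_word = 0;

  int main() {
    struct timespec tm = { 0, 50 * 1000 * 1000 };  // 50ms relative timeout
    // Sleep until futex_word changes from 0, a FUTEX_WAKE arrives, or
    // the timeout expires (then errno == ETIMEDOUT).
    long rc = syscall(SYS_futex, &futex_word, FUTEX_WAIT_PRIVATE,
                      /*expected=*/0, &tm, NULL, 0);
    if (rc == -1 && errno == ETIMEDOUT)
      printf("timed out, as expected with no waker\n");
    return 0;
  }
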
diff --git a/third_party/tcmalloc/chromium/src/base/spinlock_posix-inl.h b/third_party/tcmalloc/chromium/src/base/spinlock_posix-inl.h
index d188ebd..e1d43b7 100644
--- a/third_party/tcmalloc/chromium/src/base/spinlock_posix-inl.h
+++ b/third_party/tcmalloc/chromium/src/base/spinlock_posix-inl.h
@@ -49,7 +49,7 @@ void SpinLockDelay(volatile Atomic32 *w, int32 value, int loop) {
} else {
struct timespec tm;
tm.tv_sec = 0;
- tm.tv_nsec = 1000000;
+ tm.tv_nsec = base::internal::SuggestedDelayNS(loop);
nanosleep(&tm, NULL);
}
errno = save_errno;
diff --git a/third_party/tcmalloc/chromium/src/base/spinlock_win32-inl.h b/third_party/tcmalloc/chromium/src/base/spinlock_win32-inl.h
index ee23541..9e77311 100644
--- a/third_party/tcmalloc/chromium/src/base/spinlock_win32-inl.h
+++ b/third_party/tcmalloc/chromium/src/base/spinlock_win32-inl.h
@@ -42,6 +42,13 @@ void SpinLockDelay(volatile Atomic32 *w, int32 value, int loop) {
} else if (loop == 1) {
Sleep(0);
} else {
+ // TODO(dmikurube): Re-enable the commented-out code.
+ // The following line is commented out and the old "Sleep(1)" is kept
+ // because base/atomicops-internals-windows.h doesn't support the
+ // 64-bit operations that SuggestedDelayNS() needs.
+ //
+ // Commented-out code:
+ // Sleep(base::internal::SuggestedDelayNS(loop) / 1000000);
Sleep(1);
}
}
diff --git a/third_party/tcmalloc/chromium/src/base/stl_allocator.h b/third_party/tcmalloc/chromium/src/base/stl_allocator.h
index 22bd4ae..f7adb68 100644
--- a/third_party/tcmalloc/chromium/src/base/stl_allocator.h
+++ b/third_party/tcmalloc/chromium/src/base/stl_allocator.h
@@ -87,6 +87,7 @@ class STL_Allocator {
size_type max_size() const { return size_t(-1) / sizeof(T); }
void construct(pointer p, const T& val) { ::new(p) T(val); }
+ void construct(pointer p) { ::new(p) T(); }
void destroy(pointer p) { p->~T(); }
// There's no state, so these allocators are always equal
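
The one-line stl_allocator.h change adds a construct() overload that default-constructs in place, for container code that calls allocator.construct(p) with no value argument. A self-contained sketch of the two overloads; TinyAllocator and Widget are illustrative names, not the real STL_Allocator:

  #include <new>      // placement new
  #include <stdio.h>

  struct Widget {
    int id;
    Widget() : id(0) {}
  };

  // The pattern STL_Allocator uses: construct into raw storage with
  // placement new, with or without an initial value.
  template <typename T>
  struct TinyAllocator {
    void construct(T* p, const T& val) { ::new (p) T(val); }
    void construct(T* p) { ::new (p) T(); }  // the overload the patch adds
    void destroy(T* p) { p->~T(); }
  };

  int main() {
    void* raw = operator new(sizeof(Widget));
    Widget* w = static_cast<Widget*>(raw);
    TinyAllocator<Widget> a;
    a.construct(w);                 // default-constructs in place: id == 0
    printf("id=%d\n", w->id);
    a.destroy(w);
    operator delete(raw);
    return 0;
  }
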
diff --git a/third_party/tcmalloc/chromium/src/base/sysinfo.cc b/third_party/tcmalloc/chromium/src/base/sysinfo.cc
index 285630e..3a1873e 100644
--- a/third_party/tcmalloc/chromium/src/base/sysinfo.cc
+++ b/third_party/tcmalloc/chromium/src/base/sysinfo.cc
@@ -86,12 +86,20 @@
// time, so prefer making the syscalls directly if we can.
#ifdef HAVE_SYS_SYSCALL_H
# include <sys/syscall.h>
+#endif
+#ifdef SYS_open // Solaris 11, at least sometimes, only defines SYS_openat
# define safeopen(filename, mode) syscall(SYS_open, filename, mode)
-# define saferead(fd, buffer, size) syscall(SYS_read, fd, buffer, size)
-# define safeclose(fd) syscall(SYS_close, fd)
#else
# define safeopen(filename, mode) open(filename, mode)
+#endif
+#ifdef SYS_read
+# define saferead(fd, buffer, size) syscall(SYS_read, fd, buffer, size)
+#else
# define saferead(fd, buffer, size) read(fd, buffer, size)
+#endif
+#ifdef SYS_close
+# define safeclose(fd) syscall(SYS_close, fd)
+#else
# define safeclose(fd) close(fd)
#endif
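
The sysinfo.cc hunk splits one all-or-nothing #ifdef into a per-symbol fallback, since (per the comment) Solaris 11 can define SYS_openat without SYS_open. A sketch of the same pattern in isolation; demo_open is a made-up name:

  #include <fcntl.h>
  #include <stdio.h>
  #include <sys/syscall.h>  // the real code guards this on HAVE_SYS_SYSCALL_H
  #include <unistd.h>

  // Per-symbol fallback, mirroring the safeopen/saferead/safeclose fix:
  // take the raw syscall where the number exists (bypassing libc wrappers
  // that may allocate), otherwise fall back to the plain libc call.
  #ifdef SYS_open   // e.g. Solaris 11 may provide only SYS_openat
  # define demo_open(path, mode) syscall(SYS_open, path, mode)
  #else
  # define demo_open(path, mode) open(path, mode)
  #endif

  int main() {
    int fd = demo_open("/dev/null", O_RDONLY);
    if (fd >= 0) {
      printf("opened /dev/null as fd %d\n", fd);
      close(fd);
    }
    return 0;
  }
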
diff --git a/third_party/tcmalloc/chromium/src/base/vdso_support.cc b/third_party/tcmalloc/chromium/src/base/vdso_support.cc
index 444be26..c98d9fa 100644
--- a/third_party/tcmalloc/chromium/src/base/vdso_support.cc
+++ b/third_party/tcmalloc/chromium/src/base/vdso_support.cc
@@ -43,8 +43,8 @@
#include <stddef.h> // for std::ptrdiff_t
#include "base/atomicops.h" // for MemoryBarrier
-#include "base/logging.h"
#include "base/linux_syscall_support.h"
+#include "base/logging.h"
#include "base/dynamic_annotations.h"
#include "base/basictypes.h" // for COMPILE_ASSERT
@@ -54,285 +54,14 @@ using base::subtle::MemoryBarrier;
#define AT_SYSINFO_EHDR 33
#endif
-// From binutils/include/elf/common.h (this doesn't appear to be documented
-// anywhere else).
-//
-// /* This flag appears in a Versym structure. It means that the symbol
-// is hidden, and is only visible with an explicit version number.
-// This is a GNU extension. */
-// #define VERSYM_HIDDEN 0x8000
-//
-// /* This is the mask for the rest of the Versym information. */
-// #define VERSYM_VERSION 0x7fff
-
-#define VERSYM_VERSION 0x7fff
-
namespace base {
-namespace {
-template <int N> class ElfClass {
- public:
- static const int kElfClass = -1;
- static int ElfBind(const ElfW(Sym) *) {
- CHECK(false); // << "Unexpected word size";
- return 0;
- }
- static int ElfType(const ElfW(Sym) *) {
- CHECK(false); // << "Unexpected word size";
- return 0;
- }
-};
-
-template <> class ElfClass<32> {
- public:
- static const int kElfClass = ELFCLASS32;
- static int ElfBind(const ElfW(Sym) *symbol) {
- return ELF32_ST_BIND(symbol->st_info);
- }
- static int ElfType(const ElfW(Sym) *symbol) {
- return ELF32_ST_TYPE(symbol->st_info);
- }
-};
-
-template <> class ElfClass<64> {
- public:
- static const int kElfClass = ELFCLASS64;
- static int ElfBind(const ElfW(Sym) *symbol) {
- return ELF64_ST_BIND(symbol->st_info);
- }
- static int ElfType(const ElfW(Sym) *symbol) {
- return ELF64_ST_TYPE(symbol->st_info);
- }
-};
-
-typedef ElfClass<__WORDSIZE> CurrentElfClass;
-
-// Extract an element from one of the ELF tables, cast it to desired type.
-// This is just a simple arithmetic and a glorified cast.
-// Callers are responsible for bounds checking.
-template <class T>
-const T* GetTableElement(const ElfW(Ehdr) *ehdr,
- ElfW(Off) table_offset,
- ElfW(Word) element_size,
- size_t index) {
- return reinterpret_cast<const T*>(reinterpret_cast<const char *>(ehdr)
- + table_offset
- + index * element_size);
-}
-} // namespace
-
-const void *const VDSOSupport::kInvalidBase =
- reinterpret_cast<const void *>(~0L);
-
-const void *VDSOSupport::vdso_base_ = kInvalidBase;
+const void *VDSOSupport::vdso_base_ = ElfMemImage::kInvalidBase;
VDSOSupport::GetCpuFn VDSOSupport::getcpu_fn_ = &InitAndGetCPU;
-
-VDSOSupport::ElfMemImage::ElfMemImage(const void *base) {
- CHECK(base != kInvalidBase);
- Init(base);
-}
-
-int VDSOSupport::ElfMemImage::GetNumSymbols() const {
- if (!hash_) {
- return 0;
- }
- // See http://www.caldera.com/developers/gabi/latest/ch5.dynamic.html#hash
- return hash_[1];
-}
-
-const ElfW(Sym) *VDSOSupport::ElfMemImage::GetDynsym(int index) const {
- CHECK_LT(index, GetNumSymbols());
- return dynsym_ + index;
-}
-
-const ElfW(Versym) *VDSOSupport::ElfMemImage::GetVersym(int index) const {
- CHECK_LT(index, GetNumSymbols());
- return versym_ + index;
-}
-
-const ElfW(Phdr) *VDSOSupport::ElfMemImage::GetPhdr(int index) const {
- CHECK_LT(index, ehdr_->e_phnum);
- return GetTableElement<ElfW(Phdr)>(ehdr_,
- ehdr_->e_phoff,
- ehdr_->e_phentsize,
- index);
-}
-
-const char *VDSOSupport::ElfMemImage::GetDynstr(ElfW(Word) offset) const {
- CHECK_LT(offset, strsize_);
- return dynstr_ + offset;
-}
-
-const void *VDSOSupport::ElfMemImage::GetSymAddr(const ElfW(Sym) *sym) const {
- if (sym->st_shndx == SHN_UNDEF || sym->st_shndx >= SHN_LORESERVE) {
- // Symbol corresponds to "special" (e.g. SHN_ABS) section.
- return reinterpret_cast<const void *>(sym->st_value);
- }
- CHECK_LT(link_base_, sym->st_value);
- return GetTableElement<char>(ehdr_, 0, 1, sym->st_value) - link_base_;
-}
-
-const ElfW(Verdef) *VDSOSupport::ElfMemImage::GetVerdef(int index) const {
- CHECK_LE(index, verdefnum_);
- const ElfW(Verdef) *version_definition = verdef_;
- while (version_definition->vd_ndx < index && version_definition->vd_next) {
- const char *const version_definition_as_char =
- reinterpret_cast<const char *>(version_definition);
- version_definition =
- reinterpret_cast<const ElfW(Verdef) *>(version_definition_as_char +
- version_definition->vd_next);
- }
- return version_definition->vd_ndx == index ? version_definition : NULL;
-}
-
-const ElfW(Verdaux) *VDSOSupport::ElfMemImage::GetVerdefAux(
- const ElfW(Verdef) *verdef) const {
- return reinterpret_cast<const ElfW(Verdaux) *>(verdef+1);
-}
-
-const char *VDSOSupport::ElfMemImage::GetVerstr(ElfW(Word) offset) const {
- CHECK_LT(offset, strsize_);
- return dynstr_ + offset;
-}
-
-void VDSOSupport::ElfMemImage::Init(const void *base) {
- ehdr_ = NULL;
- dynsym_ = NULL;
- dynstr_ = NULL;
- versym_ = NULL;
- verdef_ = NULL;
- hash_ = NULL;
- strsize_ = 0;
- verdefnum_ = 0;
- link_base_ = ~0L; // Sentinel: PT_LOAD .p_vaddr can't possibly be this.
- if (!base) {
- return;
- }
- const intptr_t base_as_uintptr_t = reinterpret_cast<uintptr_t>(base);
- // Fake VDSO has low bit set.
- const bool fake_vdso = ((base_as_uintptr_t & 1) != 0);
- base = reinterpret_cast<const void *>(base_as_uintptr_t & ~1);
- const char *const base_as_char = reinterpret_cast<const char *>(base);
- if (base_as_char[EI_MAG0] != ELFMAG0 || base_as_char[EI_MAG1] != ELFMAG1 ||
- base_as_char[EI_MAG2] != ELFMAG2 || base_as_char[EI_MAG3] != ELFMAG3) {
- RAW_DCHECK(false, "no ELF magic"); // at %p", base);
- return;
- }
- int elf_class = base_as_char[EI_CLASS];
- if (elf_class != CurrentElfClass::kElfClass) {
- DCHECK_EQ(elf_class, CurrentElfClass::kElfClass);
- return;
- }
- switch (base_as_char[EI_DATA]) {
- case ELFDATA2LSB: {
- if (__LITTLE_ENDIAN != __BYTE_ORDER) {
- DCHECK_EQ(__LITTLE_ENDIAN, __BYTE_ORDER); // << ": wrong byte order";
- return;
- }
- break;
- }
- case ELFDATA2MSB: {
- if (__BIG_ENDIAN != __BYTE_ORDER) {
- DCHECK_EQ(__BIG_ENDIAN, __BYTE_ORDER); // << ": wrong byte order";
- return;
- }
- break;
- }
- default: {
- RAW_DCHECK(false, "unexpected data encoding"); // << base_as_char[EI_DATA];
- return;
- }
- }
-
- ehdr_ = reinterpret_cast<const ElfW(Ehdr) *>(base);
- const ElfW(Phdr) *dynamic_program_header = NULL;
- for (int i = 0; i < ehdr_->e_phnum; ++i) {
- const ElfW(Phdr) *const program_header = GetPhdr(i);
- switch (program_header->p_type) {
- case PT_LOAD:
- if (link_base_ == ~0L) {
- link_base_ = program_header->p_vaddr;
- }
- break;
- case PT_DYNAMIC:
- dynamic_program_header = program_header;
- break;
- }
- }
- if (link_base_ == ~0L || !dynamic_program_header) {
- RAW_DCHECK(~0L != link_base_, "no PT_LOADs in VDSO");
- RAW_DCHECK(dynamic_program_header, "no PT_DYNAMIC in VDSO");
- // Mark this image as not present. Can not recur infinitely.
- Init(0);
- return;
- }
- std::ptrdiff_t relocation =
- base_as_char - reinterpret_cast<const char *>(link_base_);
- ElfW(Dyn) *dynamic_entry =
- reinterpret_cast<ElfW(Dyn) *>(dynamic_program_header->p_vaddr +
- relocation);
- for (; dynamic_entry->d_tag != DT_NULL; ++dynamic_entry) {
- ElfW(Xword) value = dynamic_entry->d_un.d_val;
- if (fake_vdso) {
- // A complication: in the real VDSO, dynamic entries are not relocated
- // (it wasn't loaded by a dynamic loader). But when testing with a
- // "fake" dlopen()ed vdso library, the loader relocates some (but
- // not all!) of them before we get here.
- if (dynamic_entry->d_tag == DT_VERDEF) {
- // The only dynamic entry (of the ones we care about) libc-2.3.6
- // loader doesn't relocate.
- value += relocation;
- }
- } else {
- // Real VDSO. Everything needs to be relocated.
- value += relocation;
- }
- switch (dynamic_entry->d_tag) {
- case DT_HASH:
- hash_ = reinterpret_cast<ElfW(Word) *>(value);
- break;
- case DT_SYMTAB:
- dynsym_ = reinterpret_cast<ElfW(Sym) *>(value);
- break;
- case DT_STRTAB:
- dynstr_ = reinterpret_cast<const char *>(value);
- break;
- case DT_VERSYM:
- versym_ = reinterpret_cast<ElfW(Versym) *>(value);
- break;
- case DT_VERDEF:
- verdef_ = reinterpret_cast<ElfW(Verdef) *>(value);
- break;
- case DT_VERDEFNUM:
- verdefnum_ = dynamic_entry->d_un.d_val;
- break;
- case DT_STRSZ:
- strsize_ = dynamic_entry->d_un.d_val;
- break;
- default:
- // Unrecognized entries explicitly ignored.
- break;
- }
- }
- if (!hash_ || !dynsym_ || !dynstr_ || !versym_ ||
- !verdef_ || !verdefnum_ || !strsize_) {
- RAW_DCHECK(hash_, "invalid VDSO (no DT_HASH)");
- RAW_DCHECK(dynsym_, "invalid VDSO (no DT_SYMTAB)");
- RAW_DCHECK(dynstr_, "invalid VDSO (no DT_STRTAB)");
- RAW_DCHECK(versym_, "invalid VDSO (no DT_VERSYM)");
- RAW_DCHECK(verdef_, "invalid VDSO (no DT_VERDEF)");
- RAW_DCHECK(verdefnum_, "invalid VDSO (no DT_VERDEFNUM)");
- RAW_DCHECK(strsize_, "invalid VDSO (no DT_STRSZ)");
- // Mark this image as not present. Can not recur infinitely.
- Init(0);
- return;
- }
-}
-
VDSOSupport::VDSOSupport()
// If vdso_base_ is still set to kInvalidBase, we got here
// before VDSOSupport::Init has been called. Call it now.
- : image_(vdso_base_ == kInvalidBase ? Init() : vdso_base_) {
+ : image_(vdso_base_ == ElfMemImage::kInvalidBase ? Init() : vdso_base_) {
}
// NOTE: we can't use GoogleOnceInit() below, because we can be
@@ -345,7 +74,7 @@ VDSOSupport::VDSOSupport()
// Finally, even if there is a race here, it is harmless, because
// the operation should be idempotent.
const void *VDSOSupport::Init() {
- if (vdso_base_ == kInvalidBase) {
+ if (vdso_base_ == ElfMemImage::kInvalidBase) {
// Valgrind zaps AT_SYSINFO_EHDR and friends from the auxv[]
// on stack, and so glibc works as if VDSO was not present.
// But going directly to kernel via /proc/self/auxv below bypasses
@@ -372,7 +101,7 @@ const void *VDSOSupport::Init() {
}
}
close(fd);
- if (vdso_base_ == kInvalidBase) {
+ if (vdso_base_ == ElfMemImage::kInvalidBase) {
// Didn't find AT_SYSINFO_EHDR in auxv[].
vdso_base_ = NULL;
}
@@ -395,6 +124,7 @@ const void *VDSOSupport::Init() {
}
const void *VDSOSupport::SetBase(const void *base) {
+ CHECK(base != ElfMemImage::kInvalidBase);
const void *old_base = vdso_base_;
vdso_base_ = base;
image_.Init(base);
@@ -407,116 +137,12 @@ bool VDSOSupport::LookupSymbol(const char *name,
const char *version,
int type,
SymbolInfo *info) const {
- for (SymbolIterator it = begin(); it != end(); ++it) {
- if (strcmp(it->name, name) == 0 && strcmp(it->version, version) == 0 &&
- CurrentElfClass::ElfType(it->symbol) == type) {
- if (info) {
- *info = *it;
- }
- return true;
- }
- }
- return false;
+ return image_.LookupSymbol(name, version, type, info);
}
bool VDSOSupport::LookupSymbolByAddress(const void *address,
SymbolInfo *info_out) const {
- for (SymbolIterator it = begin(); it != end(); ++it) {
- const char *const symbol_start =
- reinterpret_cast<const char *>(it->address);
- const char *const symbol_end = symbol_start + it->symbol->st_size;
- if (symbol_start <= address && address < symbol_end) {
- if (info_out) {
- // Client wants to know details for that symbol (the usual case).
- if (CurrentElfClass::ElfBind(it->symbol) == STB_GLOBAL) {
- // Strong symbol; just return it.
- *info_out = *it;
- return true;
- } else {
- // Weak or local. Record it, but keep looking for a strong one.
- *info_out = *it;
- }
- } else {
- // Client only cares if there is an overlapping symbol.
- return true;
- }
- }
- }
- return false;
-}
-
-VDSOSupport::SymbolIterator::SymbolIterator(const void *const image, int index)
- : index_(index), image_(image) {
-}
-
-const VDSOSupport::SymbolInfo *VDSOSupport::SymbolIterator::operator->() const {
- return &info_;
-}
-
-const VDSOSupport::SymbolInfo& VDSOSupport::SymbolIterator::operator*() const {
- return info_;
-}
-
-bool VDSOSupport::SymbolIterator::operator==(const SymbolIterator &rhs) const {
- return this->image_ == rhs.image_ && this->index_ == rhs.index_;
-}
-
-bool VDSOSupport::SymbolIterator::operator!=(const SymbolIterator &rhs) const {
- return !(*this == rhs);
-}
-
-VDSOSupport::SymbolIterator &VDSOSupport::SymbolIterator::operator++() {
- this->Update(1);
- return *this;
-}
-
-VDSOSupport::SymbolIterator VDSOSupport::begin() const {
- SymbolIterator it(&image_, 0);
- it.Update(0);
- return it;
-}
-
-VDSOSupport::SymbolIterator VDSOSupport::end() const {
- return SymbolIterator(&image_, image_.GetNumSymbols());
-}
-
-void VDSOSupport::SymbolIterator::Update(int increment) {
- const ElfMemImage *image = reinterpret_cast<const ElfMemImage *>(image_);
- CHECK(image->IsPresent() || increment == 0);
- if (!image->IsPresent()) {
- return;
- }
- index_ += increment;
- if (index_ >= image->GetNumSymbols()) {
- index_ = image->GetNumSymbols();
- return;
- }
- const ElfW(Sym) *symbol = image->GetDynsym(index_);
- const ElfW(Versym) *version_symbol = image->GetVersym(index_);
- CHECK(symbol && version_symbol);
- const char *const symbol_name = image->GetDynstr(symbol->st_name);
- const ElfW(Versym) version_index = version_symbol[0] & VERSYM_VERSION;
- const ElfW(Verdef) *version_definition = NULL;
- const char *version_name = "";
- if (symbol->st_shndx == SHN_UNDEF) {
- // Undefined symbols reference DT_VERNEED, not DT_VERDEF, and
- // version_index could well be greater than verdefnum_, so calling
- // GetVerdef(version_index) may trigger assertion.
- } else {
- version_definition = image->GetVerdef(version_index);
- }
- if (version_definition) {
- // I am expecting 1 or 2 auxiliary entries: 1 for the version itself,
- // optional 2nd if the version has a parent.
- CHECK_LE(1, version_definition->vd_cnt);
- CHECK_LE(version_definition->vd_cnt, 2);
- const ElfW(Verdaux) *version_aux = image->GetVerdefAux(version_definition);
- version_name = image->GetVerstr(version_aux->vda_name);
- }
- info_.name = symbol_name;
- info_.version = version_name;
- info_.address = image->GetSymAddr(symbol);
- info_.symbol = symbol;
+ return image_.LookupSymbolByAddress(address, info_out);
}
// NOLINT on 'long' because this routine mimics kernel api.
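
The surviving Init() path reads /proc/self/auxv directly because Valgrind scrubs AT_SYSINFO_EHDR from the on-stack copy of the aux vector, as the comment above notes. A standalone sketch of that scan (Linux-only; error handling trimmed):

  #include <elf.h>      // auxv entry types, AT_SYSINFO_EHDR
  #include <fcntl.h>
  #include <link.h>     // ElfW() macro
  #include <stdio.h>
  #include <unistd.h>

  int main() {
    int fd = open("/proc/self/auxv", O_RDONLY);
    if (fd < 0) return 1;
    ElfW(auxv_t) aux;
    const void* vdso_base = NULL;
    // The aux vector is a flat array of {type, value} pairs ending at AT_NULL.
    while (read(fd, &aux, sizeof(aux)) == sizeof(aux)) {
      if (aux.a_type == AT_SYSINFO_EHDR) {
        vdso_base = reinterpret_cast<const void*>(aux.a_un.a_val);
        break;
      }
    }
    close(fd);
    printf("vdso base: %p\n", vdso_base);
    return 0;
  }
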
diff --git a/third_party/tcmalloc/chromium/src/base/vdso_support.h b/third_party/tcmalloc/chromium/src/base/vdso_support.h
index 131646a..b97ab25 100644
--- a/third_party/tcmalloc/chromium/src/base/vdso_support.h
+++ b/third_party/tcmalloc/chromium/src/base/vdso_support.h
@@ -1,5 +1,34 @@
-// Copyright 2008 Google Inc. All Rights Reserved.
-// Author: ppluzhnikov@google.com (Paul Pluzhnikov)
+// Copyright (c) 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// ---
+// Author: Paul Pluzhnikov
//
// Allow dynamic symbol lookup in the kernel VDSO page.
//
@@ -27,19 +56,14 @@
#define BASE_VDSO_SUPPORT_H_
#include <config.h>
-#ifdef HAVE_FEATURES_H
-#include <features.h> // for __GLIBC__
-#endif
#include "base/basictypes.h"
+#include "base/elf_mem_image.h"
-// Maybe one day we can rewrite this file not to require the elf
-// symbol extensions in glibc, but for right now we need them.
-#if defined(__ELF__) && defined(__GLIBC__)
+#ifdef HAVE_ELF_MEM_IMAGE
#define HAVE_VDSO_SUPPORT 1
#include <stdlib.h> // for NULL
-#include <link.h> // for ElfW
namespace base {
@@ -47,45 +71,17 @@ namespace base {
// use any memory allocation routines.
class VDSOSupport {
public:
- // Sentinel: there could never be a VDSO at this address.
- static const void *const kInvalidBase;
-
- // Information about a single vdso symbol.
- // All pointers are into .dynsym, .dynstr, or .text of the VDSO.
- // Do not free() them or modify through them.
- struct SymbolInfo {
- const char *name; // E.g. "__vdso_getcpu"
- const char *version; // E.g. "LINUX_2.6", could be ""
- // for unversioned symbol.
- const void *address; // Relocated symbol address.
- const ElfW(Sym) *symbol; // Symbol in the dynamic symbol table.
- };
-
- // Supports iteration over all dynamic symbols.
- class SymbolIterator {
- public:
- friend class VDSOSupport;
- const SymbolInfo *operator->() const;
- const SymbolInfo &operator*() const;
- SymbolIterator& operator++();
- bool operator!=(const SymbolIterator &rhs) const;
- bool operator==(const SymbolIterator &rhs) const;
- private:
- SymbolIterator(const void *const image, int index);
- void Update(int incr);
- SymbolInfo info_;
- int index_;
- const void *const image_;
- };
-
VDSOSupport();
+ typedef ElfMemImage::SymbolInfo SymbolInfo;
+ typedef ElfMemImage::SymbolIterator SymbolIterator;
+
// Answers whether we have a vdso at all.
bool IsPresent() const { return image_.IsPresent(); }
// Allow to iterate over all VDSO symbols.
- SymbolIterator begin() const;
- SymbolIterator end() const;
+ SymbolIterator begin() const { return image_.begin(); }
+ SymbolIterator end() const { return image_.end(); }
// Look up versioned dynamic symbol in the kernel VDSO.
// Returns false if VDSO is not present, or doesn't contain given
@@ -111,33 +107,6 @@ class VDSOSupport {
static const void *Init();
private:
- // An in-memory ELF image (may not exist on disk).
- class ElfMemImage {
- public:
- explicit ElfMemImage(const void *base);
- void Init(const void *base);
- bool IsPresent() const { return ehdr_ != NULL; }
- const ElfW(Phdr)* GetPhdr(int index) const;
- const ElfW(Sym)* GetDynsym(int index) const;
- const ElfW(Versym)* GetVersym(int index) const;
- const ElfW(Verdef)* GetVerdef(int index) const;
- const ElfW(Verdaux)* GetVerdefAux(const ElfW(Verdef) *verdef) const;
- const char* GetDynstr(ElfW(Word) offset) const;
- const void* GetSymAddr(const ElfW(Sym) *sym) const;
- const char* GetVerstr(ElfW(Word) offset) const;
- int GetNumSymbols() const;
- private:
- const ElfW(Ehdr) *ehdr_;
- const ElfW(Sym) *dynsym_;
- const ElfW(Versym) *versym_;
- const ElfW(Verdef) *verdef_;
- const ElfW(Word) *hash_;
- const char *dynstr_;
- size_t strsize_;
- size_t verdefnum_;
- ElfW(Addr) link_base_; // Link-time base (p_vaddr of first PT_LOAD).
- };
-
// image_ represents VDSO ELF image in memory.
// image_.ehdr_ == NULL implies there is no VDSO.
ElfMemImage image_;
@@ -181,6 +150,6 @@ class VDSOSupport {
int GetCPU();
} // namespace base
-#endif // __ELF__ and __GLIBC__
+#endif // HAVE_ELF_MEM_IMAGE
#endif // BASE_VDSO_SUPPORT_H_
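
After the refactoring, VDSOSupport's public surface is unchanged: IsPresent(), begin()/end(), LookupSymbol(), and LookupSymbolByAddress() all remain, now forwarding to ElfMemImage. A hedged usage sketch against this header; "__vdso_getcpu"/"LINUX_2.6" follow the x86-64 naming mentioned in the old SymbolInfo comment, and other kernels or architectures may differ:

  #include <elf.h>                 // for STT_FUNC
  #include <stdio.h>
  #include "base/vdso_support.h"   // the header patched above

  int main() {
    base::VDSOSupport vdso;
    if (!vdso.IsPresent()) {
      printf("no VDSO mapped (or running under Valgrind)\n");
      return 0;
    }
    base::VDSOSupport::SymbolInfo info;
    // Versioned lookup; the symbol name and version are kernel-specific.
    if (vdso.LookupSymbol("__vdso_getcpu", "LINUX_2.6", STT_FUNC, &info))
      printf("%s (%s) at %p\n", info.name, info.version, info.address);
    return 0;
  }
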