-rw-r--r--  base/atomicops.h                    |   2
-rw-r--r--  base/atomicops_internals_arm_gcc.h  | 124
-rw-r--r--  build/build_config.h                |   8
3 files changed, 133 insertions, 1 deletion
diff --git a/base/atomicops.h b/base/atomicops.h
index e481fbb..728b39b 100644
--- a/base/atomicops.h
+++ b/base/atomicops.h
@@ -130,6 +130,8 @@ Atomic64 Release_Load(volatile const Atomic64* ptr);
 #include "base/atomicops_internals_x86_macosx.h"
 #elif defined(COMPILER_GCC) && defined(ARCH_CPU_X86_FAMILY)
 #include "base/atomicops_internals_x86_gcc.h"
+#elif defined(COMPILER_GCC) && defined(ARCH_CPU_ARM_FAMILY)
+#include "base/atomicops_internals_arm_gcc.h"
 #else
 #error "Atomic operations are not supported on your platform"
 #endif
diff --git a/base/atomicops_internals_arm_gcc.h b/base/atomicops_internals_arm_gcc.h
new file mode 100644
index 0000000..e838f1b
--- /dev/null
+++ b/base/atomicops_internals_arm_gcc.h
@@ -0,0 +1,124 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is an internal atomic implementation, use base/atomicops.h instead.
+//
+// LinuxKernelCmpxchg and Barrier_AtomicIncrement are from Google Gears.
+
+#ifndef BASE_ATOMICOPS_INTERNALS_ARM_GCC_H_
+#define BASE_ATOMICOPS_INTERNALS_ARM_GCC_H_
+
+namespace base {
+namespace subtle {
+
+// 0xffff0fc0 is the hard coded address of a function provided by
+// the kernel which implements an atomic compare-exchange. On older
+// ARM architecture revisions (pre-v6) this may be implemented using
+// a syscall. This address is stable, and in active use (hard coded)
+// by at least glibc-2.7 and the Android C library.
+typedef Atomic32 (*LinuxKernelCmpxchgFunc)(Atomic32 old_value,
+                                           Atomic32 new_value,
+                                           volatile Atomic32* ptr);
+LinuxKernelCmpxchgFunc pLinuxKernelCmpxchg __attribute__((weak)) =
+    (LinuxKernelCmpxchgFunc) 0xffff0fc0;
+
+typedef void (*LinuxKernelMemoryBarrierFunc)(void);
+LinuxKernelMemoryBarrierFunc pLinuxKernelMemoryBarrier __attribute__((weak)) =
+    (LinuxKernelMemoryBarrierFunc) 0xffff0fa0;
+
+
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
+                                         Atomic32 old_value,
+                                         Atomic32 new_value) {
+  Atomic32 prev_value = *ptr;
+  do {
+    if (!pLinuxKernelCmpxchg(old_value, new_value,
+                             const_cast<Atomic32*>(ptr))) {
+      return old_value;
+    }
+    prev_value = *ptr;
+  } while (prev_value == old_value);
+  return prev_value;
+}
+
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
+                                         Atomic32 new_value) {
+  Atomic32 old_value;
+  do {
+    old_value = *ptr;
+  } while (pLinuxKernelCmpxchg(old_value, new_value,
+                               const_cast<Atomic32*>(ptr)));
+  return old_value;
+}
+
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
+                                          Atomic32 increment) {
+  return Barrier_AtomicIncrement(ptr, increment);
+}
+
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
+                                        Atomic32 increment) {
+  for (;;) {
+    // Atomic exchange the old value with an incremented one.
+    Atomic32 old_value = *ptr;
+    Atomic32 new_value = old_value + increment;
+    if (pLinuxKernelCmpxchg(old_value, new_value,
+                            const_cast<Atomic32*>(ptr)) == 0) {
+      // The exchange took place as expected.
+      return new_value;
+    }
+    // Otherwise, *ptr changed mid-loop and we need to retry.
+  }
+
+}
+
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
+                                       Atomic32 old_value,
+                                       Atomic32 new_value) {
+  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
+                                       Atomic32 old_value,
+                                       Atomic32 new_value) {
+  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
+  *ptr = value;
+}
+
+inline void MemoryBarrier() {
+  pLinuxKernelMemoryBarrier();
+}
+
+inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
+  *ptr = value;
+  MemoryBarrier();
+}
+
+inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
+  MemoryBarrier();
+  *ptr = value;
+}
+
+inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
+  return *ptr;
+}
+
+inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
+  Atomic32 value = *ptr;
+  MemoryBarrier();
+  return value;
+}
+
+inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
+  MemoryBarrier();
+  return *ptr;
+}
+
+}  // namespace base::subtle
+}  // namespace base
+
+#endif  // BASE_ATOMICOPS_INTERNALS_ARM_GCC_H_
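All of the read-modify-write operations in the new header funnel through the kernel cmpxchg helper in a retry loop. The sketch below is not part of the patch; it illustrates that loop in portable form. The hypothetical IncrementWithRetry mirrors Barrier_AtomicIncrement, with GCC's __sync_val_compare_and_swap builtin standing in for the helper at 0xffff0fc0, which exists only on ARM Linux.

// Minimal, self-contained sketch of the cmpxchg retry loop used by
// Barrier_AtomicIncrement in the patch. __sync_val_compare_and_swap is a
// GCC builtin standing in for pLinuxKernelCmpxchg; unlike the kernel
// helper (which returns 0 on success), it returns the value *ptr held
// before the operation.
#include <cstdio>

typedef int Atomic32;

static Atomic32 IncrementWithRetry(volatile Atomic32* ptr, Atomic32 increment) {
  for (;;) {
    Atomic32 old_value = *ptr;                   // snapshot the current value
    Atomic32 new_value = old_value + increment;  // compute the desired value
    // Install new_value only if *ptr still equals old_value; if another
    // thread changed *ptr in the meantime, loop and take a fresh snapshot.
    if (__sync_val_compare_and_swap(ptr, old_value, new_value) == old_value)
      return new_value;
  }
}

int main() {
  volatile Atomic32 counter = 0;
  IncrementWithRetry(&counter, 5);
  IncrementWithRetry(&counter, -2);
  std::printf("counter = %d\n", counter);  // prints 3
  return 0;
}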
diff --git a/build/build_config.h b/build/build_config.h
index c4c8ed7..4d4295b 100644
--- a/build/build_config.h
+++ b/build/build_config.h
@@ -43,6 +43,7 @@
 // Processor architecture detection. For more info on what's defined, see:
 // http://msdn.microsoft.com/en-us/library/b0084kay.aspx
 // http://www.agner.org/optimize/calling_conventions.pdf
+// or with gcc, run: "echo | gcc -E -dM -"
 #if defined(_M_X64) || defined(__x86_64__)
 #define ARCH_CPU_X86_FAMILY 1
 #define ARCH_CPU_X86_64 1
@@ -51,6 +52,10 @@
 #define ARCH_CPU_X86_FAMILY 1
 #define ARCH_CPU_X86 1
 #define ARCH_CPU_32_BITS 1
+#elif defined(__ARMEL__)
+#define ARCH_CPU_ARM_FAMILY 1
+#define ARCH_CPU_ARMEL 1
+#define ARCH_CPU_32_BITS 1
 #else
 #error Please add support for your architecture in build/build_config.h
 #endif
@@ -59,7 +64,8 @@
 #if defined(OS_WIN)
 #define WCHAR_T_IS_UTF16
 #elif defined(OS_POSIX) && defined(COMPILER_GCC) && \
-    defined(__WCHAR_MAX__) && __WCHAR_MAX__ == 0x7fffffff
+    defined(__WCHAR_MAX__) && \
+    (__WCHAR_MAX__ == 0x7fffffff || __WCHAR_MAX__ == 0xffffffff)
 #define WCHAR_T_IS_UTF32
 #else
 #error Please add support for your compiler in build/build_config.h
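The last hunk is needed because on ARM EABI wchar_t is an unsigned 32-bit type, so GCC predefines __WCHAR_MAX__ as 0xffffffff instead of the 0x7fffffff seen on x86 Linux; without the extra comparison the build would fall through to the #error branch. The values can be confirmed with the "echo | gcc -E -dM -" command mentioned in the new comment, or with a small check program like the one below (an illustration written for this note, not code from the patch).

// Prints the values the new build_config.h tests rely on.
#include <cstdio>
#include <cwchar>

int main() {
#if defined(__ARMEL__)
  std::printf("__ARMEL__ is defined: ARCH_CPU_ARM_FAMILY/ARCH_CPU_ARMEL apply\n");
#else
  std::printf("__ARMEL__ is not defined on this target\n");
#endif
  // On ARM EABI wchar_t is unsigned and 32 bits wide, so WCHAR_MAX is
  // 0xffffffff; on x86 Linux it is signed with WCHAR_MAX 0x7fffffff.
  // Both cases now satisfy the WCHAR_T_IS_UTF32 condition.
  std::printf("sizeof(wchar_t) = %u, WCHAR_MAX = %#lx\n",
              static_cast<unsigned>(sizeof(wchar_t)),
              static_cast<unsigned long>(WCHAR_MAX));
  return 0;
}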