author     Ian Rogers <irogers@google.com>  2014-04-21 17:01:02 -0700
committer  Ian Rogers <irogers@google.com>  2014-04-28 09:00:34 -0700
commit     a984454098971739a1469d62cba02cda3600268b
tree       60b69e4b189bd3a3d0c374c7eccc760648aac295 /runtime/atomic.h
parent     96a4f29350bf279d48bff70e21e3264cce216683
Avoid volatile 64-bit tearing on 32-bit architectures.
Change b122a4bbed34ab22b4c1541ee25e5cf22f12a926 removed the inline assembly for
volatile 64-bit reads and writes. That isn't sound in the general case, so reinstate it.
Motivating change: https://android-review.googlesource.com/91250
Also add an optimization for ARM cores with LPAE support.
Change-Id: Ie86d8885d27c8f0da75f0c3bd50d4553a331282f
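
For context on the bug being fixed: on a 32-bit target, a plain volatile int64_t access can be compiled as two separate 32-bit loads or stores, so a concurrent reader may observe half of an update ("tearing"). The following stand-alone sketch (not part of this commit; Writer, Reader, and shared are illustrative names) demonstrates the hazard using C++11 std::atomic, which provides the same no-tearing guarantee this patch restores in QuasiAtomic:

#include <stdint.h>

#include <atomic>
#include <cassert>
#include <thread>

// The writer alternates between 0 and -1, whose upper and lower 32-bit
// halves differ, so a torn read would surface as 0x00000000ffffffff or
// 0xffffffff00000000.
std::atomic<int64_t> shared(0);

void Writer() {
  for (int i = 0; i < 1000000; ++i) {
    shared.store((i & 1) ? -1 : 0, std::memory_order_relaxed);
  }
}

void Reader() {
  for (int i = 0; i < 1000000; ++i) {
    int64_t v = shared.load(std::memory_order_relaxed);
    assert(v == 0 || v == -1);  // Never a half-written value.
  }
}

int main() {
  std::thread w(Writer);
  std::thread r(Reader);
  w.join();
  r.join();
  return 0;
}

If shared were a plain volatile int64_t instead, the assertion could fire on a 32-bit build; that is exactly the tearing the reinstated assembly prevents.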
Diffstat (limited to 'runtime/atomic.h')
-rw-r--r--  runtime/atomic.h  67
1 file changed, 63 insertions(+), 4 deletions(-)
diff --git a/runtime/atomic.h b/runtime/atomic.h
index 795f917..6867fef 100644
--- a/runtime/atomic.h
+++ b/runtime/atomic.h
@@ -20,6 +20,7 @@
 #include <stdint.h>
 #include <vector>

+#include "base/logging.h"
 #include "base/macros.h"

 namespace art {
@@ -110,18 +111,76 @@ class QuasiAtomic {
   // Reads the 64-bit value at "addr" without tearing.
   static int64_t Read64(volatile const int64_t* addr) {
     if (!kNeedSwapMutexes) {
-      return *addr;
+      int64_t value;
+#if defined(__LP64__)
+      value = *addr;
+#else
+#if defined(__arm__)
+#if defined(__ARM_FEATURE_LPAE)
+      // With LPAE support (such as Cortex-A15), ldrd is defined not to tear.
+      __asm__ __volatile__("@ QuasiAtomic::Read64\n"
+                           "ldrd %0, %H0, %1"
+                           : "=r" (value)
+                           : "m" (*addr));
+#else
+      // Exclusive loads are defined not to tear; clearing the exclusive state isn't necessary.
+      __asm__ __volatile__("@ QuasiAtomic::Read64\n"
+                           "ldrexd %0, %H0, %1"
+                           : "=r" (value)
+                           : "Q" (*addr));
+#endif
+#elif defined(__i386__)
+      __asm__ __volatile__("movq %1, %0\n"
+                           : "=x" (value)
+                           : "m" (*addr));
+#else
+      LOG(FATAL) << "Unsupported architecture";
+#endif
+#endif  // defined(__LP64__)
+      return value;
     } else {
       return SwapMutexRead64(addr);
     }
   }

   // Writes to the 64-bit value at "addr" without tearing.
-  static void Write64(volatile int64_t* addr, int64_t val) {
+  static void Write64(volatile int64_t* addr, int64_t value) {
     if (!kNeedSwapMutexes) {
-      *addr = val;
+#if defined(__LP64__)
+      *addr = value;
+#else
+#if defined(__arm__)
+#if defined(__ARM_FEATURE_LPAE)
+      // With LPAE support (such as Cortex-A15), strd is defined not to tear.
+      __asm__ __volatile__("@ QuasiAtomic::Write64\n"
+                           "strd %1, %H1, %0"
+                           : "=m" (*addr)
+                           : "r" (value));
+#else
+      // The write is done as a swap so that the cache line is in the exclusive state for the store.
+      int64_t prev;
+      int status;
+      do {
+        __asm__ __volatile__("@ QuasiAtomic::Write64\n"
+                             "ldrexd %0, %H0, %2\n"
+                             "strexd %1, %3, %H3, %2"
+                             : "=&r" (prev), "=&r" (status), "+Q" (*addr)
+                             : "r" (value)
+                             : "cc");
+      } while (UNLIKELY(status != 0));
+#endif
+#elif defined(__i386__)
+      __asm__ __volatile__("movq %1, %0"
+                           : "=m" (*addr)
+                           : "x" (value));
+#else
+      LOG(FATAL) << "Unsupported architecture";
+#endif
+#endif  // defined(__LP64__)
     } else {
-      SwapMutexWrite64(addr, val);
+      SwapMutexWrite64(addr, value);
     }
   }
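
A hypothetical caller of the patched class, for illustration only (Stat64 is an invented name and the include path is assumed). Note that Read64 and Write64 guarantee only freedom from tearing, not any memory ordering, so on their own they suit statistics-style data rather than synchronization:

#include <stdint.h>

#include "atomic.h"  // runtime/atomic.h as patched above.

namespace art {

// A 64-bit value shared across threads on a 32-bit device. Read64/Write64
// keep readers from ever seeing a half-written value, but impose no
// ordering with respect to other memory accesses.
class Stat64 {
 public:
  Stat64() : value_(0) {}

  int64_t Load() const { return QuasiAtomic::Read64(&value_); }
  void Store(int64_t v) { QuasiAtomic::Write64(&value_, v); }

 private:
  volatile int64_t value_;
};

}  // namespace art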