author    Ian Rogers <irogers@google.com>  2014-04-28 16:02:49 +0000
committer Gerrit Code Review <noreply-gerritcodereview@google.com>  2014-04-28 16:02:50 +0000
commit    5dee5df89aa2cefef6c886d5b9b642cc6f1c595b (patch)
tree      3a1fcbcdbf1ec71d332150819953a907bab08fd4 /runtime
parent    53e5d10036a5d4fdf84b32354c11dd8f2140b8ab (diff)
parent    a984454098971739a1469d62cba02cda3600268b (diff)
Merge "Avoid volatile 64-bit tearing on 32-bit architectures."
Diffstat (limited to 'runtime')
-rw-r--r--  runtime/atomic.h | 67
1 file changed, 63 insertions(+), 4 deletions(-)
diff --git a/runtime/atomic.h b/runtime/atomic.h
index 795f917..6867fef 100644
--- a/runtime/atomic.h
+++ b/runtime/atomic.h
@@ -20,6 +20,7 @@
#include <stdint.h>
#include <vector>
+#include "base/logging.h"
#include "base/macros.h"
namespace art {
@@ -110,18 +111,76 @@ class QuasiAtomic {
// Reads the 64-bit value at "addr" without tearing.
static int64_t Read64(volatile const int64_t* addr) {
if (!kNeedSwapMutexes) {
- return *addr;
+ int64_t value;
+#if defined(__LP64__)
+ value = *addr;
+#else
+#if defined(__arm__)
+#if defined(__ARM_FEATURE_LPAE)
+ // With LPAE support (such as on Cortex-A15), ldrd is defined not to tear.
+ __asm__ __volatile__("@ QuasiAtomic::Read64\n"
+ "ldrd %0, %H0, %1"
+ : "=r" (value)
+ : "m" (*addr));
+#else
+ // Exclusive loads are defined not to tear; clearing the exclusive state isn't necessary.
+ __asm__ __volatile__("@ QuasiAtomic::Read64\n"
+ "ldrexd %0, %H0, %1"
+ : "=r" (value)
+ : "Q" (*addr));
+#endif
+#elif defined(__i386__)
+ __asm__ __volatile__(
+ "movq %1, %0\n"
+ : "=x" (value)
+ : "m" (*addr));
+#else
+ LOG(FATAL) << "Unsupported architecture";
+#endif
+#endif // defined(__LP64__)
+ return value;
} else {
return SwapMutexRead64(addr);
}
}
// Writes to the 64-bit value at "addr" without tearing.
- static void Write64(volatile int64_t* addr, int64_t val) {
+ static void Write64(volatile int64_t* addr, int64_t value) {
if (!kNeedSwapMutexes) {
- *addr = val;
+#if defined(__LP64__)
+ *addr = value;
+#else
+#if defined(__arm__)
+#if defined(__ARM_FEATURE_LPAE)
+ // If the ARM architecture has LPAE (such as Cortex-A15), strd is defined not to tear.
+ __asm__ __volatile__("@ QuasiAtomic::Write64\n"
+ "strd %1, %H1, %0"
+ : "=m"(*addr)
+ : "r" (value));
+#else
+ // The write is done as a swap so that the cache-line is in the exclusive state for the store.
+ int64_t prev;
+ int status;
+ do {
+ __asm__ __volatile__("@ QuasiAtomic::Write64\n"
+ "ldrexd %0, %H0, %2\n"
+ "strexd %1, %3, %H3, %2"
+ : "=&r" (prev), "=&r" (status), "+Q"(*addr)
+ : "r" (value)
+ : "cc");
+ } while (UNLIKELY(status != 0));
+#endif
+#elif defined(__i386__)
+ __asm__ __volatile__(
+ "movq %1, %0"
+ : "=m" (*addr)
+ : "x" (value));
+#else
+ LOG(FATAL) << "Unsupported architecture";
+#endif
+#endif // defined(__LP64__)
} else {
- SwapMutexWrite64(addr, val);
+ SwapMutexWrite64(addr, value);
}
}
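
The following is not part of the commit: a minimal standalone sketch, for illustration only, of how the same tear-free 64-bit access can be expressed with C++11 std::atomic, which compilers lower to instructions comparable to the hand-written assembly above (ldrexd/strexd, or ldrd/strd under LPAE, on 32-bit ARM; a single 8-byte SSE move or a cmpxchg8b loop on x86-32, depending on the compiler). The names counter, ReadCounter, and WriteCounter are hypothetical and do not appear in the patch.

// Illustrative sketch only -- not ART code.  std::atomic<int64_t> with
// relaxed ordering requests what QuasiAtomic::Read64/Write64 implement by
// hand: a 64-bit load/store that cannot tear, with no extra ordering.
#include <atomic>
#include <cstdint>
#include <cstdio>

std::atomic<int64_t> counter{0};  // hypothetical shared 64-bit value

int64_t ReadCounter() {
  // Atomic (untorn) read with relaxed ordering, analogous to Read64 above.
  return counter.load(std::memory_order_relaxed);
}

void WriteCounter(int64_t value) {
  // Atomic (untorn) write with relaxed ordering, analogous to Write64 above.
  counter.store(value, std::memory_order_relaxed);
}

int main() {
  WriteCounter(0x123456789abcdef0LL);
  std::printf("%lld\n", static_cast<long long>(ReadCounter()));
  return 0;
}

On targets where the hardware cannot provide an untorn 8-byte access at all, std::atomic<int64_t> falls back to a lock, which is the same role the swap-mutex path (kNeedSwapMutexes) plays in the code above.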