Diffstat (limited to 'libc/private/bionic_atomic_inline.h')
-rw-r--r--  libc/private/bionic_atomic_inline.h | 59 +++++++++--------------------------------------------
1 file changed, 9 insertions(+), 50 deletions(-)
diff --git a/libc/private/bionic_atomic_inline.h b/libc/private/bionic_atomic_inline.h
index 95766e1..821ad39 100644
--- a/libc/private/bionic_atomic_inline.h
+++ b/libc/private/bionic_atomic_inline.h
@@ -43,62 +43,21 @@
extern "C" {
#endif
-/*
- * Define the full memory barrier for an SMP system. This is
- * platform-specific.
+/* Define __ATOMIC_INLINE__ to control the inlining of all atomic
+ * functions declared here. For a slight performance boost, we want
+ * all of them to be always_inline.
*/
+#define __ATOMIC_INLINE__ static __inline__ __attribute__((always_inline))
#ifdef __arm__
-#include <machine/cpu-features.h>
-
-/*
- * For ARMv6K we need to issue a specific MCR instead of the DMB, since
- * that wasn't added until v7. For anything older, SMP isn't relevant.
- * Since we don't have an ARMv6K to test with, we're not going to deal
- * with that now.
- *
- * The DMB instruction is found in the ARM and Thumb2 instruction sets.
- * This will fail on plain 16-bit Thumb.
- */
-#if defined(__ARM_HAVE_DMB)
-# define _ANDROID_MEMBAR_FULL_SMP() \
- do { __asm__ __volatile__ ("dmb" ::: "memory"); } while (0)
+# include <bionic_atomic_arm.h>
+#elif defined(__i386__)
+# include <bionic_atomic_x86.h>
#else
-# define _ANDROID_MEMBAR_FULL_SMP() ARM_SMP_defined_but_no_DMB()
+# include <bionic_atomic_gcc_builtin.h>
#endif
-#elif defined(__i386__) || defined(__x86_64__)
-/*
- * For recent x86, we can use the SSE2 mfence instruction.
- */
-# define _ANDROID_MEMBAR_FULL_SMP() \
- do { __asm__ __volatile__ ("mfence" ::: "memory"); } while (0)
-
-#else
-/*
- * Implementation not defined for this platform. Hopefully we're building
- * in uniprocessor mode.
- */
-# define _ANDROID_MEMBAR_FULL_SMP() SMP_barrier_not_defined_for_platform()
-#endif
-
-
-/*
- * Full barrier. On uniprocessors this is just a compiler reorder barrier,
- * which ensures that the statements appearing above the barrier in the C/C++
- * code will be issued after the statements appearing below the barrier.
- *
- * For SMP this also includes a memory barrier instruction. On an ARM
- * CPU this means that the current core will flush pending writes, wait
- * for pending reads to complete, and discard any cached reads that could
- * be stale. Other CPUs may do less, but the end result is equivalent.
- */
-#if ANDROID_SMP != 0
-# define ANDROID_MEMBAR_FULL() _ANDROID_MEMBAR_FULL_SMP()
-#else
-# define ANDROID_MEMBAR_FULL() \
- do { __asm__ __volatile__ ("" ::: "memory"); } while (0)
-#endif
+#define ANDROID_MEMBAR_FULL __bionic_memory_barrier
#ifdef __cplusplus
} // extern "C"
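
The header split moves the actual barrier implementation out of this file, so the diff above only shows the dispatch. As a rough illustration of the generic fallback path, here is a minimal sketch assuming bionic_atomic_gcc_builtin.h builds on the GCC __sync builtins; only the macro names come from this patch, the function body is an assumption:

  /* Sketch only, not the verbatim contents of bionic_atomic_gcc_builtin.h. */
  #define __ATOMIC_INLINE__ static __inline__ __attribute__((always_inline))

  /* Assumed implementation: __sync_synchronize() emits a full
   * compiler-and-CPU memory barrier on any GCC target. */
  __ATOMIC_INLINE__ void __bionic_memory_barrier(void) {
    __sync_synchronize();
  }

With the barrier now an always_inline function rather than a function-like macro, ANDROID_MEMBAR_FULL can simply alias it, which is what the last added line does: a call to ANDROID_MEMBAR_FULL() expands to __bionic_memory_barrier().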
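The deleted inline assembly also hints at what the new ARM header presumably contains: a dmb instruction on SMP builds and a compiler-only reorder barrier on uniprocessor builds, the two cases the removed comment described. A hedged reconstruction follows; the dmb, the empty-asm compiler barrier, and the ANDROID_SMP test all come straight from the deleted lines, but wrapping them in a __bionic_memory_barrier function is an assumption:

  __ATOMIC_INLINE__ void __bionic_memory_barrier(void) {
  #if ANDROID_SMP != 0
    /* ARMv7 data memory barrier: drains pending writes and discards
     * possibly-stale cached reads, per the old comment. DMB exists in
     * the ARM and Thumb2 instruction sets but not plain 16-bit Thumb. */
    __asm__ __volatile__ ("dmb" ::: "memory");
  #else
    /* Uniprocessor: compiler reordering is the only hazard, so an
     * empty asm with a "memory" clobber is sufficient. */
    __asm__ __volatile__ ("" ::: "memory");
  #endif
  }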