author    glider@chromium.org <glider@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>    2012-10-02 16:28:23 +0000
committer glider@chromium.org <glider@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>    2012-10-02 16:28:23 +0000
commit    1e6d0a5934d87473678c5ca193664aff1964d86f (patch)
tree      2785fb4b297291acdbb5de533677141eef6ae0c8
parent    3173de14129a900edbfe9e3afab2caa0d0a3b723 (diff)
Atomics implementation for compiler-based ThreadSanitizer (http://dev.chromium.org/developers/testing/threadsanitizer-tsan-v2)
BUG=128314

Review URL: https://codereview.chromium.org/10948035

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@159705 0039d316-1c4b-4281-b951-d872f2087c98
-rw-r--r--    base/atomicops.h                   4
-rw-r--r--    base/atomicops_internals_tsan.h  313
-rw-r--r--    base/base.gypi                     1
3 files changed, 317 insertions(+), 1 deletion(-)
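
In short: when Chromium is built with ThreadSanitizer instrumentation, the portable base/atomicops.h operations are rerouted through the __tsan_* intrinsics that the race detector understands, while callers are untouched. A minimal caller sketch (hypothetical client code, not part of this patch):

#include "base/atomicops.h"

base::subtle::Atomic32 g_ready = 0;
int g_payload = 0;

void Producer() {
  g_payload = 42;
  // Under THREAD_SANITIZER this lowers to __tsan_atomic32_store(...,
  // __tsan_memory_order_release), so the detector records the release edge.
  base::subtle::Release_Store(&g_ready, 1);
}

int Consumer() {
  // Pairs with the Release_Store above; no data race is reported on
  // g_payload when the flag is observed set.
  if (base::subtle::Acquire_Load(&g_ready))
    return g_payload;
  return 0;
}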
diff --git a/base/atomicops.h b/base/atomicops.h
index b06c8c1..de5554e 100644
--- a/base/atomicops.h
+++ b/base/atomicops.h
@@ -128,7 +128,9 @@ Atomic64 Release_Load(volatile const Atomic64* ptr);
} // namespace base
// Include our platform specific implementation.
-#if defined(OS_WIN) && defined(COMPILER_MSVC) && defined(ARCH_CPU_X86_FAMILY)
+#if defined(THREAD_SANITIZER)
+#include "base/atomicops_internals_tsan.h"
+#elif defined(OS_WIN) && defined(COMPILER_MSVC) && defined(ARCH_CPU_X86_FAMILY)
#include "base/atomicops_internals_x86_msvc.h"
#elif defined(OS_MACOSX)
#include "base/atomicops_internals_mac.h"
diff --git a/base/atomicops_internals_tsan.h b/base/atomicops_internals_tsan.h
new file mode 100644
index 0000000..94fe983
--- /dev/null
+++ b/base/atomicops_internals_tsan.h
@@ -0,0 +1,313 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is an internal atomic implementation for compiler-based
+// ThreadSanitizer. Use base/atomicops.h instead.
+
+#ifndef BASE_ATOMICOPS_INTERNALS_TSAN_H_
+#define BASE_ATOMICOPS_INTERNALS_TSAN_H_
+
+#include "base/base_export.h"
+
+// This struct is not part of the public API of this module; clients may not
+// use it. (However, it's exported via BASE_EXPORT because clients implicitly
+// do use it at link time by inlining these functions.)
+// Features of the host x86 CPU. Values may not be correct before main() is
+// run, but are set conservatively.
+struct AtomicOps_x86CPUFeatureStruct {
+ bool has_amd_lock_mb_bug; // Processor has AMD memory-barrier bug; do lfence
+ // after acquire compare-and-swap.
+ bool has_sse2; // Processor has SSE2.
+};
+BASE_EXPORT extern struct AtomicOps_x86CPUFeatureStruct
+ AtomicOps_Internalx86CPUFeatures;
+
+#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")
+
+namespace base {
+namespace subtle {
+
+#ifndef TSAN_INTERFACE_ATOMIC_H
+#define TSAN_INTERFACE_ATOMIC_H
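+// The declarations below are a local copy of the TSan runtime's public
+// atomic interface; the guard keeps them from clashing with the real
+// tsan_interface_atomic.h if both end up in the same translation unit.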
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef char __tsan_atomic8;
+typedef short __tsan_atomic16; // NOLINT
+typedef int __tsan_atomic32;
+typedef long __tsan_atomic64; // NOLINT
+
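+// These mirror the like-named C++11 std::memory_order values, except that
+// the constants are distinct bits rather than consecutive integers, which
+// appears to match the runtime's own declarations.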
+typedef enum {
+ __tsan_memory_order_relaxed = 1 << 0,
+ __tsan_memory_order_consume = 1 << 1,
+ __tsan_memory_order_acquire = 1 << 2,
+ __tsan_memory_order_release = 1 << 3,
+ __tsan_memory_order_acq_rel = 1 << 4,
+ __tsan_memory_order_seq_cst = 1 << 5,
+} __tsan_memory_order;
+
+__tsan_atomic8 __tsan_atomic8_load(const volatile __tsan_atomic8 *a,
+ __tsan_memory_order mo);
+__tsan_atomic16 __tsan_atomic16_load(const volatile __tsan_atomic16 *a,
+ __tsan_memory_order mo);
+__tsan_atomic32 __tsan_atomic32_load(const volatile __tsan_atomic32 *a,
+ __tsan_memory_order mo);
+__tsan_atomic64 __tsan_atomic64_load(const volatile __tsan_atomic64 *a,
+ __tsan_memory_order mo);
+
+void __tsan_atomic8_store(volatile __tsan_atomic8 *a, __tsan_atomic8 v,
+ __tsan_memory_order mo);
+void __tsan_atomic16_store(volatile __tsan_atomic16 *a, __tsan_atomic16 v,
+ __tsan_memory_order mo);
+void __tsan_atomic32_store(volatile __tsan_atomic32 *a, __tsan_atomic32 v,
+ __tsan_memory_order mo);
+void __tsan_atomic64_store(volatile __tsan_atomic64 *a, __tsan_atomic64 v,
+ __tsan_memory_order mo);
+
+__tsan_atomic8 __tsan_atomic8_exchange(volatile __tsan_atomic8 *a,
+ __tsan_atomic8 v, __tsan_memory_order mo);
+__tsan_atomic16 __tsan_atomic16_exchange(volatile __tsan_atomic16 *a,
+ __tsan_atomic16 v, __tsan_memory_order mo);
+__tsan_atomic32 __tsan_atomic32_exchange(volatile __tsan_atomic32 *a,
+ __tsan_atomic32 v, __tsan_memory_order mo);
+__tsan_atomic64 __tsan_atomic64_exchange(volatile __tsan_atomic64 *a,
+ __tsan_atomic64 v, __tsan_memory_order mo);
+
+__tsan_atomic8 __tsan_atomic8_fetch_add(volatile __tsan_atomic8 *a,
+ __tsan_atomic8 v, __tsan_memory_order mo);
+__tsan_atomic16 __tsan_atomic16_fetch_add(volatile __tsan_atomic16 *a,
+ __tsan_atomic16 v, __tsan_memory_order mo);
+__tsan_atomic32 __tsan_atomic32_fetch_add(volatile __tsan_atomic32 *a,
+ __tsan_atomic32 v, __tsan_memory_order mo);
+__tsan_atomic64 __tsan_atomic64_fetch_add(volatile __tsan_atomic64 *a,
+ __tsan_atomic64 v, __tsan_memory_order mo);
+
+__tsan_atomic8 __tsan_atomic8_fetch_and(volatile __tsan_atomic8 *a,
+ __tsan_atomic8 v, __tsan_memory_order mo);
+__tsan_atomic16 __tsan_atomic16_fetch_and(volatile __tsan_atomic16 *a,
+ __tsan_atomic16 v, __tsan_memory_order mo);
+__tsan_atomic32 __tsan_atomic32_fetch_and(volatile __tsan_atomic32 *a,
+ __tsan_atomic32 v, __tsan_memory_order mo);
+__tsan_atomic64 __tsan_atomic64_fetch_and(volatile __tsan_atomic64 *a,
+ __tsan_atomic64 v, __tsan_memory_order mo);
+
+__tsan_atomic8 __tsan_atomic8_fetch_or(volatile __tsan_atomic8 *a,
+ __tsan_atomic8 v, __tsan_memory_order mo);
+__tsan_atomic16 __tsan_atomic16_fetch_or(volatile __tsan_atomic16 *a,
+ __tsan_atomic16 v, __tsan_memory_order mo);
+__tsan_atomic32 __tsan_atomic32_fetch_or(volatile __tsan_atomic32 *a,
+ __tsan_atomic32 v, __tsan_memory_order mo);
+__tsan_atomic64 __tsan_atomic64_fetch_or(volatile __tsan_atomic64 *a,
+ __tsan_atomic64 v, __tsan_memory_order mo);
+
+__tsan_atomic8 __tsan_atomic8_fetch_xor(volatile __tsan_atomic8 *a,
+ __tsan_atomic8 v, __tsan_memory_order mo);
+__tsan_atomic16 __tsan_atomic16_fetch_xor(volatile __tsan_atomic16 *a,
+ __tsan_atomic16 v, __tsan_memory_order mo);
+__tsan_atomic32 __tsan_atomic32_fetch_xor(volatile __tsan_atomic32 *a,
+ __tsan_atomic32 v, __tsan_memory_order mo);
+__tsan_atomic64 __tsan_atomic64_fetch_xor(volatile __tsan_atomic64 *a,
+ __tsan_atomic64 v, __tsan_memory_order mo);
+
+int __tsan_atomic8_compare_exchange_weak(volatile __tsan_atomic8 *a,
+ __tsan_atomic8 *c, __tsan_atomic8 v, __tsan_memory_order mo);
+int __tsan_atomic16_compare_exchange_weak(volatile __tsan_atomic16 *a,
+ __tsan_atomic16 *c, __tsan_atomic16 v, __tsan_memory_order mo);
+int __tsan_atomic32_compare_exchange_weak(volatile __tsan_atomic32 *a,
+ __tsan_atomic32 *c, __tsan_atomic32 v, __tsan_memory_order mo);
+int __tsan_atomic64_compare_exchange_weak(volatile __tsan_atomic64 *a,
+ __tsan_atomic64 *c, __tsan_atomic64 v, __tsan_memory_order mo);
+
+int __tsan_atomic8_compare_exchange_strong(volatile __tsan_atomic8 *a,
+ __tsan_atomic8 *c, __tsan_atomic8 v, __tsan_memory_order mo);
+int __tsan_atomic16_compare_exchange_strong(volatile __tsan_atomic16 *a,
+ __tsan_atomic16 *c, __tsan_atomic16 v, __tsan_memory_order mo);
+int __tsan_atomic32_compare_exchange_strong(volatile __tsan_atomic32 *a,
+ __tsan_atomic32 *c, __tsan_atomic32 v, __tsan_memory_order mo);
+int __tsan_atomic64_compare_exchange_strong(volatile __tsan_atomic64 *a,
+ __tsan_atomic64 *c, __tsan_atomic64 v, __tsan_memory_order mo);
+
+void __tsan_atomic_thread_fence(__tsan_memory_order mo);
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // #ifndef TSAN_INTERFACE_ATOMIC_H
+
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32 *ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ Atomic32 cmp = old_value;
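+  // On failure, compare_exchange_strong writes the value observed at |ptr|
+  // back into |cmp|, so returning |cmp| preserves the old CAS contract of
+  // returning the previous value.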
+ __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
+ __tsan_memory_order_relaxed);
+ return cmp;
+}
+
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32 *ptr,
+ Atomic32 new_value) {
+ return __tsan_atomic32_exchange(ptr, new_value,
+ __tsan_memory_order_relaxed);
+}
+
+inline Atomic32 Acquire_AtomicExchange(volatile Atomic32 *ptr,
+ Atomic32 new_value) {
+ return __tsan_atomic32_exchange(ptr, new_value,
+ __tsan_memory_order_acquire);
+}
+
+inline Atomic32 Release_AtomicExchange(volatile Atomic32 *ptr,
+ Atomic32 new_value) {
+ return __tsan_atomic32_exchange(ptr, new_value,
+ __tsan_memory_order_release);
+}
+
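+// The portable increment API returns the new value, while the __tsan
+// fetch_add intrinsics return the previous one; adding |increment| converts
+// between the two conventions.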
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32 *ptr,
+ Atomic32 increment) {
+ return increment + __tsan_atomic32_fetch_add(ptr, increment,
+ __tsan_memory_order_relaxed);
+}
+
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32 *ptr,
+ Atomic32 increment) {
+ return increment + __tsan_atomic32_fetch_add(ptr, increment,
+ __tsan_memory_order_acq_rel);
+}
+
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32 *ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ Atomic32 cmp = old_value;
+ __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
+ __tsan_memory_order_acquire);
+ return cmp;
+}
+
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32 *ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ Atomic32 cmp = old_value;
+ __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
+ __tsan_memory_order_release);
+ return cmp;
+}
+
+inline void NoBarrier_Store(volatile Atomic32 *ptr, Atomic32 value) {
+ __tsan_atomic32_store(ptr, value, __tsan_memory_order_relaxed);
+}
+
+inline void Acquire_Store(volatile Atomic32 *ptr, Atomic32 value) {
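+  // Legacy Acquire_Store is a plain store followed by a full barrier (as in
+  // the other backends); C++11 has no direct equivalent, so a relaxed store
+  // plus a seq_cst fence is used. Release_Load below mirrors this shape.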
+ __tsan_atomic32_store(ptr, value, __tsan_memory_order_relaxed);
+ __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
+}
+
+inline void Release_Store(volatile Atomic32 *ptr, Atomic32 value) {
+ __tsan_atomic32_store(ptr, value, __tsan_memory_order_release);
+}
+
+inline Atomic32 NoBarrier_Load(volatile const Atomic32 *ptr) {
+ return __tsan_atomic32_load(ptr, __tsan_memory_order_relaxed);
+}
+
+inline Atomic32 Acquire_Load(volatile const Atomic32 *ptr) {
+ return __tsan_atomic32_load(ptr, __tsan_memory_order_acquire);
+}
+
+inline Atomic32 Release_Load(volatile const Atomic32 *ptr) {
+ __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
+ return __tsan_atomic32_load(ptr, __tsan_memory_order_relaxed);
+}
+
+inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64 *ptr,
+ Atomic64 old_value,
+ Atomic64 new_value) {
+ Atomic64 cmp = old_value;
+ __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
+ __tsan_memory_order_relaxed);
+ return cmp;
+}
+
+inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64 *ptr,
+ Atomic64 new_value) {
+ return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_relaxed);
+}
+
+inline Atomic64 Acquire_AtomicExchange(volatile Atomic64 *ptr,
+ Atomic64 new_value) {
+ return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_acquire);
+}
+
+inline Atomic64 Release_AtomicExchange(volatile Atomic64 *ptr,
+ Atomic64 new_value) {
+ return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_release);
+}
+
+inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64 *ptr,
+ Atomic64 increment) {
+ return increment + __tsan_atomic64_fetch_add(ptr, increment,
+ __tsan_memory_order_relaxed);
+}
+
+inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64 *ptr,
+ Atomic64 increment) {
+ return increment + __tsan_atomic64_fetch_add(ptr, increment,
+ __tsan_memory_order_acq_rel);
+}
+
+inline void NoBarrier_Store(volatile Atomic64 *ptr, Atomic64 value) {
+ __tsan_atomic64_store(ptr, value, __tsan_memory_order_relaxed);
+}
+
+inline void Acquire_Store(volatile Atomic64 *ptr, Atomic64 value) {
+ __tsan_atomic64_store(ptr, value, __tsan_memory_order_relaxed);
+ __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
+}
+
+inline void Release_Store(volatile Atomic64 *ptr, Atomic64 value) {
+ __tsan_atomic64_store(ptr, value, __tsan_memory_order_release);
+}
+
+inline Atomic64 NoBarrier_Load(volatile const Atomic64 *ptr) {
+ return __tsan_atomic64_load(ptr, __tsan_memory_order_relaxed);
+}
+
+inline Atomic64 Acquire_Load(volatile const Atomic64 *ptr) {
+ return __tsan_atomic64_load(ptr, __tsan_memory_order_acquire);
+}
+
+inline Atomic64 Release_Load(volatile const Atomic64 *ptr) {
+ __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
+ return __tsan_atomic64_load(ptr, __tsan_memory_order_relaxed);
+}
+
+inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64 *ptr,
+ Atomic64 old_value,
+ Atomic64 new_value) {
+ Atomic64 cmp = old_value;
+ __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
+ __tsan_memory_order_acquire);
+ return cmp;
+}
+
+inline Atomic64 Release_CompareAndSwap(volatile Atomic64 *ptr,
+ Atomic64 old_value,
+ Atomic64 new_value) {
+ Atomic64 cmp = old_value;
+ __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
+ __tsan_memory_order_release);
+ return cmp;
+}
+
+inline void MemoryBarrier() {
+ __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
+}
+
+} // namespace base::subtle
+} // namespace base
+
+#undef ATOMICOPS_COMPILER_BARRIER
+
+#endif // BASE_ATOMICOPS_INTERNALS_TSAN_H_
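
Taken together, the wrappers above give every portable operation a meaning the detector can reason about. As a usage illustration (a hypothetical client, not part of this patch), a spinlock built on this API synchronizes correctly under TSan instead of producing race reports:

#include "base/atomicops.h"

// Hypothetical spinlock sketch. Acquire_CompareAndSwap returns the previous
// value, so a return of 0 means the lock was free and is now held by us.
class SpinLock {
 public:
  SpinLock() : state_(0) {}

  void Lock() {
    while (base::subtle::Acquire_CompareAndSwap(&state_, 0, 1) != 0) {
      // Spin; each iteration is an acquire CAS the TSan runtime observes.
    }
  }

  void Unlock() {
    // Pairs with the acquire CAS in Lock().
    base::subtle::Release_Store(&state_, 0);
  }

 private:
  volatile base::subtle::Atomic32 state_;
};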
diff --git a/base/base.gypi b/base/base.gypi
index 7ac1f18..e5c36f2 100644
--- a/base/base.gypi
+++ b/base/base.gypi
@@ -54,6 +54,7 @@
'atomicops.h',
'atomicops_internals_gcc.h',
'atomicops_internals_mac.h',
+ 'atomicops_internals_tsan.h',
'atomicops_internals_x86_gcc.cc',
'atomicops_internals_x86_gcc.h',
'atomicops_internals_x86_msvc.h',