author    ctruta@blackberry.com <ctruta@blackberry.com@0039d316-1c4b-4281-b951-d872f2087c98>  2014-02-08 04:59:55 +0000
committer ctruta@blackberry.com <ctruta@blackberry.com@0039d316-1c4b-4281-b951-d872f2087c98>  2014-02-08 04:59:55 +0000
commit    9267710a4799c6fc0fb39e9add0871e2e7b9b6a9 (patch)
tree      7dcee91965a9fd1171a659926c0b9d62f5040d43 /base
parent    f330b76298ce76aa490c787dc10b5d018ab1d6e5 (diff)
Atomic ops cleanup.
Use <stdint.h> instead of "base/basictypes.h".
Put AtomicOps_Internalx86CPUFeaturesInit in the anonymous namespace.
Fix formatting issues.

BUG=
Review URL: https://codereview.chromium.org/143273005

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@249938 0039d316-1c4b-4281-b951-d872f2087c98
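The substance of the change is mechanical: the project-local integer typedefs from "base/basictypes.h" give way to the standard fixed-width types, so the atomics headers no longer need that base/ dependency. A minimal sketch of the pattern (illustrative, not taken verbatim from the diff):

```cpp
// Before: relied on "base/basictypes.h" for the project typedef.
// typedef int32 Atomic32;

// After: the standard header provides the exact-width type directly.
#include <stdint.h>

typedef int32_t Atomic32;
```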
Diffstat (limited to 'base')
-rw-r--r--  base/atomicops.h                          |   5
-rw-r--r--  base/atomicops_internals_mac.h            |  42
-rw-r--r--  base/atomicops_internals_tsan.h           | 234
-rw-r--r--  base/atomicops_internals_x86_gcc.cc       |  34
-rw-r--r--  base/atomicops_internals_x86_msvc.h       |   2
-rw-r--r--  base/threading/thread_collision_warner.h  |   1
6 files changed, 159 insertions(+), 159 deletions(-)
diff --git a/base/atomicops.h b/base/atomicops.h
index 7f03492..3d15e03 100644
--- a/base/atomicops.h
+++ b/base/atomicops.h
@@ -28,7 +28,8 @@
#ifndef BASE_ATOMICOPS_H_
#define BASE_ATOMICOPS_H_
-#include "base/basictypes.h"
+#include <stdint.h>
+
#include "build/build_config.h"
#if defined(OS_WIN) && defined(ARCH_CPU_64_BITS)
@@ -43,7 +44,7 @@
namespace base {
namespace subtle {
-typedef int32 Atomic32;
+typedef int32_t Atomic32;
#ifdef ARCH_CPU_64_BITS
// We need to be able to go between Atomic64 and AtomicWord implicitly. This
// means Atomic64 and AtomicWord should be the same type on 64-bit.
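The usual way this constraint is met (the exact lines sit outside this hunk) is to define both names over the same pointer-sized type. A sketch of the arrangement, assuming the surrounding #ifdef structure of atomicops.h:

```cpp
#ifdef ARCH_CPU_64_BITS
// intptr_t is 64 bits wide here, so Atomic64 and the pointer-sized
// AtomicWord share one underlying type and convert implicitly.
typedef intptr_t Atomic64;
#endif

// AtomicWord is pointer-sized everywhere: 32 bits alongside Atomic32,
// 64 bits alongside Atomic64.
typedef intptr_t AtomicWord;
```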
diff --git a/base/atomicops_internals_mac.h b/base/atomicops_internals_mac.h
index 658ed54..ccbb896 100644
--- a/base/atomicops_internals_mac.h
+++ b/base/atomicops_internals_mac.h
@@ -12,7 +12,7 @@
namespace base {
namespace subtle {
-inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32 *ptr,
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 prev_value;
@@ -26,7 +26,7 @@ inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32 *ptr,
return prev_value;
}
-inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32 *ptr,
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
Atomic32 old_value;
do {
@@ -36,13 +36,13 @@ inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32 *ptr,
return old_value;
}
-inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32 *ptr,
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
return OSAtomicAdd32(increment, const_cast<Atomic32*>(ptr));
}
-inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32 *ptr,
- Atomic32 increment) {
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
+ Atomic32 increment) {
return OSAtomicAdd32Barrier(increment, const_cast<Atomic32*>(ptr));
}
@@ -50,7 +50,7 @@ inline void MemoryBarrier() {
OSMemoryBarrier();
}
-inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32 *ptr,
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 prev_value;
@@ -64,7 +64,7 @@ inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32 *ptr,
return prev_value;
}
-inline Atomic32 Release_CompareAndSwap(volatile Atomic32 *ptr,
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
return Acquire_CompareAndSwap(ptr, old_value, new_value);
@@ -74,12 +74,12 @@ inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
}
-inline void Acquire_Store(volatile Atomic32 *ptr, Atomic32 value) {
+inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
MemoryBarrier();
}
-inline void Release_Store(volatile Atomic32 *ptr, Atomic32 value) {
+inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
MemoryBarrier();
*ptr = value;
}
@@ -88,13 +88,13 @@ inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
return *ptr;
}
-inline Atomic32 Acquire_Load(volatile const Atomic32 *ptr) {
+inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
Atomic32 value = *ptr;
MemoryBarrier();
return value;
}
-inline Atomic32 Release_Load(volatile const Atomic32 *ptr) {
+inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
MemoryBarrier();
return *ptr;
}
@@ -103,7 +103,7 @@ inline Atomic32 Release_Load(volatile const Atomic32 *ptr) {
// 64-bit implementation on 64-bit platform
-inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64 *ptr,
+inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
Atomic64 prev_value;
@@ -117,7 +117,7 @@ inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64 *ptr,
return prev_value;
}
-inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64 *ptr,
+inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
Atomic64 old_value;
do {
@@ -127,18 +127,18 @@ inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64 *ptr,
return old_value;
}
-inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64 *ptr,
+inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
Atomic64 increment) {
return OSAtomicAdd64(increment, reinterpret_cast<volatile int64_t*>(ptr));
}
-inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64 *ptr,
+inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
Atomic64 increment) {
return OSAtomicAdd64Barrier(increment,
reinterpret_cast<volatile int64_t*>(ptr));
}
-inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64 *ptr,
+inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
Atomic64 prev_value;
@@ -152,7 +152,7 @@ inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64 *ptr,
return prev_value;
}
-inline Atomic64 Release_CompareAndSwap(volatile Atomic64 *ptr,
+inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
// The lib kern interface does not distinguish between
@@ -164,12 +164,12 @@ inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
*ptr = value;
}
-inline void Acquire_Store(volatile Atomic64 *ptr, Atomic64 value) {
+inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
*ptr = value;
MemoryBarrier();
}
-inline void Release_Store(volatile Atomic64 *ptr, Atomic64 value) {
+inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
MemoryBarrier();
*ptr = value;
}
@@ -178,13 +178,13 @@ inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
return *ptr;
}
-inline Atomic64 Acquire_Load(volatile const Atomic64 *ptr) {
+inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
Atomic64 value = *ptr;
MemoryBarrier();
return value;
}
-inline Atomic64 Release_Load(volatile const Atomic64 *ptr) {
+inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
MemoryBarrier();
return *ptr;
}
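The acquire/release pairs above exist for publication patterns. A minimal usage sketch, assuming a payload guarded by a ready flag (g_payload, g_ready, Producer, and Consumer are hypothetical names, not part of this change):

```cpp
#include <stdio.h>

#include "base/atomicops.h"

// Hypothetical shared state: |g_ready| publishes |g_payload|.
int g_payload = 0;
base::subtle::Atomic32 g_ready = 0;

void Producer() {
  g_payload = 42;                            // plain write
  base::subtle::Release_Store(&g_ready, 1);  // barrier, then store
}

void Consumer() {
  if (base::subtle::Acquire_Load(&g_ready))  // load, then barrier
    printf("%d\n", g_payload);               // guaranteed to observe 42
}
```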
diff --git a/base/atomicops_internals_tsan.h b/base/atomicops_internals_tsan.h
index 44d6400..2ac1b2c 100644
--- a/base/atomicops_internals_tsan.h
+++ b/base/atomicops_internals_tsan.h
@@ -31,9 +31,7 @@ namespace subtle {
#ifndef TSAN_INTERFACE_ATOMIC_H
#define TSAN_INTERFACE_ATOMIC_H
-#ifdef __cplusplus
extern "C" {
-#endif
typedef char __tsan_atomic8;
typedef short __tsan_atomic16; // NOLINT
@@ -58,308 +56,306 @@ typedef enum {
__tsan_memory_order_seq_cst,
} __tsan_memory_order;
-__tsan_atomic8 __tsan_atomic8_load(const volatile __tsan_atomic8 *a,
+__tsan_atomic8 __tsan_atomic8_load(const volatile __tsan_atomic8* a,
__tsan_memory_order mo);
-__tsan_atomic16 __tsan_atomic16_load(const volatile __tsan_atomic16 *a,
+__tsan_atomic16 __tsan_atomic16_load(const volatile __tsan_atomic16* a,
__tsan_memory_order mo);
-__tsan_atomic32 __tsan_atomic32_load(const volatile __tsan_atomic32 *a,
+__tsan_atomic32 __tsan_atomic32_load(const volatile __tsan_atomic32* a,
__tsan_memory_order mo);
-__tsan_atomic64 __tsan_atomic64_load(const volatile __tsan_atomic64 *a,
+__tsan_atomic64 __tsan_atomic64_load(const volatile __tsan_atomic64* a,
__tsan_memory_order mo);
-__tsan_atomic128 __tsan_atomic128_load(const volatile __tsan_atomic128 *a,
+__tsan_atomic128 __tsan_atomic128_load(const volatile __tsan_atomic128* a,
__tsan_memory_order mo);
-void __tsan_atomic8_store(volatile __tsan_atomic8 *a, __tsan_atomic8 v,
+void __tsan_atomic8_store(volatile __tsan_atomic8* a, __tsan_atomic8 v,
__tsan_memory_order mo);
-void __tsan_atomic16_store(volatile __tsan_atomic16 *a, __tsan_atomic16 v,
+void __tsan_atomic16_store(volatile __tsan_atomic16* a, __tsan_atomic16 v,
__tsan_memory_order mo);
-void __tsan_atomic32_store(volatile __tsan_atomic32 *a, __tsan_atomic32 v,
+void __tsan_atomic32_store(volatile __tsan_atomic32* a, __tsan_atomic32 v,
__tsan_memory_order mo);
-void __tsan_atomic64_store(volatile __tsan_atomic64 *a, __tsan_atomic64 v,
+void __tsan_atomic64_store(volatile __tsan_atomic64* a, __tsan_atomic64 v,
__tsan_memory_order mo);
-void __tsan_atomic128_store(volatile __tsan_atomic128 *a, __tsan_atomic128 v,
+void __tsan_atomic128_store(volatile __tsan_atomic128* a, __tsan_atomic128 v,
__tsan_memory_order mo);
-__tsan_atomic8 __tsan_atomic8_exchange(volatile __tsan_atomic8 *a,
+__tsan_atomic8 __tsan_atomic8_exchange(volatile __tsan_atomic8* a,
__tsan_atomic8 v, __tsan_memory_order mo);
-__tsan_atomic16 __tsan_atomic16_exchange(volatile __tsan_atomic16 *a,
+__tsan_atomic16 __tsan_atomic16_exchange(volatile __tsan_atomic16* a,
__tsan_atomic16 v, __tsan_memory_order mo);
-__tsan_atomic32 __tsan_atomic32_exchange(volatile __tsan_atomic32 *a,
+__tsan_atomic32 __tsan_atomic32_exchange(volatile __tsan_atomic32* a,
__tsan_atomic32 v, __tsan_memory_order mo);
-__tsan_atomic64 __tsan_atomic64_exchange(volatile __tsan_atomic64 *a,
+__tsan_atomic64 __tsan_atomic64_exchange(volatile __tsan_atomic64* a,
__tsan_atomic64 v, __tsan_memory_order mo);
-__tsan_atomic128 __tsan_atomic128_exchange(volatile __tsan_atomic128 *a,
+__tsan_atomic128 __tsan_atomic128_exchange(volatile __tsan_atomic128* a,
__tsan_atomic128 v, __tsan_memory_order mo);
-__tsan_atomic8 __tsan_atomic8_fetch_add(volatile __tsan_atomic8 *a,
+__tsan_atomic8 __tsan_atomic8_fetch_add(volatile __tsan_atomic8* a,
__tsan_atomic8 v, __tsan_memory_order mo);
-__tsan_atomic16 __tsan_atomic16_fetch_add(volatile __tsan_atomic16 *a,
+__tsan_atomic16 __tsan_atomic16_fetch_add(volatile __tsan_atomic16* a,
__tsan_atomic16 v, __tsan_memory_order mo);
-__tsan_atomic32 __tsan_atomic32_fetch_add(volatile __tsan_atomic32 *a,
+__tsan_atomic32 __tsan_atomic32_fetch_add(volatile __tsan_atomic32* a,
__tsan_atomic32 v, __tsan_memory_order mo);
-__tsan_atomic64 __tsan_atomic64_fetch_add(volatile __tsan_atomic64 *a,
+__tsan_atomic64 __tsan_atomic64_fetch_add(volatile __tsan_atomic64* a,
__tsan_atomic64 v, __tsan_memory_order mo);
-__tsan_atomic128 __tsan_atomic128_fetch_add(volatile __tsan_atomic128 *a,
+__tsan_atomic128 __tsan_atomic128_fetch_add(volatile __tsan_atomic128* a,
__tsan_atomic128 v, __tsan_memory_order mo);
-__tsan_atomic8 __tsan_atomic8_fetch_and(volatile __tsan_atomic8 *a,
+__tsan_atomic8 __tsan_atomic8_fetch_and(volatile __tsan_atomic8* a,
__tsan_atomic8 v, __tsan_memory_order mo);
-__tsan_atomic16 __tsan_atomic16_fetch_and(volatile __tsan_atomic16 *a,
+__tsan_atomic16 __tsan_atomic16_fetch_and(volatile __tsan_atomic16* a,
__tsan_atomic16 v, __tsan_memory_order mo);
-__tsan_atomic32 __tsan_atomic32_fetch_and(volatile __tsan_atomic32 *a,
+__tsan_atomic32 __tsan_atomic32_fetch_and(volatile __tsan_atomic32* a,
__tsan_atomic32 v, __tsan_memory_order mo);
-__tsan_atomic64 __tsan_atomic64_fetch_and(volatile __tsan_atomic64 *a,
+__tsan_atomic64 __tsan_atomic64_fetch_and(volatile __tsan_atomic64* a,
__tsan_atomic64 v, __tsan_memory_order mo);
-__tsan_atomic128 __tsan_atomic128_fetch_and(volatile __tsan_atomic128 *a,
+__tsan_atomic128 __tsan_atomic128_fetch_and(volatile __tsan_atomic128* a,
__tsan_atomic128 v, __tsan_memory_order mo);
-__tsan_atomic8 __tsan_atomic8_fetch_or(volatile __tsan_atomic8 *a,
+__tsan_atomic8 __tsan_atomic8_fetch_or(volatile __tsan_atomic8* a,
__tsan_atomic8 v, __tsan_memory_order mo);
-__tsan_atomic16 __tsan_atomic16_fetch_or(volatile __tsan_atomic16 *a,
+__tsan_atomic16 __tsan_atomic16_fetch_or(volatile __tsan_atomic16* a,
__tsan_atomic16 v, __tsan_memory_order mo);
-__tsan_atomic32 __tsan_atomic32_fetch_or(volatile __tsan_atomic32 *a,
+__tsan_atomic32 __tsan_atomic32_fetch_or(volatile __tsan_atomic32* a,
__tsan_atomic32 v, __tsan_memory_order mo);
-__tsan_atomic64 __tsan_atomic64_fetch_or(volatile __tsan_atomic64 *a,
+__tsan_atomic64 __tsan_atomic64_fetch_or(volatile __tsan_atomic64* a,
__tsan_atomic64 v, __tsan_memory_order mo);
-__tsan_atomic128 __tsan_atomic128_fetch_or(volatile __tsan_atomic128 *a,
+__tsan_atomic128 __tsan_atomic128_fetch_or(volatile __tsan_atomic128* a,
__tsan_atomic128 v, __tsan_memory_order mo);
-__tsan_atomic8 __tsan_atomic8_fetch_xor(volatile __tsan_atomic8 *a,
+__tsan_atomic8 __tsan_atomic8_fetch_xor(volatile __tsan_atomic8* a,
__tsan_atomic8 v, __tsan_memory_order mo);
-__tsan_atomic16 __tsan_atomic16_fetch_xor(volatile __tsan_atomic16 *a,
+__tsan_atomic16 __tsan_atomic16_fetch_xor(volatile __tsan_atomic16* a,
__tsan_atomic16 v, __tsan_memory_order mo);
-__tsan_atomic32 __tsan_atomic32_fetch_xor(volatile __tsan_atomic32 *a,
+__tsan_atomic32 __tsan_atomic32_fetch_xor(volatile __tsan_atomic32* a,
__tsan_atomic32 v, __tsan_memory_order mo);
-__tsan_atomic64 __tsan_atomic64_fetch_xor(volatile __tsan_atomic64 *a,
+__tsan_atomic64 __tsan_atomic64_fetch_xor(volatile __tsan_atomic64* a,
__tsan_atomic64 v, __tsan_memory_order mo);
-__tsan_atomic128 __tsan_atomic128_fetch_xor(volatile __tsan_atomic128 *a,
+__tsan_atomic128 __tsan_atomic128_fetch_xor(volatile __tsan_atomic128* a,
__tsan_atomic128 v, __tsan_memory_order mo);
-__tsan_atomic8 __tsan_atomic8_fetch_nand(volatile __tsan_atomic8 *a,
+__tsan_atomic8 __tsan_atomic8_fetch_nand(volatile __tsan_atomic8* a,
__tsan_atomic8 v, __tsan_memory_order mo);
-__tsan_atomic16 __tsan_atomic16_fetch_nand(volatile __tsan_atomic16 *a,
+__tsan_atomic16 __tsan_atomic16_fetch_nand(volatile __tsan_atomic16* a,
__tsan_atomic16 v, __tsan_memory_order mo);
-__tsan_atomic32 __tsan_atomic32_fetch_nand(volatile __tsan_atomic32 *a,
+__tsan_atomic32 __tsan_atomic32_fetch_nand(volatile __tsan_atomic32* a,
__tsan_atomic32 v, __tsan_memory_order mo);
-__tsan_atomic64 __tsan_atomic64_fetch_nand(volatile __tsan_atomic64 *a,
+__tsan_atomic64 __tsan_atomic64_fetch_nand(volatile __tsan_atomic64* a,
__tsan_atomic64 v, __tsan_memory_order mo);
-__tsan_atomic128 __tsan_atomic128_fetch_nand(volatile __tsan_atomic128 *a,
+__tsan_atomic128 __tsan_atomic128_fetch_nand(volatile __tsan_atomic128* a,
__tsan_atomic128 v, __tsan_memory_order mo);
-int __tsan_atomic8_compare_exchange_weak(volatile __tsan_atomic8 *a,
- __tsan_atomic8 *c, __tsan_atomic8 v, __tsan_memory_order mo,
+int __tsan_atomic8_compare_exchange_weak(volatile __tsan_atomic8* a,
+ __tsan_atomic8* c, __tsan_atomic8 v, __tsan_memory_order mo,
__tsan_memory_order fail_mo);
-int __tsan_atomic16_compare_exchange_weak(volatile __tsan_atomic16 *a,
- __tsan_atomic16 *c, __tsan_atomic16 v, __tsan_memory_order mo,
+int __tsan_atomic16_compare_exchange_weak(volatile __tsan_atomic16* a,
+ __tsan_atomic16* c, __tsan_atomic16 v, __tsan_memory_order mo,
__tsan_memory_order fail_mo);
-int __tsan_atomic32_compare_exchange_weak(volatile __tsan_atomic32 *a,
- __tsan_atomic32 *c, __tsan_atomic32 v, __tsan_memory_order mo,
+int __tsan_atomic32_compare_exchange_weak(volatile __tsan_atomic32* a,
+ __tsan_atomic32* c, __tsan_atomic32 v, __tsan_memory_order mo,
__tsan_memory_order fail_mo);
-int __tsan_atomic64_compare_exchange_weak(volatile __tsan_atomic64 *a,
- __tsan_atomic64 *c, __tsan_atomic64 v, __tsan_memory_order mo,
+int __tsan_atomic64_compare_exchange_weak(volatile __tsan_atomic64* a,
+ __tsan_atomic64* c, __tsan_atomic64 v, __tsan_memory_order mo,
__tsan_memory_order fail_mo);
-int __tsan_atomic128_compare_exchange_weak(volatile __tsan_atomic128 *a,
- __tsan_atomic128 *c, __tsan_atomic128 v, __tsan_memory_order mo,
+int __tsan_atomic128_compare_exchange_weak(volatile __tsan_atomic128* a,
+ __tsan_atomic128* c, __tsan_atomic128 v, __tsan_memory_order mo,
__tsan_memory_order fail_mo);
-int __tsan_atomic8_compare_exchange_strong(volatile __tsan_atomic8 *a,
- __tsan_atomic8 *c, __tsan_atomic8 v, __tsan_memory_order mo,
+int __tsan_atomic8_compare_exchange_strong(volatile __tsan_atomic8* a,
+ __tsan_atomic8* c, __tsan_atomic8 v, __tsan_memory_order mo,
__tsan_memory_order fail_mo);
-int __tsan_atomic16_compare_exchange_strong(volatile __tsan_atomic16 *a,
- __tsan_atomic16 *c, __tsan_atomic16 v, __tsan_memory_order mo,
+int __tsan_atomic16_compare_exchange_strong(volatile __tsan_atomic16* a,
+ __tsan_atomic16* c, __tsan_atomic16 v, __tsan_memory_order mo,
__tsan_memory_order fail_mo);
-int __tsan_atomic32_compare_exchange_strong(volatile __tsan_atomic32 *a,
- __tsan_atomic32 *c, __tsan_atomic32 v, __tsan_memory_order mo,
+int __tsan_atomic32_compare_exchange_strong(volatile __tsan_atomic32* a,
+ __tsan_atomic32* c, __tsan_atomic32 v, __tsan_memory_order mo,
__tsan_memory_order fail_mo);
-int __tsan_atomic64_compare_exchange_strong(volatile __tsan_atomic64 *a,
- __tsan_atomic64 *c, __tsan_atomic64 v, __tsan_memory_order mo,
+int __tsan_atomic64_compare_exchange_strong(volatile __tsan_atomic64* a,
+ __tsan_atomic64* c, __tsan_atomic64 v, __tsan_memory_order mo,
__tsan_memory_order fail_mo);
-int __tsan_atomic128_compare_exchange_strong(volatile __tsan_atomic128 *a,
- __tsan_atomic128 *c, __tsan_atomic128 v, __tsan_memory_order mo,
+int __tsan_atomic128_compare_exchange_strong(volatile __tsan_atomic128* a,
+ __tsan_atomic128* c, __tsan_atomic128 v, __tsan_memory_order mo,
__tsan_memory_order fail_mo);
__tsan_atomic8 __tsan_atomic8_compare_exchange_val(
- volatile __tsan_atomic8 *a, __tsan_atomic8 c, __tsan_atomic8 v,
+ volatile __tsan_atomic8* a, __tsan_atomic8 c, __tsan_atomic8 v,
__tsan_memory_order mo, __tsan_memory_order fail_mo);
__tsan_atomic16 __tsan_atomic16_compare_exchange_val(
- volatile __tsan_atomic16 *a, __tsan_atomic16 c, __tsan_atomic16 v,
+ volatile __tsan_atomic16* a, __tsan_atomic16 c, __tsan_atomic16 v,
__tsan_memory_order mo, __tsan_memory_order fail_mo);
__tsan_atomic32 __tsan_atomic32_compare_exchange_val(
- volatile __tsan_atomic32 *a, __tsan_atomic32 c, __tsan_atomic32 v,
+ volatile __tsan_atomic32* a, __tsan_atomic32 c, __tsan_atomic32 v,
__tsan_memory_order mo, __tsan_memory_order fail_mo);
__tsan_atomic64 __tsan_atomic64_compare_exchange_val(
- volatile __tsan_atomic64 *a, __tsan_atomic64 c, __tsan_atomic64 v,
+ volatile __tsan_atomic64* a, __tsan_atomic64 c, __tsan_atomic64 v,
__tsan_memory_order mo, __tsan_memory_order fail_mo);
__tsan_atomic128 __tsan_atomic128_compare_exchange_val(
- volatile __tsan_atomic128 *a, __tsan_atomic128 c, __tsan_atomic128 v,
+ volatile __tsan_atomic128* a, __tsan_atomic128 c, __tsan_atomic128 v,
__tsan_memory_order mo, __tsan_memory_order fail_mo);
void __tsan_atomic_thread_fence(__tsan_memory_order mo);
void __tsan_atomic_signal_fence(__tsan_memory_order mo);
-#ifdef __cplusplus
} // extern "C"
-#endif
#endif // #ifndef TSAN_INTERFACE_ATOMIC_H
-inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32 *ptr,
- Atomic32 old_value,
- Atomic32 new_value) {
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
Atomic32 cmp = old_value;
__tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
__tsan_memory_order_relaxed, __tsan_memory_order_relaxed);
return cmp;
}
-inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32 *ptr,
- Atomic32 new_value) {
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
+ Atomic32 new_value) {
return __tsan_atomic32_exchange(ptr, new_value,
__tsan_memory_order_relaxed);
}
-inline Atomic32 Acquire_AtomicExchange(volatile Atomic32 *ptr,
- Atomic32 new_value) {
+inline Atomic32 Acquire_AtomicExchange(volatile Atomic32* ptr,
+ Atomic32 new_value) {
return __tsan_atomic32_exchange(ptr, new_value,
__tsan_memory_order_acquire);
}
-inline Atomic32 Release_AtomicExchange(volatile Atomic32 *ptr,
- Atomic32 new_value) {
+inline Atomic32 Release_AtomicExchange(volatile Atomic32* ptr,
+ Atomic32 new_value) {
return __tsan_atomic32_exchange(ptr, new_value,
__tsan_memory_order_release);
}
-inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32 *ptr,
- Atomic32 increment) {
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
+ Atomic32 increment) {
return increment + __tsan_atomic32_fetch_add(ptr, increment,
__tsan_memory_order_relaxed);
}
-inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32 *ptr,
- Atomic32 increment) {
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
+ Atomic32 increment) {
return increment + __tsan_atomic32_fetch_add(ptr, increment,
__tsan_memory_order_acq_rel);
}
-inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32 *ptr,
- Atomic32 old_value,
- Atomic32 new_value) {
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
Atomic32 cmp = old_value;
__tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
__tsan_memory_order_acquire, __tsan_memory_order_acquire);
return cmp;
}
-inline Atomic32 Release_CompareAndSwap(volatile Atomic32 *ptr,
- Atomic32 old_value,
- Atomic32 new_value) {
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
Atomic32 cmp = old_value;
__tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
__tsan_memory_order_release, __tsan_memory_order_relaxed);
return cmp;
}
-inline void NoBarrier_Store(volatile Atomic32 *ptr, Atomic32 value) {
+inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
__tsan_atomic32_store(ptr, value, __tsan_memory_order_relaxed);
}
-inline void Acquire_Store(volatile Atomic32 *ptr, Atomic32 value) {
+inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
__tsan_atomic32_store(ptr, value, __tsan_memory_order_relaxed);
__tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
}
-inline void Release_Store(volatile Atomic32 *ptr, Atomic32 value) {
+inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
__tsan_atomic32_store(ptr, value, __tsan_memory_order_release);
}
-inline Atomic32 NoBarrier_Load(volatile const Atomic32 *ptr) {
+inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
return __tsan_atomic32_load(ptr, __tsan_memory_order_relaxed);
}
-inline Atomic32 Acquire_Load(volatile const Atomic32 *ptr) {
+inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
return __tsan_atomic32_load(ptr, __tsan_memory_order_acquire);
}
-inline Atomic32 Release_Load(volatile const Atomic32 *ptr) {
+inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
__tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
return __tsan_atomic32_load(ptr, __tsan_memory_order_relaxed);
}
-inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64 *ptr,
- Atomic64 old_value,
- Atomic64 new_value) {
+inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value,
+ Atomic64 new_value) {
Atomic64 cmp = old_value;
__tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
__tsan_memory_order_relaxed, __tsan_memory_order_relaxed);
return cmp;
}
-inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64 *ptr,
- Atomic64 new_value) {
+inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
+ Atomic64 new_value) {
return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_relaxed);
}
-inline Atomic64 Acquire_AtomicExchange(volatile Atomic64 *ptr,
- Atomic64 new_value) {
+inline Atomic64 Acquire_AtomicExchange(volatile Atomic64* ptr,
+ Atomic64 new_value) {
return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_acquire);
}
-inline Atomic64 Release_AtomicExchange(volatile Atomic64 *ptr,
- Atomic64 new_value) {
+inline Atomic64 Release_AtomicExchange(volatile Atomic64* ptr,
+ Atomic64 new_value) {
return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_release);
}
-inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64 *ptr,
- Atomic64 increment) {
+inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
+ Atomic64 increment) {
return increment + __tsan_atomic64_fetch_add(ptr, increment,
__tsan_memory_order_relaxed);
}
-inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64 *ptr,
- Atomic64 increment) {
+inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
+ Atomic64 increment) {
return increment + __tsan_atomic64_fetch_add(ptr, increment,
__tsan_memory_order_acq_rel);
}
-inline void NoBarrier_Store(volatile Atomic64 *ptr, Atomic64 value) {
+inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
__tsan_atomic64_store(ptr, value, __tsan_memory_order_relaxed);
}
-inline void Acquire_Store(volatile Atomic64 *ptr, Atomic64 value) {
+inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
__tsan_atomic64_store(ptr, value, __tsan_memory_order_relaxed);
__tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
}
-inline void Release_Store(volatile Atomic64 *ptr, Atomic64 value) {
+inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
__tsan_atomic64_store(ptr, value, __tsan_memory_order_release);
}
-inline Atomic64 NoBarrier_Load(volatile const Atomic64 *ptr) {
+inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
return __tsan_atomic64_load(ptr, __tsan_memory_order_relaxed);
}
-inline Atomic64 Acquire_Load(volatile const Atomic64 *ptr) {
+inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
return __tsan_atomic64_load(ptr, __tsan_memory_order_acquire);
}
-inline Atomic64 Release_Load(volatile const Atomic64 *ptr) {
+inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
__tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
return __tsan_atomic64_load(ptr, __tsan_memory_order_relaxed);
}
-inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64 *ptr,
- Atomic64 old_value,
- Atomic64 new_value) {
+inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value,
+ Atomic64 new_value) {
Atomic64 cmp = old_value;
__tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
__tsan_memory_order_acquire, __tsan_memory_order_acquire);
return cmp;
}
-inline Atomic64 Release_CompareAndSwap(volatile Atomic64 *ptr,
- Atomic64 old_value,
- Atomic64 new_value) {
+inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value,
+ Atomic64 new_value) {
Atomic64 cmp = old_value;
__tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
__tsan_memory_order_release, __tsan_memory_order_relaxed);
return cmp;
}
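These wrappers keep the usual base::subtle contract (a CompareAndSwap returns the value previously stored at the pointer) while telling TSan exactly which ordering each call implies. As a quick illustration of that contract, a minimal test-and-set spinlock built on the 32-bit operations (a sketch, not part of this change):

```cpp
#include "base/atomicops.h"

class SpinLock {
 public:
  SpinLock() : state_(0) {}

  void Lock() {
    // The CAS returns the previous value of |state_|; seeing 0 means we
    // transitioned 0 -> 1 and own the lock, with acquire ordering on the
    // winning exchange.
    while (base::subtle::Acquire_CompareAndSwap(&state_, 0, 1) != 0) {
      // Spin until the holder releases.
    }
  }

  void Unlock() {
    // Release ordering publishes all writes made inside the critical
    // section before the unlocking store becomes visible.
    base::subtle::Release_Store(&state_, 0);
  }

 private:
  base::subtle::Atomic32 state_;
};
```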
diff --git a/base/atomicops_internals_x86_gcc.cc b/base/atomicops_internals_x86_gcc.cc
index 933ca51..a208a98 100644
--- a/base/atomicops_internals_x86_gcc.cc
+++ b/base/atomicops_internals_x86_gcc.cc
@@ -5,10 +5,10 @@
// This module gets enough CPU information to optimize the
// atomicops module on x86.
+#include <stdint.h>
#include <string.h>
#include "base/atomicops.h"
-#include "base/basictypes.h"
// This file only makes sense with atomicops_internals_x86_gcc.h -- it
// depends on structs that are defined in that file. If atomicops.h
@@ -21,16 +21,16 @@
// must preserve that register's value across cpuid instructions.
#if defined(__i386__)
#define cpuid(a, b, c, d, inp) \
- asm ("mov %%ebx, %%edi\n" \
- "cpuid\n" \
- "xchg %%edi, %%ebx\n" \
- : "=a" (a), "=D" (b), "=c" (c), "=d" (d) : "a" (inp))
-#elif defined (__x86_64__)
+ asm("mov %%ebx, %%edi\n" \
+ "cpuid\n" \
+ "xchg %%edi, %%ebx\n" \
+ : "=a" (a), "=D" (b), "=c" (c), "=d" (d) : "a" (inp))
+#elif defined(__x86_64__)
#define cpuid(a, b, c, d, inp) \
- asm ("mov %%rbx, %%rdi\n" \
- "cpuid\n" \
- "xchg %%rdi, %%rbx\n" \
- : "=a" (a), "=D" (b), "=c" (c), "=d" (d) : "a" (inp))
+ asm("mov %%rbx, %%rdi\n" \
+ "cpuid\n" \
+ "xchg %%rdi, %%rbx\n" \
+ : "=a" (a), "=D" (b), "=c" (c), "=d" (d) : "a" (inp))
#endif
#if defined(cpuid) // initialize the struct only on x86
@@ -43,12 +43,14 @@ struct AtomicOps_x86CPUFeatureStruct AtomicOps_Internalx86CPUFeatures = {
false, // no SSE2
};
+namespace {
+
// Initialize the AtomicOps_Internalx86CPUFeatures struct.
-static void AtomicOps_Internalx86CPUFeaturesInit() {
- uint32 eax;
- uint32 ebx;
- uint32 ecx;
- uint32 edx;
+void AtomicOps_Internalx86CPUFeaturesInit() {
+ uint32_t eax;
+ uint32_t ebx;
+ uint32_t ecx;
+ uint32_t edx;
// Get vendor string (issue CPUID with eax = 0)
cpuid(eax, ebx, ecx, edx, 0);
@@ -85,8 +87,6 @@ static void AtomicOps_Internalx86CPUFeaturesInit() {
AtomicOps_Internalx86CPUFeatures.has_sse2 = ((edx >> 26) & 1);
}
-namespace {
-
class AtomicOpsx86Initializer {
public:
AtomicOpsx86Initializer() {
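The cpuid macro above hides the inline assembly; feature detection then reduces to reading documented bits out of the output registers. A sketch of the SSE2 check the initializer performs, assuming the macro is in scope (x86/x86-64 only):

```cpp
#include <stdint.h>

bool HasSSE2() {
  uint32_t eax, ebx, ecx, edx;
  cpuid(eax, ebx, ecx, edx, 1);  // leaf 1: processor feature information
  return (edx >> 26) & 1;        // EDX bit 26 is the SSE2 feature flag
}
```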
diff --git a/base/atomicops_internals_x86_msvc.h b/base/atomicops_internals_x86_msvc.h
index 3a2c72d..016744c 100644
--- a/base/atomicops_internals_x86_msvc.h
+++ b/base/atomicops_internals_x86_msvc.h
@@ -9,6 +9,8 @@
#include <windows.h>
+#include "base/macros.h"
+
#if defined(ARCH_CPU_64_BITS)
// windows.h #defines this (only on x64). This causes problems because the
public API also uses MemoryBarrier as the public name for this fence. So, on
diff --git a/base/threading/thread_collision_warner.h b/base/threading/thread_collision_warner.h
index d509a58..5172b2e 100644
--- a/base/threading/thread_collision_warner.h
+++ b/base/threading/thread_collision_warner.h
@@ -9,6 +9,7 @@
#include "base/atomicops.h"
#include "base/base_export.h"
+#include "base/basictypes.h"
#include "base/compiler_specific.h"
// A helper class alongside macros to be used to verify assumptions about thread
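For reference, this header pairs the warner class with DFAKE_* macros; in debug builds they assert that sections meant to run on one thread at a time really do, and they compile away in release builds. A minimal usage sketch, assuming the macros keep their documented names:

```cpp
#include "base/threading/thread_collision_warner.h"

class Counter {
 public:
  Counter() : value_(0) {}

  void Increment() {
    // Asserts (debug builds only) if two threads ever overlap here.
    DFAKE_SCOPED_LOCK(add_remove_);
    ++value_;
  }

 private:
  DFAKE_MUTEX(add_remove_);  // no-op in release builds
  int value_;
};
```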