Diffstat (limited to 'base')
-rw-r--r--  base/threading/thread_local_storage.cc   6
-rw-r--r--  base/threading/thread_local_storage.h    7
-rw-r--r--  base/tracked_objects.cc                  23
-rw-r--r--  base/tracked_objects.h                    3
4 files changed, 22 insertions, 17 deletions
diff --git a/base/threading/thread_local_storage.cc b/base/threading/thread_local_storage.cc
index 0bb396c..701f6a2 100644
--- a/base/threading/thread_local_storage.cc
+++ b/base/threading/thread_local_storage.cc
@@ -192,8 +192,8 @@ void PlatformThreadLocalStorage::OnThreadExit(void* value) {
} // namespace internal
ThreadLocalStorage::Slot::Slot(TLSDestructorFunc destructor) {
- initialized_ = false;
slot_ = 0;
+ base::subtle::Release_Store(&initialized_, 0);
Initialize(destructor);
}
@@ -211,7 +211,7 @@ void ThreadLocalStorage::StaticSlot::Initialize(TLSDestructorFunc destructor) {
// Setup our destructor.
g_tls_destructors[slot_] = destructor;
- initialized_ = true;
+ base::subtle::Release_Store(&initialized_, 1);
}
void ThreadLocalStorage::StaticSlot::Free() {
@@ -221,7 +221,7 @@ void ThreadLocalStorage::StaticSlot::Free() {
DCHECK_LT(slot_, kThreadLocalStorageSize);
g_tls_destructors[slot_] = NULL;
slot_ = 0;
- initialized_ = false;
+ base::subtle::Release_Store(&initialized_, 0);
}
void* ThreadLocalStorage::StaticSlot::Get() const {
diff --git a/base/threading/thread_local_storage.h b/base/threading/thread_local_storage.h
index 50f8868..195bff6 100644
--- a/base/threading/thread_local_storage.h
+++ b/base/threading/thread_local_storage.h
@@ -5,6 +5,7 @@
#ifndef BASE_THREADING_THREAD_LOCAL_STORAGE_H_
#define BASE_THREADING_THREAD_LOCAL_STORAGE_H_
+#include "base/atomicops.h"
#include "base/base_export.h"
#include "base/basictypes.h"
@@ -114,10 +115,12 @@ class BASE_EXPORT ThreadLocalStorage {
// value 'value'.
void Set(void* value);
- bool initialized() const { return initialized_; }
+ bool initialized() const {
+ return base::subtle::Acquire_Load(&initialized_) != 0;
+ }
// The internals of this struct should be considered private.
- bool initialized_;
+ base::subtle::Atomic32 initialized_;
int slot_;
};
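Note on the two thread_local_storage changes above: they form a matched release/acquire pair. The writer publishes the slot with Release_Store only after slot_ and the destructor entry are set up, and readers go through the Acquire_Load in initialized() before touching either. A minimal sketch of the same pairing, written with standard C++11 atomics rather than Chromium's base/atomicops.h wrappers (SimpleSlot and AllocateSlot are hypothetical stand-ins, not the real API):

    #include <atomic>
    #include <cstdint>

    typedef void (*TLSDestructorFunc)(void* value);

    // Hypothetical stand-in for ThreadLocalStorage::StaticSlot, reduced to the
    // acquire/release pairing on initialized_ that the diff introduces.
    struct SimpleSlot {
      void Initialize(TLSDestructorFunc destructor) {
        slot_ = AllocateSlot();   // stand-in for the platform TLS allocation
        destructor_ = destructor;
        // Release: the writes above become visible to any thread that later
        // observes initialized_ == 1 through an acquire load.
        initialized_.store(1, std::memory_order_release);
      }

      bool initialized() const {
        // Acquire: pairs with the release stores in Initialize() and Free().
        return initialized_.load(std::memory_order_acquire) != 0;
      }

      void Free() {
        destructor_ = nullptr;
        slot_ = 0;
        initialized_.store(0, std::memory_order_release);
      }

     private:
      static int AllocateSlot() { return 1; }  // stub

      std::atomic<int32_t> initialized_{0};
      int slot_ = 0;
      TLSDestructorFunc destructor_ = nullptr;
    };

With plain loads and stores, the compiler and CPU would be free to reorder the flag write ahead of the slot setup; the release/acquire pair rules that out.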
diff --git a/base/tracked_objects.cc b/base/tracked_objects.cc
index 9db05c0..c7a6a3f 100644
--- a/base/tracked_objects.cc
+++ b/base/tracked_objects.cc
@@ -310,7 +310,7 @@ base::LazyInstance<base::Lock>::Leaky
ThreadData::list_lock_ = LAZY_INSTANCE_INITIALIZER;
// static
-ThreadData::Status ThreadData::status_ = ThreadData::UNINITIALIZED;
+base::subtle::Atomic32 ThreadData::status_ = ThreadData::UNINITIALIZED;
ThreadData::ThreadData(const std::string& suggested_name)
: next_(NULL),
@@ -692,7 +692,7 @@ static void OptionallyInitializeAlternateTimer() {
}
void ThreadData::Initialize() {
- if (status_ >= DEACTIVATED)
+ if (base::subtle::Acquire_Load(&status_) >= DEACTIVATED)
return; // Someone else did the initialization.
// Due to racy lazy initialization in tests, we'll need to recheck status_
// after we acquire the lock.
@@ -701,7 +701,7 @@ void ThreadData::Initialize() {
// threaded in the product, but some tests may be racy and lazy about our
// initialization.
base::AutoLock lock(*list_lock_.Pointer());
- if (status_ >= DEACTIVATED)
+ if (base::subtle::Acquire_Load(&status_) >= DEACTIVATED)
return; // Someone raced in here and beat us.
// Put an alternate timer in place if the environment calls for it, such as
@@ -714,12 +714,12 @@ void ThreadData::Initialize() {
// Perform the "real" TLS initialization now, and leave it intact through
// process termination.
if (!tls_index_.initialized()) { // Testing may have initialized this.
- DCHECK_EQ(status_, UNINITIALIZED);
+ DCHECK_EQ(base::subtle::NoBarrier_Load(&status_), UNINITIALIZED);
tls_index_.Initialize(&ThreadData::OnThreadTermination);
DCHECK(tls_index_.initialized());
} else {
// TLS was initialized for us earlier.
- DCHECK_EQ(status_, DORMANT_DURING_TESTS);
+ DCHECK_EQ(base::subtle::NoBarrier_Load(&status_), DORMANT_DURING_TESTS);
}
// Incarnation counter is only significant to testing, as it otherwise will
@@ -729,8 +729,8 @@ void ThreadData::Initialize() {
// The lock is not critical for setting status_, but it doesn't hurt. It also
// ensures that if we have a racy initialization, that we'll bail as soon as
// we get the lock earlier in this method.
- status_ = kInitialStartupState;
- DCHECK(status_ != UNINITIALIZED);
+ base::subtle::Release_Store(&status_, kInitialStartupState);
+ DCHECK(base::subtle::NoBarrier_Load(&status_) != UNINITIALIZED);
}
// static
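The Initialize() hunks above keep the existing check/lock/recheck shape and only swap the raw status_ reads and the final write for Acquire_Load and Release_Store, so the unlocked fast-path check carries acquire semantics. A rough sketch of that shape using std::atomic and std::mutex (the Status values and InitOnce name are illustrative, not the tracked_objects API):

    #include <atomic>
    #include <cstdint>
    #include <mutex>

    enum Status { UNINITIALIZED, DORMANT_DURING_TESTS, DEACTIVATED, PROFILING_ACTIVE };

    std::atomic<int32_t> g_status{UNINITIALIZED};
    std::mutex g_list_lock;

    void InitOnce() {
      // Unlocked fast path: acquire, so if another thread already finished,
      // everything it published before its release store is visible here.
      if (g_status.load(std::memory_order_acquire) >= DEACTIVATED)
        return;

      std::lock_guard<std::mutex> lock(g_list_lock);
      // Recheck under the lock; a racing thread may have beaten us to it.
      if (g_status.load(std::memory_order_acquire) >= DEACTIVATED)
        return;

      // ... one-time setup (TLS index, timers, incarnation counter) ...

      // Publish the new state last, with release semantics.
      g_status.store(PROFILING_ACTIVE, std::memory_order_release);
    }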
@@ -742,17 +742,17 @@ void ThreadData::InitializeAndSetTrackingStatus(Status status) {
if (status > DEACTIVATED)
status = PROFILING_ACTIVE;
- status_ = status;
+ base::subtle::Release_Store(&status_, status);
}
// static
ThreadData::Status ThreadData::status() {
- return status_;
+ return static_cast<ThreadData::Status>(base::subtle::Acquire_Load(&status_));
}
// static
bool ThreadData::TrackingStatus() {
- return status_ > DEACTIVATED;
+ return base::subtle::Acquire_Load(&status_) > DEACTIVATED;
}
// static
@@ -817,7 +817,8 @@ void ThreadData::ShutdownSingleThreadedCleanup(bool leak) {
worker_thread_data_creation_count_ = 0;
cleanup_count_ = 0;
tls_index_.Set(NULL);
- status_ = DORMANT_DURING_TESTS; // Almost UNINITIALIZED.
+ // Almost UNINITIALIZED.
+ base::subtle::Release_Store(&status_, DORMANT_DURING_TESTS);
// To avoid any chance of racing in unit tests, which is the only place we
// call this function, we may sometimes leak all the data structures we
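With status_ now a base::subtle::Atomic32, the remaining hunks reduce to a store/load pair around an enum: writers use Release_Store and readers cast the Acquire_Load result back to Status. A small standard-atomics illustration of the same accessors (names are placeholders):

    #include <atomic>
    #include <cstdint>

    enum Status { UNINITIALIZED, DORMANT_DURING_TESTS, DEACTIVATED, PROFILING_ACTIVE };

    std::atomic<int32_t> g_status{UNINITIALIZED};

    void SetStatus(Status status) {
      g_status.store(status, std::memory_order_release);
    }

    Status GetStatus() {
      // The atomic holds a plain int32_t, so the reader casts back to the enum,
      // mirroring the static_cast added in ThreadData::status().
      return static_cast<Status>(g_status.load(std::memory_order_acquire));
    }

    bool TrackingEnabled() {
      return g_status.load(std::memory_order_acquire) > DEACTIVATED;
    }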
diff --git a/base/tracked_objects.h b/base/tracked_objects.h
index 8f83794..c1e0708 100644
--- a/base/tracked_objects.h
+++ b/base/tracked_objects.h
@@ -12,6 +12,7 @@
#include <utility>
#include <vector>
+#include "base/atomicops.h"
#include "base/base_export.h"
#include "base/basictypes.h"
#include "base/containers/hash_tables.h"
@@ -661,7 +662,7 @@ class BASE_EXPORT ThreadData {
static base::LazyInstance<base::Lock>::Leaky list_lock_;
// We set status_ to SHUTDOWN when we shut down the tracking service.
- static Status status_;
+ static base::subtle::Atomic32 status_;
// Link to next instance (null terminated list). Used to globally track all
// registered instances (corresponds to all registered threads where we keep