author    Ian Rogers <irogers@google.com>  2014-03-06 12:13:39 -0800
committer Ian Rogers <irogers@google.com>  2014-03-06 19:16:01 -0800
commit    719d1a33f6569864f529e5a3fff59e7bca97aad0 (patch)
tree      fcd84efd7b9806b93ec1a44e2317e6f882e7fe0e /runtime/base
parent    5365eea9940269b662cfbe103caa348816ff1558 (diff)
Enable annotalysis on clang ART builds.
Fix clang build errors as well as restructure the locking/mutex code for
correct thread safety analysis support. Reorder make dependencies so that
host builds build first, as they should provide better compilation errors
than target builds. Remove the host's use of -fno-omit-frame-pointer, as it
has no value with correct use of CFI, which we should have.

Change-Id: I72cea8da9a3757b1a0b3acb4081feccb7c6cef90
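For context, "annotalysis" is clang's thread safety analysis, switched on
with -Wthread-safety. A minimal sketch of the kind of error it catches once
enabled, written against clang's raw attributes rather than the ART macros
below (every name here is invented for illustration, and the warning text is
approximate):

  // Compile (no link) with: clang++ -c -Wthread-safety example.cc
  class __attribute__((lockable)) Lock {
   public:
    void Acquire() __attribute__((exclusive_lock_function));  // body elided
    void Release() __attribute__((unlock_function));          // body elided
  };

  Lock gLock;
  int gCounter __attribute__((guarded_by(gLock)));

  void Ok() {
    gLock.Acquire();
    gCounter++;  // Fine: gLock is held here.
    gLock.Release();
  }

  void Bad() {
    gCounter++;  // warning: writing variable 'gCounter' requires holding 'gLock'
  }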
Diffstat (limited to 'runtime/base')
-rw-r--r--  runtime/base/logging.h |  28
-rw-r--r--  runtime/base/macros.h  |  72
-rw-r--r--  runtime/base/mutex.cc  |  82
-rw-r--r--  runtime/base/mutex.h   | 159
4 files changed, 264 insertions(+), 77 deletions(-)
diff --git a/runtime/base/logging.h b/runtime/base/logging.h
index 075d571..0fcec1f 100644
--- a/runtime/base/logging.h
+++ b/runtime/base/logging.h
@@ -192,7 +192,7 @@ class LogMessage {
: data_(new LogMessageData(file, line, severity, error)) {
}
- ~LogMessage() LOCKS_EXCLUDED(Locks::logging_lock_);
+ ~LogMessage(); // TODO: enable LOCKS_EXCLUDED(Locks::logging_lock_).
std::ostream& stream() {
return data_->buffer;
@@ -235,32 +235,6 @@ std::ostream& operator<<(std::ostream& os, const Dumpable<T>& rhs) {
return os;
}
-template<typename T>
-class MutatorLockedDumpable {
- public:
- explicit MutatorLockedDumpable(T& value)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) : value_(value) {
- }
-
- void Dump(std::ostream& os) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- value_.Dump(os);
- }
-
- private:
- T& value_;
-
- DISALLOW_COPY_AND_ASSIGN(MutatorLockedDumpable);
-};
-
-template<typename T>
-std::ostream& operator<<(std::ostream& os, const MutatorLockedDumpable<T>& rhs)
-// TODO: should be SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) however annotalysis
-// currently fails for this.
- NO_THREAD_SAFETY_ANALYSIS {
- rhs.Dump(os);
- return os;
-}
-
// Helps you use operator<< in a const char*-like context such as our various 'F' methods with
// format strings.
template<typename T>
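The MutatorLockedDumpable removed above encoded a checkable contract: Dump()
requires a reader share on Locks::mutator_lock_, so annotalysis can verify
every caller. A minimal self-contained sketch of that shared-lock pattern
using the macro spellings from this change (FakeRWLock, Heap, and
gMutatorLock are invented stand-ins, not ART's types):

  #include <ostream>

  class LOCKABLE FakeRWLock {
   public:
    void SharedLock() SHARED_LOCK_FUNCTION();  // body elided
    void Unlock() UNLOCK_FUNCTION();           // body elided
  };

  FakeRWLock gMutatorLock;

  struct Heap {
    // Callers must hold at least a reader share on gMutatorLock.
    void Dump(std::ostream& os) const SHARED_LOCKS_REQUIRED(gMutatorLock);
  };

  void DumpHeap(const Heap& heap, std::ostream& os) {
    gMutatorLock.SharedLock();
    heap.Dump(os);  // OK: reader share held.
    gMutatorLock.Unlock();
  }
  // Calling heap.Dump(os) without the share would be flagged under
  // -Wthread-safety, which is why the operator<< above needed the
  // NO_THREAD_SAFETY_ANALYSIS escape hatch.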
diff --git a/runtime/base/macros.h b/runtime/base/macros.h
index 6cc9396..b193ff1 100644
--- a/runtime/base/macros.h
+++ b/runtime/base/macros.h
@@ -178,48 +178,40 @@ char (&ArraySizeHelper(T (&array)[N]))[N];
template<typename T> void UNUSED(const T&) {}
-#if defined(__SUPPORT_TS_ANNOTATION__)
-
-#define ACQUIRED_AFTER(...) __attribute__ ((acquired_after(__VA_ARGS__)))
-#define ACQUIRED_BEFORE(...) __attribute__ ((acquired_before(__VA_ARGS__)))
-#define EXCLUSIVE_LOCK_FUNCTION(...) __attribute__ ((exclusive_lock(__VA_ARGS__)))
-#define EXCLUSIVE_LOCKS_REQUIRED(...) __attribute__ ((exclusive_locks_required(__VA_ARGS__)))
-#define EXCLUSIVE_TRYLOCK_FUNCTION(...) __attribute__ ((exclusive_trylock(__VA_ARGS__)))
-#define GUARDED_BY(x) __attribute__ ((guarded_by(x)))
-#define GUARDED_VAR __attribute__ ((guarded))
-#define LOCKABLE __attribute__ ((lockable))
-#define LOCK_RETURNED(x) __attribute__ ((lock_returned(x)))
-#define LOCKS_EXCLUDED(...) __attribute__ ((locks_excluded(__VA_ARGS__)))
-#define NO_THREAD_SAFETY_ANALYSIS __attribute__ ((no_thread_safety_analysis))
-#define PT_GUARDED_BY(x) __attribute__ ((point_to_guarded_by(x)))
-#define PT_GUARDED_VAR __attribute__ ((point_to_guarded))
-#define SCOPED_LOCKABLE __attribute__ ((scoped_lockable))
-#define SHARED_LOCK_FUNCTION(...) __attribute__ ((shared_lock(__VA_ARGS__)))
-#define SHARED_LOCKS_REQUIRED(...) __attribute__ ((shared_locks_required(__VA_ARGS__)))
-#define SHARED_TRYLOCK_FUNCTION(...) __attribute__ ((shared_trylock(__VA_ARGS__)))
-#define UNLOCK_FUNCTION(...) __attribute__ ((unlock(__VA_ARGS__)))
-
+// Annotalysis thread-safety analysis support.
+#if defined(__SUPPORT_TS_ANNOTATION__) || defined(__clang__)
+#define THREAD_ANNOTATION_ATTRIBUTE__(x) __attribute__((x))
#else
+#define THREAD_ANNOTATION_ATTRIBUTE__(x) // no-op
+#endif
-#define ACQUIRED_AFTER(...)
-#define ACQUIRED_BEFORE(...)
-#define EXCLUSIVE_LOCK_FUNCTION(...)
-#define EXCLUSIVE_LOCKS_REQUIRED(...)
-#define EXCLUSIVE_TRYLOCK_FUNCTION(...)
-#define GUARDED_BY(x)
-#define GUARDED_VAR
-#define LOCKABLE
-#define LOCK_RETURNED(x)
-#define LOCKS_EXCLUDED(...)
-#define NO_THREAD_SAFETY_ANALYSIS
+#define ACQUIRED_AFTER(...) THREAD_ANNOTATION_ATTRIBUTE__(acquired_after(__VA_ARGS__))
+#define ACQUIRED_BEFORE(...) THREAD_ANNOTATION_ATTRIBUTE__(acquired_before(__VA_ARGS__))
+#define EXCLUSIVE_LOCKS_REQUIRED(...) THREAD_ANNOTATION_ATTRIBUTE__(exclusive_locks_required(__VA_ARGS__))
+#define GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE__(guarded_by(x))
+#define GUARDED_VAR THREAD_ANNOTATION_ATTRIBUTE__(guarded)
+#define LOCKABLE THREAD_ANNOTATION_ATTRIBUTE__(lockable)
+#define LOCK_RETURNED(x) THREAD_ANNOTATION_ATTRIBUTE__(lock_returned(x))
+#define LOCKS_EXCLUDED(...) THREAD_ANNOTATION_ATTRIBUTE__(locks_excluded(__VA_ARGS__))
+#define NO_THREAD_SAFETY_ANALYSIS THREAD_ANNOTATION_ATTRIBUTE__(no_thread_safety_analysis)
#define PT_GUARDED_BY(x)
-#define PT_GUARDED_VAR
-#define SCOPED_LOCKABLE
-#define SHARED_LOCK_FUNCTION(...)
-#define SHARED_LOCKS_REQUIRED(...)
-#define SHARED_TRYLOCK_FUNCTION(...)
-#define UNLOCK_FUNCTION(...)
-
-#endif // defined(__SUPPORT_TS_ANNOTATION__)
+// THREAD_ANNOTATION_ATTRIBUTE__(point_to_guarded_by(x))
+#define PT_GUARDED_VAR THREAD_ANNOTATION_ATTRIBUTE__(point_to_guarded)
+#define SCOPED_LOCKABLE THREAD_ANNOTATION_ATTRIBUTE__(scoped_lockable)
+#define SHARED_LOCKS_REQUIRED(...) THREAD_ANNOTATION_ATTRIBUTE__(shared_locks_required(__VA_ARGS__))
+
+#if defined(__clang__)
+#define EXCLUSIVE_LOCK_FUNCTION(...) THREAD_ANNOTATION_ATTRIBUTE__(exclusive_lock_function(__VA_ARGS__))
+#define EXCLUSIVE_TRYLOCK_FUNCTION(...) THREAD_ANNOTATION_ATTRIBUTE__(exclusive_trylock_function(__VA_ARGS__))
+#define SHARED_LOCK_FUNCTION(...) THREAD_ANNOTATION_ATTRIBUTE__(shared_lock_function(__VA_ARGS__))
+#define SHARED_TRYLOCK_FUNCTION(...) THREAD_ANNOTATION_ATTRIBUTE__(shared_trylock_function(__VA_ARGS__))
+#define UNLOCK_FUNCTION(...) THREAD_ANNOTATION_ATTRIBUTE__(unlock_function(__VA_ARGS__))
+#else
+#define EXCLUSIVE_LOCK_FUNCTION(...) THREAD_ANNOTATION_ATTRIBUTE__(exclusive_lock(__VA_ARGS__))
+#define EXCLUSIVE_TRYLOCK_FUNCTION(...) THREAD_ANNOTATION_ATTRIBUTE__(exclusive_trylock(__VA_ARGS__))
+#define SHARED_LOCK_FUNCTION(...) THREAD_ANNOTATION_ATTRIBUTE__(shared_lock(__VA_ARGS__))
+#define SHARED_TRYLOCK_FUNCTION(...) THREAD_ANNOTATION_ATTRIBUTE__(shared_trylock(__VA_ARGS__))
+#define UNLOCK_FUNCTION(...) THREAD_ANNOTATION_ATTRIBUTE__(unlock(__VA_ARGS__))
+#endif
#endif // ART_RUNTIME_BASE_MACROS_H_
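A usage sketch of the macro layer defined above: on clang, or a compiler
defining __SUPPORT_TS_ANNOTATION__, the macros expand to thread-safety
attributes; on anything else they expand to nothing, so annotated code stays
portable. BankLock, gAccountsLock, and the deposit functions are invented
for illustration, and the include path is assumed:

  #include "base/macros.h"  // assumed include path within the ART tree

  class LOCKABLE BankLock {
   public:
    void Lock() EXCLUSIVE_LOCK_FUNCTION();  // body elided
    void Unlock() UNLOCK_FUNCTION();        // body elided
  };

  BankLock gAccountsLock;
  int gBalance GUARDED_BY(gAccountsLock);

  // Callers must already hold gAccountsLock.
  void DepositLocked(int amount) EXCLUSIVE_LOCKS_REQUIRED(gAccountsLock) {
    gBalance += amount;
  }

  // Callers must NOT hold gAccountsLock; this function takes it itself.
  void Deposit(int amount) LOCKS_EXCLUDED(gAccountsLock) {
    gAccountsLock.Lock();
    DepositLocked(amount);  // OK: the lock is held here.
    gAccountsLock.Unlock();
  }

The single THREAD_ANNOTATION_ATTRIBUTE__ wrapper keeps each annotation
defined once; only the handful of attribute spellings that differ between
clang and GCC's annotalysis fork remain in the trailing #if block.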
diff --git a/runtime/base/mutex.cc b/runtime/base/mutex.cc
index ff72d16..fdf5763 100644
--- a/runtime/base/mutex.cc
+++ b/runtime/base/mutex.cc
@@ -29,6 +29,30 @@
namespace art {
+Mutex* Locks::abort_lock_ = nullptr;
+Mutex* Locks::breakpoint_lock_ = nullptr;
+Mutex* Locks::deoptimization_lock_ = nullptr;
+ReaderWriterMutex* Locks::classlinker_classes_lock_ = nullptr;
+ReaderWriterMutex* Locks::heap_bitmap_lock_ = nullptr;
+Mutex* Locks::logging_lock_ = nullptr;
+ReaderWriterMutex* Locks::mutator_lock_ = nullptr;
+Mutex* Locks::runtime_shutdown_lock_ = nullptr;
+Mutex* Locks::thread_list_lock_ = nullptr;
+Mutex* Locks::thread_suspend_count_lock_ = nullptr;
+Mutex* Locks::trace_lock_ = nullptr;
+Mutex* Locks::profiler_lock_ = nullptr;
+Mutex* Locks::unexpected_signal_lock_ = nullptr;
+Mutex* Locks::intern_table_lock_ = nullptr;
+
+struct AllMutexData {
+ // A guard for all_mutexes_ that's not a mutex (Mutexes must CAS to acquire and busy wait).
+ Atomic<const BaseMutex*> all_mutexes_guard;
+ // All created mutexes guarded by all_mutexes_guard_.
+ std::set<BaseMutex*>* all_mutexes;
+ AllMutexData() : all_mutexes(NULL) {}
+};
+static struct AllMutexData gAllMutexData[kAllMutexDataSize];
+
#if ART_USE_FUTEXES
static bool ComputeRelativeTimeSpec(timespec* result_ts, const timespec& lhs, const timespec& rhs) {
const int32_t one_sec = 1000 * 1000 * 1000; // one second in nanoseconds.
@@ -45,15 +69,6 @@ static bool ComputeRelativeTimeSpec(timespec* result_ts, const timespec& lhs, co
}
#endif
-struct AllMutexData {
- // A guard for all_mutexes_ that's not a mutex (Mutexes must CAS to acquire and busy wait).
- Atomic<const BaseMutex*> all_mutexes_guard;
- // All created mutexes guarded by all_mutexes_guard_.
- std::set<BaseMutex*>* all_mutexes;
- AllMutexData() : all_mutexes(NULL) {}
-};
-static struct AllMutexData gAllMutexData[kAllMutexDataSize];
-
class ScopedAllMutexesLock {
public:
explicit ScopedAllMutexesLock(const BaseMutex* mutex) : mutex_(mutex) {
@@ -792,4 +807,53 @@ void ConditionVariable::TimedWait(Thread* self, int64_t ms, int32_t ns) {
guard_.recursion_count_ = old_recursion_count;
}
+void Locks::Init() {
+ if (logging_lock_ != nullptr) {
+ // Already initialized.
+ DCHECK(abort_lock_ != nullptr);
+ DCHECK(breakpoint_lock_ != nullptr);
+ DCHECK(deoptimization_lock_ != nullptr);
+ DCHECK(classlinker_classes_lock_ != nullptr);
+ DCHECK(heap_bitmap_lock_ != nullptr);
+ DCHECK(logging_lock_ != nullptr);
+ DCHECK(mutator_lock_ != nullptr);
+ DCHECK(thread_list_lock_ != nullptr);
+ DCHECK(thread_suspend_count_lock_ != nullptr);
+ DCHECK(trace_lock_ != nullptr);
+ DCHECK(profiler_lock_ != nullptr);
+ DCHECK(unexpected_signal_lock_ != nullptr);
+ DCHECK(intern_table_lock_ != nullptr);
+ } else {
+ logging_lock_ = new Mutex("logging lock", kLoggingLock, true);
+ abort_lock_ = new Mutex("abort lock", kAbortLock, true);
+
+ DCHECK(breakpoint_lock_ == nullptr);
+ breakpoint_lock_ = new Mutex("breakpoint lock", kBreakpointLock);
+ DCHECK(deoptimization_lock_ == nullptr);
+ deoptimization_lock_ = new Mutex("deoptimization lock", kDeoptimizationLock);
+ DCHECK(classlinker_classes_lock_ == nullptr);
+ classlinker_classes_lock_ = new ReaderWriterMutex("ClassLinker classes lock",
+ kClassLinkerClassesLock);
+ DCHECK(heap_bitmap_lock_ == nullptr);
+ heap_bitmap_lock_ = new ReaderWriterMutex("heap bitmap lock", kHeapBitmapLock);
+ DCHECK(mutator_lock_ == nullptr);
+ mutator_lock_ = new ReaderWriterMutex("mutator lock", kMutatorLock);
+ DCHECK(runtime_shutdown_lock_ == nullptr);
+ runtime_shutdown_lock_ = new Mutex("runtime shutdown lock", kRuntimeShutdownLock);
+ DCHECK(thread_list_lock_ == nullptr);
+ thread_list_lock_ = new Mutex("thread list lock", kThreadListLock);
+ DCHECK(thread_suspend_count_lock_ == nullptr);
+ thread_suspend_count_lock_ = new Mutex("thread suspend count lock", kThreadSuspendCountLock);
+ DCHECK(trace_lock_ == nullptr);
+ trace_lock_ = new Mutex("trace lock", kTraceLock);
+ DCHECK(profiler_lock_ == nullptr);
+ profiler_lock_ = new Mutex("profiler lock", kProfilerLock);
+ DCHECK(unexpected_signal_lock_ == nullptr);
+ unexpected_signal_lock_ = new Mutex("unexpected signal lock", kUnexpectedSignalLock, true);
+ DCHECK(intern_table_lock_ == nullptr);
+ intern_table_lock_ = new Mutex("InternTable lock", kInternTableLock);
+ }
+}
+
+
} // namespace art
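The AllMutexData guard above is deliberately not a Mutex: the registry holds
every created Mutex, so guarding it with a Mutex would risk circularity, and
it is instead taken by compare-and-swap with a busy wait, as the comment
notes. A minimal sketch of that pattern with std::atomic (ScopedGuard and
the token type are illustrative; ART uses its own Atomic wrapper and
ScopedAllMutexesLock):

  #include <atomic>

  static std::atomic<const void*> gAllMutexesGuard{nullptr};

  class ScopedGuard {
   public:
    explicit ScopedGuard(const void* owner) : owner_(owner) {
      const void* expected = nullptr;
      // Spin until the guard swings from nullptr to our token.
      while (!gAllMutexesGuard.compare_exchange_weak(expected, owner_)) {
        expected = nullptr;  // On failure, expected was overwritten; reset it.
      }
    }
    ~ScopedGuard() {
      gAllMutexesGuard.store(nullptr, std::memory_order_release);
    }
   private:
    const void* owner_;
  };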
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index 63ed6cb..55ec1c3 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -27,7 +27,6 @@
#include "base/logging.h"
#include "base/macros.h"
#include "globals.h"
-#include "locks.h"
#if defined(__APPLE__)
#define ART_USE_FUTEXES 0
@@ -44,9 +43,56 @@
namespace art {
+class LOCKABLE ReaderWriterMutex;
class ScopedContentionRecorder;
class Thread;
+// LockLevel is used to impose a lock hierarchy [1] where acquisition of a Mutex at a higher or
+// equal level to a lock a thread holds is invalid. The lock hierarchy achieves a cycle-free
+// partial ordering and thereby causes deadlock situations to fail checks.
+//
+// [1] http://www.drdobbs.com/parallel/use-lock-hierarchies-to-avoid-deadlock/204801163
+enum LockLevel {
+ kLoggingLock = 0,
+ kUnexpectedSignalLock,
+ kThreadSuspendCountLock,
+ kAbortLock,
+ kJdwpSocketLock,
+ kRosAllocGlobalLock,
+ kRosAllocBracketLock,
+ kRosAllocBulkFreeLock,
+ kAllocSpaceLock,
+ kDexFileMethodInlinerLock,
+ kDexFileToMethodInlinerMapLock,
+ kMarkSweepMarkStackLock,
+ kTransactionLogLock,
+ kInternTableLock,
+ kMonitorPoolLock,
+ kDefaultMutexLevel,
+ kMarkSweepLargeObjectLock,
+ kPinTableLock,
+ kLoadLibraryLock,
+ kJdwpObjectRegistryLock,
+ kClassLinkerClassesLock,
+ kBreakpointLock,
+ kMonitorLock,
+ kThreadListLock,
+ kBreakpointInvokeLock,
+ kDeoptimizationLock,
+ kTraceLock,
+ kProfilerLock,
+ kJdwpEventListLock,
+ kJdwpAttachLock,
+ kJdwpStartLock,
+ kRuntimeShutdownLock,
+ kHeapBitmapLock,
+ kMutatorLock,
+ kZygoteCreationLock,
+
+ kLockLevelCount // Must come last.
+};
+std::ostream& operator<<(std::ostream& os, const LockLevel& rhs);
+
const bool kDebugLocking = kIsDebugBuild;
// Record Log contention information, dumpable via SIGQUIT.
@@ -413,6 +459,117 @@ class SCOPED_LOCKABLE WriterMutexLock {
// "WriterMutexLock mu(lock)".
#define WriterMutexLock(x) COMPILE_ASSERT(0, writer_mutex_lock_declaration_missing_variable_name)
+// Global mutexes corresponding to the levels above.
+class Locks {
+ public:
+ static void Init();
+
+ // The mutator_lock_ is used to allow mutators to execute in a shared (reader) mode or to block
+ // mutators by having an exclusive (writer) owner. In normal execution each mutator thread holds
+ // a share on the mutator_lock_. The garbage collector may also execute with shared access but
+ // at times requires exclusive access to the heap (not to be confused with the heap meta-data
+ // guarded by the heap_lock_ below). When the garbage collector requires exclusive access it asks
+ // the mutators to suspend themselves which also involves usage of the thread_suspend_count_lock_
+ // to cover weaknesses in using ReaderWriterMutexes with ConditionVariables. We use a condition
+ // variable to wait upon in the suspension logic as releasing and then re-acquiring a share on
+// the mutator lock doesn't necessarily give the exclusive user (e.g. the garbage collector)
+// a chance to acquire the lock.
+ //
+ // Thread suspension:
+ // Shared users | Exclusive user
+ // (holding mutator lock and in kRunnable state) | .. running ..
+ // .. running .. | Request thread suspension by:
+ // .. running .. | - acquiring thread_suspend_count_lock_
+ // .. running .. | - incrementing Thread::suspend_count_ on
+ // .. running .. | all mutator threads
+ // .. running .. | - releasing thread_suspend_count_lock_
+ // .. running .. | Block trying to acquire exclusive mutator lock
+ // Poll Thread::suspend_count_ and enter full | .. blocked ..
+ // suspend code. | .. blocked ..
+ // Change state to kSuspended | .. blocked ..
+ // x: Release share on mutator_lock_ | Carry out exclusive access
+ // Acquire thread_suspend_count_lock_ | .. exclusive ..
+ // while Thread::suspend_count_ > 0 | .. exclusive ..
+ // - wait on Thread::resume_cond_ | .. exclusive ..
+ // (releases thread_suspend_count_lock_) | .. exclusive ..
+ // .. waiting .. | Release mutator_lock_
+ // .. waiting .. | Request thread resumption by:
+ // .. waiting .. | - acquiring thread_suspend_count_lock_
+ // .. waiting .. | - decrementing Thread::suspend_count_ on
+ // .. waiting .. | all mutator threads
+ // .. waiting .. | - notifying on Thread::resume_cond_
+ // - re-acquire thread_suspend_count_lock_ | - releasing thread_suspend_count_lock_
+ // Release thread_suspend_count_lock_ | .. running ..
+ // Acquire share on mutator_lock_ | .. running ..
+ // - This could block but the thread still | .. running ..
+ // has a state of kSuspended and so this | .. running ..
+ // isn't an issue. | .. running ..
+ // Acquire thread_suspend_count_lock_ | .. running ..
+ // - we poll here as we're transitioning into | .. running ..
+ // kRunnable and an individual thread suspend | .. running ..
+ // request (e.g for debugging) won't try | .. running ..
+ // to acquire the mutator lock (which would | .. running ..
+ // block as we hold the mutator lock). This | .. running ..
+ // poll ensures that if the suspender thought | .. running ..
+ // we were suspended by incrementing our | .. running ..
+ // Thread::suspend_count_ and then reading | .. running ..
+ // our state we go back to waiting on | .. running ..
+ // Thread::resume_cond_. | .. running ..
+ // can_go_runnable = Thread::suspend_count_ == 0 | .. running ..
+ // Release thread_suspend_count_lock_ | .. running ..
+ // if can_go_runnable | .. running ..
+ // Change state to kRunnable | .. running ..
+ // else | .. running ..
+ // Goto x | .. running ..
+ // .. running .. | .. running ..
+ static ReaderWriterMutex* mutator_lock_;
+
+ // Allow reader-writer mutual exclusion on the mark and live bitmaps of the heap.
+ static ReaderWriterMutex* heap_bitmap_lock_ ACQUIRED_AFTER(mutator_lock_);
+
+ // Guards shutdown of the runtime.
+ static Mutex* runtime_shutdown_lock_ ACQUIRED_AFTER(heap_bitmap_lock_);
+
+ // The thread_list_lock_ guards ThreadList::list_. It is also commonly held to stop threads
+ // attaching and detaching.
+ static Mutex* thread_list_lock_ ACQUIRED_AFTER(runtime_shutdown_lock_);
+
+ // Guards breakpoints.
+ static Mutex* breakpoint_lock_ ACQUIRED_AFTER(thread_list_lock_);
+
+ // Guards deoptimization requests.
+ static Mutex* deoptimization_lock_ ACQUIRED_AFTER(breakpoint_lock_);
+
+ // Guards trace requests.
+ static Mutex* trace_lock_ ACQUIRED_AFTER(deoptimization_lock_);
+
+ // Guards profile objects.
+ static Mutex* profiler_lock_ ACQUIRED_AFTER(trace_lock_);
+
+ // Guards lists of classes within the class linker.
+ static ReaderWriterMutex* classlinker_classes_lock_ ACQUIRED_AFTER(profiler_lock_);
+
+ // When declaring any Mutex add DEFAULT_MUTEX_ACQUIRED_AFTER to use annotalysis to check the code
+ // doesn't try to hold a higher level Mutex.
+ #define DEFAULT_MUTEX_ACQUIRED_AFTER ACQUIRED_AFTER(Locks::classlinker_classes_lock_)
+
+ // Guards intern table.
+ static Mutex* intern_table_lock_ ACQUIRED_AFTER(classlinker_classes_lock_);
+
+ // Have an exclusive aborting thread.
+ static Mutex* abort_lock_ ACQUIRED_AFTER(classlinker_classes_lock_);
+
+ // Allow mutual exclusion when manipulating Thread::suspend_count_.
+ // TODO: Does the trade-off of a per-thread lock make sense?
+ static Mutex* thread_suspend_count_lock_ ACQUIRED_AFTER(abort_lock_);
+
+ // One unexpected signal at a time lock.
+ static Mutex* unexpected_signal_lock_ ACQUIRED_AFTER(thread_suspend_count_lock_);
+
+ // Have an exclusive logging thread.
+ static Mutex* logging_lock_ ACQUIRED_AFTER(unexpected_signal_lock_);
+};
+
} // namespace art
#endif // ART_RUNTIME_BASE_MUTEX_H_
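The LockLevel ordering above also supports a runtime check: a thread may
only acquire a lock at a strictly lower level than every lock it already
holds, so any would-be cycle fails immediately. A minimal sketch of such a
check, assuming hypothetical per-thread bookkeeping with one slot per level
(the reduced enum and all names are illustrative, not ART's actual
implementation):

  #include <cassert>

  // Reduced illustration of the LockLevel idea above.
  enum LockLevel { kLoggingLock = 0, kThreadListLock, kMutatorLock, kLockLevelCount };

  // Hypothetical per-thread record of which levels are currently held.
  thread_local bool tLevelHeld[kLockLevelCount] = {};

  void CheckSafeToAcquire(LockLevel level) {
    // Acquiring at a level equal to or higher than a held lock is the
    // violation described above, so every level at or below the new one
    // must currently be free.
    for (int i = 0; i <= level; ++i) {
      assert(!tLevelHeld[i] && "lock hierarchy violation");
    }
  }

  void OnAcquired(LockLevel level) {
    CheckSafeToAcquire(level);
    tLevelHeld[level] = true;
  }

  void OnReleased(LockLevel level) {
    tLevelHeld[level] = false;
  }

Under this ordering a thread takes, for example, kMutatorLock before
kThreadListLock before kLoggingLock; two threads can never each hold one of
a pair of locks while waiting on the other, because one of them would fail
the check first.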