68 files changed, 500 insertions, 531 deletions
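The bulk of this change wires ART's lock annotations up to clang's -Wthread-safety analysis (annotalysis) and folds locks.h/locks.cc into base/mutex.h/cc so the lock declarations live next to the Mutex implementation. For orientation, here is a minimal, self-contained sketch of the kind of error the analysis catches once the annotations are live; all names below are illustrative, not from this change:

    // Hypothetical example of what clang -Wthread-safety checks (legacy attribute spellings).
    #define LOCKABLE __attribute__((lockable))
    #define GUARDED_BY(x) __attribute__((guarded_by(x)))
    #define EXCLUSIVE_LOCK_FUNCTION(...) __attribute__((exclusive_lock_function(__VA_ARGS__)))
    #define UNLOCK_FUNCTION(...) __attribute__((unlock_function(__VA_ARGS__)))

    class LOCKABLE MyMutex {
     public:
      void Lock() EXCLUSIVE_LOCK_FUNCTION();
      void Unlock() UNLOCK_FUNCTION();
    };

    class Counter {
     public:
      void Good() {
        mu_.Lock();
        ++value_;  // OK: the analysis knows mu_ is held here.
        mu_.Unlock();
      }
      void Broken() {
        ++value_;  // clang warns, roughly: writing variable 'value_' requires holding mutex 'mu_'.
      }
     private:
      MyMutex mu_;
      int value_ GUARDED_BY(mu_);
    };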
diff --git a/build/Android.common.mk b/build/Android.common.mk
index 0522456..f58aabc 100644
--- a/build/Android.common.mk
+++ b/build/Android.common.mk
@@ -173,22 +173,24 @@ $(error Required DEX2OAT_TARGET_INSTRUCTION_SET_FEATURES is not set)
 endif
 ART_TARGET_CFLAGS += -DART_DEFAULT_INSTRUCTION_SET_FEATURES=$(DEX2OAT_TARGET_INSTRUCTION_SET_FEATURES)
 
-# Enable thread-safety for GCC 4.6 on the target but not for GCC 4.7 where this feature was removed.
+# Enable thread-safety for GCC 4.6, and clang, but not for GCC 4.7 or later where this feature was
+# removed. Warn when -Wthread-safety is not used.
 ifneq ($(filter 4.6 4.6.%, $(TARGET_GCC_VERSION)),)
   ART_TARGET_CFLAGS += -Wthread-safety
 else
-  # Warn if not using GCC 4.6 for target builds when not doing a top-level or 'mma' build.
-  ifneq ($(ONE_SHOT_MAKEFILE),)
-    # Enable target GCC 4.6 with: export TARGET_GCC_VERSION_EXP=4.6
-    $(info Using target GCC $(TARGET_GCC_VERSION) disables thread-safety checks.)
+  ifeq ($(ART_TARGET_CLANG),true)
+    ART_TARGET_CFLAGS += -Wthread-safety
+  else
+    # Warn if -Wthread-safety is not supported and not doing a top-level or 'mma' build.
+    ifneq ($(ONE_SHOT_MAKEFILE),)
+      # Enable target GCC 4.6 with: export TARGET_GCC_VERSION_EXP=4.6
+      $(info Using target GCC $(TARGET_GCC_VERSION) disables thread-safety checks.)
+    endif
   endif
 endif
 
-# We build with GCC 4.6 on the host.
+# We compile with GCC 4.6 or clang on the host, both of which support -Wthread-safety.
 ART_HOST_CFLAGS += -Wthread-safety
 
-# Make host builds easier to debug and profile by not omitting the frame pointer.
-ART_HOST_CFLAGS += -fno-omit-frame-pointer
-
 # To use oprofile_android --callgraph, uncomment this and recompile with "mmm art -B -j16"
 # ART_TARGET_CFLAGS += -fno-omit-frame-pointer -marm -mapcs
diff --git a/compiler/Android.mk b/compiler/Android.mk
index 499f23f..e3c1958 100644
--- a/compiler/Android.mk
+++ b/compiler/Android.mk
@@ -260,12 +260,6 @@ $$(ENUM_OPERATOR_OUT_GEN): $$(GENERATED_SRC_DIR)/%_operator_out.cc : $(LOCAL_PAT
 
 endef
 
-ifeq ($(ART_BUILD_TARGET_NDEBUG),true)
-  $(eval $(call build-libart-compiler,target,ndebug))
-endif
-ifeq ($(ART_BUILD_TARGET_DEBUG),true)
-  $(eval $(call build-libart-compiler,target,debug))
-endif
 ifeq ($(WITH_HOST_DALVIK),true)
   # We always build dex2oat and dependencies, even if the host build is otherwise disabled, since they are used to cross compile for the target.
   ifeq ($(ART_BUILD_NDEBUG),true)
@@ -275,6 +269,12 @@ ifeq ($(WITH_HOST_DALVIK),true)
     $(eval $(call build-libart-compiler,host,debug))
   endif
 endif
+ifeq ($(ART_BUILD_TARGET_NDEBUG),true)
+  $(eval $(call build-libart-compiler,target,ndebug))
+endif
+ifeq ($(ART_BUILD_TARGET_DEBUG),true)
+  $(eval $(call build-libart-compiler,target,debug))
+endif
 
 # Rule to build /system/lib/libcompiler_rt.a
 # Usually static libraries are not installed on the device.
diff --git a/compiler/common_compiler_test.h b/compiler/common_compiler_test.h
index bca72b8..def7b68 100644
--- a/compiler/common_compiler_test.h
+++ b/compiler/common_compiler_test.h
@@ -35,9 +35,9 @@ namespace art {
 // A signal handler called when have an illegal instruction. We record the fact in
 // a global boolean and then increment the PC in the signal context to return to
 // the next instruction. We know the instruction is an sdiv (4 bytes long).
-static void baddivideinst(int signo, siginfo *si, void *data) { - (void)signo; - (void)si; +static inline void baddivideinst(int signo, siginfo *si, void *data) { + UNUSED(signo); + UNUSED(si); struct ucontext *uc = (struct ucontext *)data; struct sigcontext *sc = &uc->uc_mcontext; sc->arm_r0 = 0; // set R0 to #0 to signal error @@ -56,7 +56,7 @@ static void baddivideinst(int signo, siginfo *si, void *data) { extern "C" bool CheckForARMSDIVInstruction(); -static InstructionSetFeatures GuessInstructionFeatures() { +static inline InstructionSetFeatures GuessInstructionFeatures() { InstructionSetFeatures f; // Uncomment this for processing of /proc/cpuinfo. @@ -107,7 +107,7 @@ static InstructionSetFeatures GuessInstructionFeatures() { // Given a set of instruction features from the build, parse it. The // input 'str' is a comma separated list of feature names. Parse it and // return the InstructionSetFeatures object. -static InstructionSetFeatures ParseFeatureList(std::string str) { +static inline InstructionSetFeatures ParseFeatureList(std::string str) { InstructionSetFeatures result; typedef std::vector<std::string> FeatureList; FeatureList features; diff --git a/compiler/dex/quick/dex_file_method_inliner.cc b/compiler/dex/quick/dex_file_method_inliner.cc index cb424d9..7423393 100644 --- a/compiler/dex/quick/dex_file_method_inliner.cc +++ b/compiler/dex/quick/dex_file_method_inliner.cc @@ -18,7 +18,6 @@ #include "base/macros.h" #include "base/mutex.h" #include "base/mutex-inl.h" -#include "locks.h" #include "thread.h" #include "thread-inl.h" #include "dex/mir_graph.h" diff --git a/compiler/dex/quick/dex_file_method_inliner.h b/compiler/dex/quick/dex_file_method_inliner.h index b4d8dd6..4aff01c 100644 --- a/compiler/dex/quick/dex_file_method_inliner.h +++ b/compiler/dex/quick/dex_file_method_inliner.h @@ -23,7 +23,6 @@ #include "safe_map.h" #include "dex/compiler_enums.h" #include "dex_file.h" -#include "locks.h" namespace art { diff --git a/compiler/dex/verified_method.h b/compiler/dex/verified_method.h index aa0e72a..257e70c 100644 --- a/compiler/dex/verified_method.h +++ b/compiler/dex/verified_method.h @@ -19,6 +19,7 @@ #include <vector> +#include "base/mutex.h" #include "method_reference.h" #include "safe_map.h" diff --git a/compiler/elf_writer.h b/compiler/elf_writer.h index 3610d1a..03b965a 100644 --- a/compiler/elf_writer.h +++ b/compiler/elf_writer.h @@ -23,6 +23,7 @@ #include <vector> #include "base/macros.h" +#include "base/mutex.h" #include "os.h" namespace art { diff --git a/compiler/trampolines/trampoline_compiler.h b/compiler/trampolines/trampoline_compiler.h index 21245db..cb5aa27 100644 --- a/compiler/trampolines/trampoline_compiler.h +++ b/compiler/trampolines/trampoline_compiler.h @@ -20,7 +20,6 @@ #include <stdint.h> #include <vector> -#include "locks.h" #include "driver/compiler_driver.h" namespace art { diff --git a/compiler/utils/arm64/managed_register_arm64.cc b/compiler/utils/arm64/managed_register_arm64.cc index cc0b509..de5cb8c 100644 --- a/compiler/utils/arm64/managed_register_arm64.cc +++ b/compiler/utils/arm64/managed_register_arm64.cc @@ -27,10 +27,10 @@ namespace arm64 { // * [W0, W15] // * [D0, D31] // * [S0, S31] -static const int kNumberOfAvailableCoreRegisters = (X15 - X0) + 1; -static const int kNumberOfAvailableWRegisters = (W15 - W0) + 1; -static const int kNumberOfAvailableDRegisters = kNumberOfDRegisters; -static const int kNumberOfAvailableSRegisters = kNumberOfSRegisters; +// static const int kNumberOfAvailableCoreRegisters = (X15 - X0) + 1; +// 
static const int kNumberOfAvailableWRegisters = (W15 - W0) + 1; +// static const int kNumberOfAvailableDRegisters = kNumberOfDRegisters; +// static const int kNumberOfAvailableSRegisters = kNumberOfSRegisters; // Returns true if this managed-register overlaps the other managed-register. // GP Register Bank: diff --git a/runtime/Android.mk b/runtime/Android.mk index bb1bc99..18e2d3e 100644 --- a/runtime/Android.mk +++ b/runtime/Android.mk @@ -84,7 +84,6 @@ LIBART_COMMON_SRC_FILES := \ jdwp/object_registry.cc \ jni_internal.cc \ jobject_comparator.cc \ - locks.cc \ mem_map.cc \ memory_region.cc \ mirror/art_field.cc \ @@ -289,7 +288,6 @@ LIBART_ENUM_OPERATOR_OUT_HEADER_FILES := \ invoke_type.h \ jdwp/jdwp.h \ jdwp/jdwp_constants.h \ - locks.h \ lock_word.h \ mirror/class.h \ oat.h \ @@ -425,14 +423,8 @@ $$(ENUM_OPERATOR_OUT_GEN): $$(GENERATED_SRC_DIR)/%_operator_out.cc : $(LOCAL_PAT endif endef -ifeq ($(ART_BUILD_TARGET_NDEBUG),true) - $(eval $(call build-libart,target,ndebug,$(ART_TARGET_CLANG))) -endif -ifeq ($(ART_BUILD_TARGET_DEBUG),true) - $(eval $(call build-libart,target,debug,$(ART_TARGET_CLANG))) -endif - -# We always build dex2oat and dependencies, even if the host build is otherwise disabled, since they are used to cross compile for the target. +# We always build dex2oat and dependencies, even if the host build is otherwise disabled, since +# they are used to cross compile for the target. ifeq ($(WITH_HOST_DALVIK),true) ifeq ($(ART_BUILD_NDEBUG),true) $(eval $(call build-libart,host,ndebug,$(ART_HOST_CLANG))) @@ -441,3 +433,10 @@ ifeq ($(WITH_HOST_DALVIK),true) $(eval $(call build-libart,host,debug,$(ART_HOST_CLANG))) endif endif + +ifeq ($(ART_BUILD_TARGET_NDEBUG),true) + $(eval $(call build-libart,target,ndebug,$(ART_TARGET_CLANG))) +endif +ifeq ($(ART_BUILD_TARGET_DEBUG),true) + $(eval $(call build-libart,target,debug,$(ART_TARGET_CLANG))) +endif diff --git a/runtime/arch/arm/context_arm.h b/runtime/arch/arm/context_arm.h index 4a0d082..2ccce8d 100644 --- a/runtime/arch/arm/context_arm.h +++ b/runtime/arch/arm/context_arm.h @@ -17,7 +17,6 @@ #ifndef ART_RUNTIME_ARCH_ARM_CONTEXT_ARM_H_ #define ART_RUNTIME_ARCH_ARM_CONTEXT_ARM_H_ -#include "locks.h" #include "arch/context.h" #include "base/logging.h" #include "registers_arm.h" diff --git a/runtime/arch/context.h b/runtime/arch/context.h index 83bbb11..f7b7835 100644 --- a/runtime/arch/context.h +++ b/runtime/arch/context.h @@ -20,7 +20,7 @@ #include <stddef.h> #include <stdint.h> -#include "locks.h" +#include "base/mutex.h" namespace art { diff --git a/runtime/barrier.h b/runtime/barrier.h index e335c32..0c7fd87 100644 --- a/runtime/barrier.h +++ b/runtime/barrier.h @@ -18,7 +18,6 @@ #define ART_RUNTIME_BARRIER_H_ #include "base/mutex.h" -#include "locks.h" #include "UniquePtr.h" namespace art { diff --git a/runtime/base/logging.h b/runtime/base/logging.h index 075d571..0fcec1f 100644 --- a/runtime/base/logging.h +++ b/runtime/base/logging.h @@ -192,7 +192,7 @@ class LogMessage { : data_(new LogMessageData(file, line, severity, error)) { } - ~LogMessage() LOCKS_EXCLUDED(Locks::logging_lock_); + ~LogMessage(); // TODO: enable LOCKS_EXCLUDED(Locks::logging_lock_). 
std::ostream& stream() { return data_->buffer; @@ -235,32 +235,6 @@ std::ostream& operator<<(std::ostream& os, const Dumpable<T>& rhs) { return os; } -template<typename T> -class MutatorLockedDumpable { - public: - explicit MutatorLockedDumpable(T& value) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) : value_(value) { - } - - void Dump(std::ostream& os) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - value_.Dump(os); - } - - private: - T& value_; - - DISALLOW_COPY_AND_ASSIGN(MutatorLockedDumpable); -}; - -template<typename T> -std::ostream& operator<<(std::ostream& os, const MutatorLockedDumpable<T>& rhs) -// TODO: should be SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) however annotalysis -// currently fails for this. - NO_THREAD_SAFETY_ANALYSIS { - rhs.Dump(os); - return os; -} - // Helps you use operator<< in a const char*-like context such as our various 'F' methods with // format strings. template<typename T> diff --git a/runtime/base/macros.h b/runtime/base/macros.h index 6cc9396..b193ff1 100644 --- a/runtime/base/macros.h +++ b/runtime/base/macros.h @@ -178,48 +178,40 @@ char (&ArraySizeHelper(T (&array)[N]))[N]; template<typename T> void UNUSED(const T&) {} -#if defined(__SUPPORT_TS_ANNOTATION__) - -#define ACQUIRED_AFTER(...) __attribute__ ((acquired_after(__VA_ARGS__))) -#define ACQUIRED_BEFORE(...) __attribute__ ((acquired_before(__VA_ARGS__))) -#define EXCLUSIVE_LOCK_FUNCTION(...) __attribute__ ((exclusive_lock(__VA_ARGS__))) -#define EXCLUSIVE_LOCKS_REQUIRED(...) __attribute__ ((exclusive_locks_required(__VA_ARGS__))) -#define EXCLUSIVE_TRYLOCK_FUNCTION(...) __attribute__ ((exclusive_trylock(__VA_ARGS__))) -#define GUARDED_BY(x) __attribute__ ((guarded_by(x))) -#define GUARDED_VAR __attribute__ ((guarded)) -#define LOCKABLE __attribute__ ((lockable)) -#define LOCK_RETURNED(x) __attribute__ ((lock_returned(x))) -#define LOCKS_EXCLUDED(...) __attribute__ ((locks_excluded(__VA_ARGS__))) -#define NO_THREAD_SAFETY_ANALYSIS __attribute__ ((no_thread_safety_analysis)) -#define PT_GUARDED_BY(x) __attribute__ ((point_to_guarded_by(x))) -#define PT_GUARDED_VAR __attribute__ ((point_to_guarded)) -#define SCOPED_LOCKABLE __attribute__ ((scoped_lockable)) -#define SHARED_LOCK_FUNCTION(...) __attribute__ ((shared_lock(__VA_ARGS__))) -#define SHARED_LOCKS_REQUIRED(...) __attribute__ ((shared_locks_required(__VA_ARGS__))) -#define SHARED_TRYLOCK_FUNCTION(...) __attribute__ ((shared_trylock(__VA_ARGS__))) -#define UNLOCK_FUNCTION(...) __attribute__ ((unlock(__VA_ARGS__))) - +// Annotalysis thread-safety analysis support. +#if defined(__SUPPORT_TS_ANNOTATION__) || defined(__clang__) +#define THREAD_ANNOTATION_ATTRIBUTE__(x) __attribute__((x)) #else +#define THREAD_ANNOTATION_ATTRIBUTE__(x) // no-op +#endif -#define ACQUIRED_AFTER(...) -#define ACQUIRED_BEFORE(...) -#define EXCLUSIVE_LOCK_FUNCTION(...) -#define EXCLUSIVE_LOCKS_REQUIRED(...) -#define EXCLUSIVE_TRYLOCK_FUNCTION(...) -#define GUARDED_BY(x) -#define GUARDED_VAR -#define LOCKABLE -#define LOCK_RETURNED(x) -#define LOCKS_EXCLUDED(...) -#define NO_THREAD_SAFETY_ANALYSIS +#define ACQUIRED_AFTER(...) THREAD_ANNOTATION_ATTRIBUTE__(acquired_after(__VA_ARGS__)) +#define ACQUIRED_BEFORE(...) THREAD_ANNOTATION_ATTRIBUTE__(acquired_before(__VA_ARGS__)) +#define EXCLUSIVE_LOCKS_REQUIRED(...) 
THREAD_ANNOTATION_ATTRIBUTE__(exclusive_locks_required(__VA_ARGS__)) +#define GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE__(guarded_by(x)) +#define GUARDED_VAR THREAD_ANNOTATION_ATTRIBUTE__(guarded) +#define LOCKABLE THREAD_ANNOTATION_ATTRIBUTE__(lockable) +#define LOCK_RETURNED(x) THREAD_ANNOTATION_ATTRIBUTE__(lock_returned(x)) +#define LOCKS_EXCLUDED(...) THREAD_ANNOTATION_ATTRIBUTE__(locks_excluded(__VA_ARGS__)) +#define NO_THREAD_SAFETY_ANALYSIS THREAD_ANNOTATION_ATTRIBUTE__(no_thread_safety_analysis) #define PT_GUARDED_BY(x) -#define PT_GUARDED_VAR -#define SCOPED_LOCKABLE -#define SHARED_LOCK_FUNCTION(...) -#define SHARED_LOCKS_REQUIRED(...) -#define SHARED_TRYLOCK_FUNCTION(...) -#define UNLOCK_FUNCTION(...) - -#endif // defined(__SUPPORT_TS_ANNOTATION__) +// THREAD_ANNOTATION_ATTRIBUTE__(point_to_guarded_by(x)) +#define PT_GUARDED_VAR THREAD_ANNOTATION_ATTRIBUTE__(point_to_guarded) +#define SCOPED_LOCKABLE THREAD_ANNOTATION_ATTRIBUTE__(scoped_lockable) +#define SHARED_LOCKS_REQUIRED(...) THREAD_ANNOTATION_ATTRIBUTE__(shared_locks_required(__VA_ARGS__)) + +#if defined(__clang__) +#define EXCLUSIVE_LOCK_FUNCTION(...) THREAD_ANNOTATION_ATTRIBUTE__(exclusive_lock_function(__VA_ARGS__)) +#define EXCLUSIVE_TRYLOCK_FUNCTION(...) THREAD_ANNOTATION_ATTRIBUTE__(exclusive_trylock_function(__VA_ARGS__)) +#define SHARED_LOCK_FUNCTION(...) THREAD_ANNOTATION_ATTRIBUTE__(shared_lock_function(__VA_ARGS__)) +#define SHARED_TRYLOCK_FUNCTION(...) THREAD_ANNOTATION_ATTRIBUTE__(shared_trylock_function(__VA_ARGS__)) +#define UNLOCK_FUNCTION(...) THREAD_ANNOTATION_ATTRIBUTE__(unlock_function(__VA_ARGS__)) +#else +#define EXCLUSIVE_LOCK_FUNCTION(...) THREAD_ANNOTATION_ATTRIBUTE__(exclusive_lock(__VA_ARGS__)) +#define EXCLUSIVE_TRYLOCK_FUNCTION(...) THREAD_ANNOTATION_ATTRIBUTE__(exclusive_trylock(__VA_ARGS__)) +#define SHARED_LOCK_FUNCTION(...) THREAD_ANNOTATION_ATTRIBUTE__(shared_lock(__VA_ARGS__)) +#define SHARED_TRYLOCK_FUNCTION(...) THREAD_ANNOTATION_ATTRIBUTE__(shared_trylock(__VA_ARGS__)) +#define UNLOCK_FUNCTION(...) THREAD_ANNOTATION_ATTRIBUTE__(unlock(__VA_ARGS__)) +#endif #endif // ART_RUNTIME_BASE_MACROS_H_ diff --git a/runtime/base/mutex.cc b/runtime/base/mutex.cc index ff72d16..fdf5763 100644 --- a/runtime/base/mutex.cc +++ b/runtime/base/mutex.cc @@ -29,6 +29,30 @@ namespace art { +Mutex* Locks::abort_lock_ = nullptr; +Mutex* Locks::breakpoint_lock_ = nullptr; +Mutex* Locks::deoptimization_lock_ = nullptr; +ReaderWriterMutex* Locks::classlinker_classes_lock_ = nullptr; +ReaderWriterMutex* Locks::heap_bitmap_lock_ = nullptr; +Mutex* Locks::logging_lock_ = nullptr; +ReaderWriterMutex* Locks::mutator_lock_ = nullptr; +Mutex* Locks::runtime_shutdown_lock_ = nullptr; +Mutex* Locks::thread_list_lock_ = nullptr; +Mutex* Locks::thread_suspend_count_lock_ = nullptr; +Mutex* Locks::trace_lock_ = nullptr; +Mutex* Locks::profiler_lock_ = nullptr; +Mutex* Locks::unexpected_signal_lock_ = nullptr; +Mutex* Locks::intern_table_lock_ = nullptr; + +struct AllMutexData { + // A guard for all_mutexes_ that's not a mutex (Mutexes must CAS to acquire and busy wait). + Atomic<const BaseMutex*> all_mutexes_guard; + // All created mutexes guarded by all_mutexes_guard_. + std::set<BaseMutex*>* all_mutexes; + AllMutexData() : all_mutexes(NULL) {} +}; +static struct AllMutexData gAllMutexData[kAllMutexDataSize]; + #if ART_USE_FUTEXES static bool ComputeRelativeTimeSpec(timespec* result_ts, const timespec& lhs, const timespec& rhs) { const int32_t one_sec = 1000 * 1000 * 1000; // one second in nanoseconds. 
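// Note on the macro rework above: every annotation now funnels through the single
// THREAD_ANNOTATION_ATTRIBUTE__ hook, so the "which compilers support annotalysis"
// decision is made in exactly one place. Only the lock/trylock/unlock function
// annotations need per-compiler spellings, because GCC 4.6 and clang name those
// attributes differently. For example, the same source line
//
//   void Lock() EXCLUSIVE_LOCK_FUNCTION(mu_);
//
// expands to __attribute__((exclusive_lock(mu_))) under GCC 4.6, to
// __attribute__((exclusive_lock_function(mu_))) under clang, and to nothing at all
// on compilers without annotalysis support.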
@@ -45,15 +69,6 @@ static bool ComputeRelativeTimeSpec(timespec* result_ts, const timespec& lhs, co } #endif -struct AllMutexData { - // A guard for all_mutexes_ that's not a mutex (Mutexes must CAS to acquire and busy wait). - Atomic<const BaseMutex*> all_mutexes_guard; - // All created mutexes guarded by all_mutexes_guard_. - std::set<BaseMutex*>* all_mutexes; - AllMutexData() : all_mutexes(NULL) {} -}; -static struct AllMutexData gAllMutexData[kAllMutexDataSize]; - class ScopedAllMutexesLock { public: explicit ScopedAllMutexesLock(const BaseMutex* mutex) : mutex_(mutex) { @@ -792,4 +807,53 @@ void ConditionVariable::TimedWait(Thread* self, int64_t ms, int32_t ns) { guard_.recursion_count_ = old_recursion_count; } +void Locks::Init() { + if (logging_lock_ != nullptr) { + // Already initialized. + DCHECK(abort_lock_ != nullptr); + DCHECK(breakpoint_lock_ != nullptr); + DCHECK(deoptimization_lock_ != nullptr); + DCHECK(classlinker_classes_lock_ != nullptr); + DCHECK(heap_bitmap_lock_ != nullptr); + DCHECK(logging_lock_ != nullptr); + DCHECK(mutator_lock_ != nullptr); + DCHECK(thread_list_lock_ != nullptr); + DCHECK(thread_suspend_count_lock_ != nullptr); + DCHECK(trace_lock_ != nullptr); + DCHECK(profiler_lock_ != nullptr); + DCHECK(unexpected_signal_lock_ != nullptr); + DCHECK(intern_table_lock_ != nullptr); + } else { + logging_lock_ = new Mutex("logging lock", kLoggingLock, true); + abort_lock_ = new Mutex("abort lock", kAbortLock, true); + + DCHECK(breakpoint_lock_ == nullptr); + breakpoint_lock_ = new Mutex("breakpoint lock", kBreakpointLock); + DCHECK(deoptimization_lock_ == nullptr); + deoptimization_lock_ = new Mutex("deoptimization lock", kDeoptimizationLock); + DCHECK(classlinker_classes_lock_ == nullptr); + classlinker_classes_lock_ = new ReaderWriterMutex("ClassLinker classes lock", + kClassLinkerClassesLock); + DCHECK(heap_bitmap_lock_ == nullptr); + heap_bitmap_lock_ = new ReaderWriterMutex("heap bitmap lock", kHeapBitmapLock); + DCHECK(mutator_lock_ == nullptr); + mutator_lock_ = new ReaderWriterMutex("mutator lock", kMutatorLock); + DCHECK(runtime_shutdown_lock_ == nullptr); + runtime_shutdown_lock_ = new Mutex("runtime shutdown lock", kRuntimeShutdownLock); + DCHECK(thread_list_lock_ == nullptr); + thread_list_lock_ = new Mutex("thread list lock", kThreadListLock); + DCHECK(thread_suspend_count_lock_ == nullptr); + thread_suspend_count_lock_ = new Mutex("thread suspend count lock", kThreadSuspendCountLock); + DCHECK(trace_lock_ == nullptr); + trace_lock_ = new Mutex("trace lock", kTraceLock); + DCHECK(profiler_lock_ == nullptr); + profiler_lock_ = new Mutex("profiler lock", kProfilerLock); + DCHECK(unexpected_signal_lock_ == nullptr); + unexpected_signal_lock_ = new Mutex("unexpected signal lock", kUnexpectedSignalLock, true); + DCHECK(intern_table_lock_ == nullptr); + intern_table_lock_ = new Mutex("InternTable lock", kInternTableLock); + } +} + + } // namespace art diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h index 63ed6cb..55ec1c3 100644 --- a/runtime/base/mutex.h +++ b/runtime/base/mutex.h @@ -27,7 +27,6 @@ #include "base/logging.h" #include "base/macros.h" #include "globals.h" -#include "locks.h" #if defined(__APPLE__) #define ART_USE_FUTEXES 0 @@ -44,9 +43,56 @@ namespace art { +class LOCKABLE ReaderWriterMutex; class ScopedContentionRecorder; class Thread; +// LockLevel is used to impose a lock hierarchy [1] where acquisition of a Mutex at a higher or +// equal level to a lock a thread holds is invalid. 
The lock hierarchy achieves a cycle-free
+// partial ordering and thereby causes deadlock situations to fail checks.
+//
+// [1] http://www.drdobbs.com/parallel/use-lock-hierarchies-to-avoid-deadlock/204801163
+enum LockLevel {
+  kLoggingLock = 0,
+  kUnexpectedSignalLock,
+  kThreadSuspendCountLock,
+  kAbortLock,
+  kJdwpSocketLock,
+  kRosAllocGlobalLock,
+  kRosAllocBracketLock,
+  kRosAllocBulkFreeLock,
+  kAllocSpaceLock,
+  kDexFileMethodInlinerLock,
+  kDexFileToMethodInlinerMapLock,
+  kMarkSweepMarkStackLock,
+  kTransactionLogLock,
+  kInternTableLock,
+  kMonitorPoolLock,
+  kDefaultMutexLevel,
+  kMarkSweepLargeObjectLock,
+  kPinTableLock,
+  kLoadLibraryLock,
+  kJdwpObjectRegistryLock,
+  kClassLinkerClassesLock,
+  kBreakpointLock,
+  kMonitorLock,
+  kThreadListLock,
+  kBreakpointInvokeLock,
+  kDeoptimizationLock,
+  kTraceLock,
+  kProfilerLock,
+  kJdwpEventListLock,
+  kJdwpAttachLock,
+  kJdwpStartLock,
+  kRuntimeShutdownLock,
+  kHeapBitmapLock,
+  kMutatorLock,
+  kZygoteCreationLock,
+
+  kLockLevelCount  // Must come last.
+};
+std::ostream& operator<<(std::ostream& os, const LockLevel& rhs);
+
 const bool kDebugLocking = kIsDebugBuild;
 
 // Record Log contention information, dumpable via SIGQUIT.
@@ -413,6 +459,117 @@ class SCOPED_LOCKABLE WriterMutexLock {
 // "WriterMutexLock mu(lock)".
 #define WriterMutexLock(x) COMPILE_ASSERT(0, writer_mutex_lock_declaration_missing_variable_name)
 
+// Global mutexes corresponding to the levels above.
+class Locks {
+ public:
+  static void Init();
+
+  // The mutator_lock_ is used to allow mutators to execute in a shared (reader) mode or to block
+  // mutators by having an exclusive (writer) owner. In normal execution each mutator thread holds
+  // a share on the mutator_lock_. The garbage collector may also execute with shared access but
+  // at times requires exclusive access to the heap (not to be confused with the heap meta-data
+  // guarded by the heap_lock_ below). When the garbage collector requires exclusive access it asks
+  // the mutators to suspend themselves which also involves usage of the thread_suspend_count_lock_
+  // to cover weaknesses in using ReaderWriterMutexes with ConditionVariables. We use a condition
+  // variable to wait upon in the suspension logic as releasing and then re-acquiring a share on
+  // the mutator lock doesn't necessarily allow the exclusive user (e.g. the garbage collector)
+  // a chance to acquire the lock.
+  //
+  // Thread suspension:
+  // Shared users                                  | Exclusive user
+  // (holding mutator lock and in kRunnable state) | .. running ..
+  // .. running ..                                 | Request thread suspension by:
+  // .. running ..                                 |   - acquiring thread_suspend_count_lock_
+  // .. running ..                                 |   - incrementing Thread::suspend_count_ on
+  // .. running ..                                 |     all mutator threads
+  // .. running ..                                 |   - releasing thread_suspend_count_lock_
+  // .. running ..                                 | Block trying to acquire exclusive mutator lock
+  // Poll Thread::suspend_count_ and enter full    | .. blocked ..
+  // suspend code.                                 | .. blocked ..
+  // Change state to kSuspended                    | .. blocked ..
+  // x: Release share on mutator_lock_             | Carry out exclusive access
+  // Acquire thread_suspend_count_lock_            | .. exclusive ..
+  // while Thread::suspend_count_ > 0              | .. exclusive ..
+  //   - wait on Thread::resume_cond_              | .. exclusive ..
+  //     (releases thread_suspend_count_lock_)     | .. exclusive ..
+  // .. waiting ..                                 | Release mutator_lock_
+  // .. waiting ..                                 | Request thread resumption by:
+  // .. waiting ..                                 |   - acquiring thread_suspend_count_lock_
+  // .. waiting ..                                 |   - decrementing Thread::suspend_count_ on
+  // .. waiting ..                                 |     all mutator threads
+  // .. waiting ..                                 |   - notifying on Thread::resume_cond_
+  //   - re-acquire thread_suspend_count_lock_     |   - releasing thread_suspend_count_lock_
+  // Release thread_suspend_count_lock_            | .. running ..
+  // Acquire share on mutator_lock_                | .. running ..
+  //   - This could block but the thread still     | .. running ..
+  //     has a state of kSuspended and so this     | .. running ..
+  //     isn't an issue.                           | .. running ..
+  // Acquire thread_suspend_count_lock_            | .. running ..
+  //   - we poll here as we're transitioning into  | .. running ..
+  //     kRunnable and an individual thread suspend| .. running ..
+  //     request (e.g. for debugging) won't try    | .. running ..
+  //     to acquire the mutator lock (which would  | .. running ..
+  //     block as we hold the mutator lock). This  | .. running ..
+  //     poll ensures that if the suspender thought| .. running ..
+  //     we were suspended by incrementing our     | .. running ..
+  //     Thread::suspend_count_ and then reading   | .. running ..
+  //     our state we go back to waiting on        | .. running ..
+  //     Thread::resume_cond_.                     | .. running ..
+  // can_go_runnable = Thread::suspend_count_ == 0 | .. running ..
+  // Release thread_suspend_count_lock_            | .. running ..
+  // if can_go_runnable                            | .. running ..
+  //   Change state to kRunnable                   | .. running ..
+  // else                                          | .. running ..
+  //   Goto x                                      | .. running ..
+  // .. running ..                                 | .. running ..
+  static ReaderWriterMutex* mutator_lock_;
+
+  // Allow reader-writer mutual exclusion on the mark and live bitmaps of the heap.
+  static ReaderWriterMutex* heap_bitmap_lock_ ACQUIRED_AFTER(mutator_lock_);
+
+  // Guards shutdown of the runtime.
+  static Mutex* runtime_shutdown_lock_ ACQUIRED_AFTER(heap_bitmap_lock_);
+
+  // The thread_list_lock_ guards ThreadList::list_. It is also commonly held to stop threads
+  // attaching and detaching.
+  static Mutex* thread_list_lock_ ACQUIRED_AFTER(runtime_shutdown_lock_);
+
+  // Guards breakpoints.
+  static Mutex* breakpoint_lock_ ACQUIRED_AFTER(thread_list_lock_);
+
+  // Guards deoptimization requests.
+  static Mutex* deoptimization_lock_ ACQUIRED_AFTER(breakpoint_lock_);
+
+  // Guards trace requests.
+  static Mutex* trace_lock_ ACQUIRED_AFTER(deoptimization_lock_);
+
+  // Guards profile objects.
+  static Mutex* profiler_lock_ ACQUIRED_AFTER(trace_lock_);
+
+  // Guards lists of classes within the class linker.
+  static ReaderWriterMutex* classlinker_classes_lock_ ACQUIRED_AFTER(profiler_lock_);
+
+  // When declaring any Mutex add DEFAULT_MUTEX_ACQUIRED_AFTER to use annotalysis to check the code
+  // doesn't try to hold a higher level Mutex.
+  #define DEFAULT_MUTEX_ACQUIRED_AFTER ACQUIRED_AFTER(Locks::classlinker_classes_lock_)
+
+  // Guards intern table.
+  static Mutex* intern_table_lock_ ACQUIRED_AFTER(classlinker_classes_lock_);
+
+  // Have an exclusive aborting thread.
+  static Mutex* abort_lock_ ACQUIRED_AFTER(classlinker_classes_lock_);
+
+  // Allow mutual exclusion when manipulating Thread::suspend_count_.
+  // TODO: Does the trade-off of a per-thread lock make sense?
+  static Mutex* thread_suspend_count_lock_ ACQUIRED_AFTER(abort_lock_);
+
+  // One unexpected signal at a time lock.
+  static Mutex* unexpected_signal_lock_ ACQUIRED_AFTER(thread_suspend_count_lock_);
+
+  // Have an exclusive logging thread.
+ static Mutex* logging_lock_ ACQUIRED_AFTER(unexpected_signal_lock_); +}; + } // namespace art #endif // ART_RUNTIME_BASE_MUTEX_H_ diff --git a/runtime/class_linker.h b/runtime/class_linker.h index aad7cfc..701e62e 100644 --- a/runtime/class_linker.h +++ b/runtime/class_linker.h @@ -260,7 +260,7 @@ class ClassLinker { bool GenerateOatFile(const char* dex_filename, int oat_fd, const char* oat_cache_filename, - std::string* error_msg); + std::string* error_msg) LOCKS_EXCLUDED(Locks::mutator_lock_); const OatFile* FindOatFileFromOatLocation(const std::string& location, @@ -519,7 +519,7 @@ class ClassLinker { SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); const OatFile* FindOpenedOatFileFromDexLocation(const char* dex_location, const uint32_t* const dex_location_checksum) - LOCKS_EXCLUDED(dex_lock); + LOCKS_EXCLUDED(dex_lock_); const OatFile* FindOpenedOatFileFromOatLocation(const std::string& oat_location) LOCKS_EXCLUDED(dex_lock_); const DexFile* FindDexFileInOatLocation(const char* dex_location, diff --git a/runtime/compiler_callbacks.h b/runtime/compiler_callbacks.h index 7233d8e..b07043f 100644 --- a/runtime/compiler_callbacks.h +++ b/runtime/compiler_callbacks.h @@ -17,8 +17,8 @@ #ifndef ART_RUNTIME_COMPILER_CALLBACKS_H_ #define ART_RUNTIME_COMPILER_CALLBACKS_H_ +#include "base/mutex.h" #include "class_reference.h" -#include "locks.h" namespace art { diff --git a/runtime/debugger.cc b/runtime/debugger.cc index 3b4e9c7..7e2dfd2 100644 --- a/runtime/debugger.cc +++ b/runtime/debugger.cc @@ -184,14 +184,14 @@ static Dbg::HpsgWhat gDdmHpsgWhat; static Dbg::HpsgWhen gDdmNhsgWhen = Dbg::HPSG_WHEN_NEVER; static Dbg::HpsgWhat gDdmNhsgWhat; -static ObjectRegistry* gRegistry = NULL; +static ObjectRegistry* gRegistry = nullptr; // Recent allocation tracking. -static Mutex gAllocTrackerLock DEFAULT_MUTEX_ACQUIRED_AFTER("AllocTracker lock"); -AllocRecord* Dbg::recent_allocation_records_ PT_GUARDED_BY(gAllocTrackerLock) = NULL; // TODO: CircularBuffer<AllocRecord> -static size_t gAllocRecordMax GUARDED_BY(gAllocTrackerLock) = 0; -static size_t gAllocRecordHead GUARDED_BY(gAllocTrackerLock) = 0; -static size_t gAllocRecordCount GUARDED_BY(gAllocTrackerLock) = 0; +Mutex* Dbg::alloc_tracker_lock_ = nullptr; +AllocRecord* Dbg::recent_allocation_records_ = nullptr; // TODO: CircularBuffer<AllocRecord> +size_t Dbg::alloc_record_max_ = 0; +size_t Dbg::alloc_record_head_ = 0; +size_t Dbg::alloc_record_count_ = 0; // Deoptimization support. struct MethodInstrumentationRequest { @@ -468,9 +468,10 @@ void Dbg::StartJdwp() { return; } - CHECK(gRegistry == NULL); + CHECK(gRegistry == nullptr); gRegistry = new ObjectRegistry; + alloc_tracker_lock_ = new Mutex("AllocTracker lock"); // Init JDWP if the debugger is enabled. This may connect out to a // debugger, passively listen for a debugger, or block waiting for a // debugger. @@ -496,9 +497,11 @@ void Dbg::StopJdwp() { // Prevent the JDWP thread from processing JDWP incoming packets after we close the connection. 
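// Aside on the Locks hierarchy defined in mutex.h above: the ordering is enforced
// statically by the ACQUIRED_AFTER annotations and, when kDebugLocking is set
// (debug builds), dynamically at acquisition time. A sketch with hypothetical
// locks (not code from this change):
//
//   Mutex* higher = new Mutex("thread list lock", kThreadListLock);
//   Mutex* lower = new Mutex("abort lock", kAbortLock);
//
//   void Good(Thread* self) {
//     MutexLock tl(self, *higher);  // level kThreadListLock
//     MutexLock ab(self, *lower);   // OK: kAbortLock is a strictly lower level.
//   }
//
//   void Bad(Thread* self) {
//     MutexLock ab(self, *lower);
//     MutexLock tl(self, *higher);  // Rejected in debug builds: acquiring a
//   }                               // higher-level lock while holding a lower one.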
Disposed(); delete gJdwpState; - gJdwpState = NULL; + gJdwpState = nullptr; delete gRegistry; - gRegistry = NULL; + gRegistry = nullptr; + delete alloc_tracker_lock_; + alloc_tracker_lock_ = nullptr; } void Dbg::GcDidFinish() { @@ -3695,15 +3698,15 @@ static size_t GetAllocTrackerMax() { } void Dbg::SetAllocTrackingEnabled(bool enabled) { - MutexLock mu(Thread::Current(), gAllocTrackerLock); + MutexLock mu(Thread::Current(), *alloc_tracker_lock_); if (enabled) { if (recent_allocation_records_ == NULL) { - gAllocRecordMax = GetAllocTrackerMax(); - LOG(INFO) << "Enabling alloc tracker (" << gAllocRecordMax << " entries of " + alloc_record_max_ = GetAllocTrackerMax(); + LOG(INFO) << "Enabling alloc tracker (" << alloc_record_max_ << " entries of " << kMaxAllocRecordStackDepth << " frames, taking " - << PrettySize(sizeof(AllocRecord) * gAllocRecordMax) << ")"; - gAllocRecordHead = gAllocRecordCount = 0; - recent_allocation_records_ = new AllocRecord[gAllocRecordMax]; + << PrettySize(sizeof(AllocRecord) * alloc_record_max_) << ")"; + alloc_record_head_ = alloc_record_count_ = 0; + recent_allocation_records_ = new AllocRecord[alloc_record_max_]; CHECK(recent_allocation_records_ != NULL); } Runtime::Current()->GetInstrumentation()->InstrumentQuickAllocEntryPoints(); @@ -3750,18 +3753,18 @@ void Dbg::RecordAllocation(mirror::Class* type, size_t byte_count) { Thread* self = Thread::Current(); CHECK(self != NULL); - MutexLock mu(self, gAllocTrackerLock); + MutexLock mu(self, *alloc_tracker_lock_); if (recent_allocation_records_ == NULL) { return; } // Advance and clip. - if (++gAllocRecordHead == gAllocRecordMax) { - gAllocRecordHead = 0; + if (++alloc_record_head_ == alloc_record_max_) { + alloc_record_head_ = 0; } // Fill in the basics. - AllocRecord* record = &recent_allocation_records_[gAllocRecordHead]; + AllocRecord* record = &recent_allocation_records_[alloc_record_head_]; record->type = type; record->byte_count = byte_count; record->thin_lock_id = self->GetThreadId(); @@ -3770,8 +3773,8 @@ void Dbg::RecordAllocation(mirror::Class* type, size_t byte_count) { AllocRecordStackVisitor visitor(self, record); visitor.WalkStack(); - if (gAllocRecordCount < gAllocRecordMax) { - ++gAllocRecordCount; + if (alloc_record_count_ < alloc_record_max_) { + ++alloc_record_count_; } } @@ -3783,13 +3786,14 @@ void Dbg::RecordAllocation(mirror::Class* type, size_t byte_count) { // // We need to handle underflow in our circular buffer, so we add // gAllocRecordMax and then mask it back down. -static inline int HeadIndex() EXCLUSIVE_LOCKS_REQUIRED(gAllocTrackerLock) { - return (gAllocRecordHead+1 + gAllocRecordMax - gAllocRecordCount) & (gAllocRecordMax-1); +size_t Dbg::HeadIndex() { + return (Dbg::alloc_record_head_ + 1 + Dbg::alloc_record_max_ - Dbg::alloc_record_count_) & + (Dbg::alloc_record_max_ - 1); } void Dbg::DumpRecentAllocations() { ScopedObjectAccess soa(Thread::Current()); - MutexLock mu(soa.Self(), gAllocTrackerLock); + MutexLock mu(soa.Self(), *alloc_tracker_lock_); if (recent_allocation_records_ == NULL) { LOG(INFO) << "Not recording tracked allocations"; return; @@ -3798,9 +3802,9 @@ void Dbg::DumpRecentAllocations() { // "i" is the head of the list. We want to start at the end of the // list and move forward to the tail. 
size_t i = HeadIndex(); - size_t count = gAllocRecordCount; + size_t count = alloc_record_count_; - LOG(INFO) << "Tracked allocations, (head=" << gAllocRecordHead << " count=" << count << ")"; + LOG(INFO) << "Tracked allocations, (head=" << alloc_record_head_ << " count=" << count << ")"; while (count--) { AllocRecord* record = &recent_allocation_records_[i]; @@ -3820,22 +3824,20 @@ void Dbg::DumpRecentAllocations() { usleep(40000); } - i = (i + 1) & (gAllocRecordMax-1); + i = (i + 1) & (alloc_record_max_ - 1); } } void Dbg::UpdateObjectPointers(IsMarkedCallback* visitor, void* arg) { - { - MutexLock mu(Thread::Current(), gAllocTrackerLock); - if (recent_allocation_records_ != nullptr) { - size_t i = HeadIndex(); - size_t count = gAllocRecordCount; - while (count--) { - AllocRecord* record = &recent_allocation_records_[i]; - DCHECK(record != nullptr); - record->UpdateObjectPointers(visitor, arg); - i = (i + 1) & (gAllocRecordMax - 1); - } + if (recent_allocation_records_ != nullptr) { + MutexLock mu(Thread::Current(), *alloc_tracker_lock_); + size_t i = HeadIndex(); + size_t count = alloc_record_count_; + while (count--) { + AllocRecord* record = &recent_allocation_records_[i]; + DCHECK(record != nullptr); + record->UpdateObjectPointers(visitor, arg); + i = (i + 1) & (alloc_record_max_ - 1); } } if (gRegistry != nullptr) { @@ -3941,7 +3943,7 @@ jbyteArray Dbg::GetRecentAllocations() { Thread* self = Thread::Current(); std::vector<uint8_t> bytes; { - MutexLock mu(self, gAllocTrackerLock); + MutexLock mu(self, *alloc_tracker_lock_); // // Part 1: generate string tables. // @@ -3949,7 +3951,7 @@ jbyteArray Dbg::GetRecentAllocations() { StringTable method_names; StringTable filenames; - int count = gAllocRecordCount; + int count = alloc_record_count_; int idx = HeadIndex(); while (count--) { AllocRecord* record = &recent_allocation_records_[idx]; @@ -3967,10 +3969,10 @@ jbyteArray Dbg::GetRecentAllocations() { } } - idx = (idx + 1) & (gAllocRecordMax-1); + idx = (idx + 1) & (alloc_record_max_ - 1); } - LOG(INFO) << "allocation records: " << gAllocRecordCount; + LOG(INFO) << "allocation records: " << alloc_record_count_; // // Part 2: Generate the output and store it in the buffer. @@ -3991,14 +3993,14 @@ jbyteArray Dbg::GetRecentAllocations() { // (2b) number of class name strings // (2b) number of method name strings // (2b) number of source file name strings - JDWP::Append2BE(bytes, gAllocRecordCount); + JDWP::Append2BE(bytes, alloc_record_count_); size_t string_table_offset = bytes.size(); JDWP::Append4BE(bytes, 0); // We'll patch this later... 
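// Aside on HeadIndex() above: the index math assumes alloc_record_max_ is a power
// of two, so "& (alloc_record_max_ - 1)" is a cheap modulo. Worked example with
// hypothetical values max = 16, head = 2 (newest record), count = 5:
//
//   HeadIndex() = (2 + 1 + 16 - 5) & 15 = 14
//
// Stepping forward five slots from 14 (mod 16) visits 14, 15, 0, 1, 2, that is,
// from the oldest surviving record through the newest, which is the traversal
// both DumpRecentAllocations() and GetRecentAllocations() perform.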
JDWP::Append2BE(bytes, class_names.Size()); JDWP::Append2BE(bytes, method_names.Size()); JDWP::Append2BE(bytes, filenames.Size()); - count = gAllocRecordCount; + count = alloc_record_count_; idx = HeadIndex(); while (count--) { // For each entry: @@ -4032,7 +4034,7 @@ jbyteArray Dbg::GetRecentAllocations() { JDWP::Append2BE(bytes, record->stack[stack_frame].LineNumber()); } - idx = (idx + 1) & (gAllocRecordMax-1); + idx = (idx + 1) & (alloc_record_max_ - 1); } // (xb) class name strings diff --git a/runtime/debugger.h b/runtime/debugger.h index 5d269ee..6c44bde 100644 --- a/runtime/debugger.h +++ b/runtime/debugger.h @@ -391,7 +391,7 @@ class Dbg { LOCKS_EXCLUDED(Locks::deoptimization_lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static void DisableFullDeoptimization() - EXCLUSIVE_LOCKS_REQUIRED(event_list_lock_) + LOCKS_EXCLUDED(Locks::deoptimization_lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Manage deoptimization after updating JDWP events list. This must be done while all mutator @@ -448,8 +448,11 @@ class Dbg { static void RecordAllocation(mirror::Class* type, size_t byte_count) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static void SetAllocTrackingEnabled(bool enabled); - static inline bool IsAllocTrackingEnabled() { return recent_allocation_records_ != NULL; } + static bool IsAllocTrackingEnabled() { + return recent_allocation_records_ != nullptr; + } static jbyteArray GetRecentAllocations() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + static size_t HeadIndex() EXCLUSIVE_LOCKS_REQUIRED(alloc_tracker_lock_); static void DumpRecentAllocations(); // Updates the stored direct object pointers (called from SweepSystemWeaks). @@ -488,7 +491,14 @@ class Dbg { static void PostThreadStartOrStop(Thread*, uint32_t) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - static AllocRecord* recent_allocation_records_; + static Mutex* alloc_tracker_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER; + + static AllocRecord* recent_allocation_records_ PT_GUARDED_BY(alloc_tracker_lock_); + static size_t alloc_record_max_ GUARDED_BY(alloc_tracker_lock_); + static size_t alloc_record_head_ GUARDED_BY(alloc_tracker_lock_); + static size_t alloc_record_count_ GUARDED_BY(alloc_tracker_lock_); + + DISALLOW_COPY_AND_ASSIGN(Dbg); }; #define CHUNK_TYPE(_name) \ diff --git a/runtime/entrypoints/entrypoint_utils.h b/runtime/entrypoints/entrypoint_utils.h index a8fb6c1..498ac2c 100644 --- a/runtime/entrypoints/entrypoint_utils.h +++ b/runtime/entrypoints/entrypoint_utils.h @@ -29,7 +29,6 @@ #include "mirror/class-inl.h" #include "mirror/object-inl.h" #include "mirror/throwable.h" -#include "locks.h" #include "object_utils.h" #include "sirt_ref.h" #include "thread.h" @@ -642,8 +641,7 @@ static inline mirror::String* ResolveStringFromCode(mirror::ArtMethod* referrer, } static inline void UnlockJniSynchronizedMethod(jobject locked, Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - UNLOCK_FUNCTION(monitor_lock_) { + NO_THREAD_SAFETY_ANALYSIS /* SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) */ { // Save any pending exception over monitor exit call. 
mirror::Throwable* saved_exception = NULL; ThrowLocation saved_throw_location; diff --git a/runtime/entrypoints/portable/portable_jni_entrypoints.cc b/runtime/entrypoints/portable/portable_jni_entrypoints.cc index de1e32e..17ad4d0 100644 --- a/runtime/entrypoints/portable/portable_jni_entrypoints.cc +++ b/runtime/entrypoints/portable/portable_jni_entrypoints.cc @@ -23,7 +23,7 @@ namespace art { // Called on entry to JNI, transition out of Runnable and release share of mutator_lock_. extern "C" uint32_t art_portable_jni_method_start(Thread* self) - UNLOCK_FUNCTION(GlobalSynchronizatio::mutator_lock_) { + UNLOCK_FUNCTION(Locks::mutator_lock_) { JNIEnvExt* env = self->GetJniEnv(); uint32_t saved_local_ref_cookie = env->local_ref_cookie; env->local_ref_cookie = env->locals.GetSegmentState(); @@ -32,7 +32,7 @@ extern "C" uint32_t art_portable_jni_method_start(Thread* self) } extern "C" uint32_t art_portable_jni_method_start_synchronized(jobject to_lock, Thread* self) - UNLOCK_FUNCTION(Locks::mutator_lock_) { + UNLOCK_FUNCTION(Locks::mutator_lock_) NO_THREAD_SAFETY_ANALYSIS { self->DecodeJObject(to_lock)->MonitorEnter(self); return art_portable_jni_method_start(self); } diff --git a/runtime/entrypoints/portable/portable_lock_entrypoints.cc b/runtime/entrypoints/portable/portable_lock_entrypoints.cc index 44d3da9..358ac23 100644 --- a/runtime/entrypoints/portable/portable_lock_entrypoints.cc +++ b/runtime/entrypoints/portable/portable_lock_entrypoints.cc @@ -20,8 +20,9 @@ namespace art { extern "C" void art_portable_lock_object_from_code(mirror::Object* obj, Thread* thread) - EXCLUSIVE_LOCK_FUNCTION(monitor_lock_) { - DCHECK(obj != NULL); // Assumed to have been checked before entry. + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + NO_THREAD_SAFETY_ANALYSIS /* EXCLUSIVE_LOCK_FUNCTION(Monitor::monitor_lock_) */ { + DCHECK(obj != nullptr); // Assumed to have been checked before entry. obj->MonitorEnter(thread); // May block. DCHECK(thread->HoldsLock(obj)); // Only possible exception is NPE and is handled before entry. @@ -29,8 +30,9 @@ extern "C" void art_portable_lock_object_from_code(mirror::Object* obj, Thread* } extern "C" void art_portable_unlock_object_from_code(mirror::Object* obj, Thread* thread) - UNLOCK_FUNCTION(monitor_lock_) { - DCHECK(obj != NULL); // Assumed to have been checked before entry. + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + NO_THREAD_SAFETY_ANALYSIS /* UNLOCK_FUNCTION(Monitor::monitor_lock_) */ { + DCHECK(obj != nullptr); // Assumed to have been checked before entry. // MonitorExit may throw exception. obj->MonitorExit(thread); } diff --git a/runtime/entrypoints/quick/callee_save_frame.h b/runtime/entrypoints/quick/callee_save_frame.h index 8f70049..3fd4adc 100644 --- a/runtime/entrypoints/quick/callee_save_frame.h +++ b/runtime/entrypoints/quick/callee_save_frame.h @@ -26,8 +26,8 @@ class ArtMethod; } // namespace mirror // Place a special frame at the TOS that will save the callee saves for the given type. -static void FinishCalleeSaveFrameSetup(Thread* self, mirror::ArtMethod** sp, - Runtime::CalleeSaveType type) +static inline void FinishCalleeSaveFrameSetup(Thread* self, mirror::ArtMethod** sp, + Runtime::CalleeSaveType type) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { // Be aware the store below may well stomp on an incoming argument. 
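// Aside on the entrypoint annotations above: an object's monitor is a per-object
// lock with no static name annotalysis can refer to, so the old annotations such
// as UNLOCK_FUNCTION(monitor_lock_) named a lock that doesn't exist at that scope,
// an error the previously no-op macros hid. The pattern adopted here keeps the
// checkable part, disables analysis locally, and preserves the intent in a
// comment. A sketch (LockObjectFromCode is illustrative, the real entrypoints are
// in the hunks above):
//
//   extern "C" void LockObjectFromCode(mirror::Object* obj, Thread* self)
//       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)  // checkable: a named static lock
//       NO_THREAD_SAFETY_ANALYSIS /* EXCLUSIVE_LOCK_FUNCTION(Monitor::monitor_lock_) */ {
//     obj->MonitorEnter(self);  // acquires the per-object lock the checker can't model
//   }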
Locks::mutator_lock_->AssertSharedHeld(self); diff --git a/runtime/entrypoints/quick/quick_lock_entrypoints.cc b/runtime/entrypoints/quick/quick_lock_entrypoints.cc index 5bc7f4c..817d053 100644 --- a/runtime/entrypoints/quick/quick_lock_entrypoints.cc +++ b/runtime/entrypoints/quick/quick_lock_entrypoints.cc @@ -21,7 +21,8 @@ namespace art { extern "C" int artLockObjectFromCode(mirror::Object* obj, Thread* self, mirror::ArtMethod** sp) - EXCLUSIVE_LOCK_FUNCTION(monitor_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + NO_THREAD_SAFETY_ANALYSIS /* EXCLUSIVE_LOCK_FUNCTION(Monitor::monitor_lock_) */ { FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); if (UNLIKELY(obj == NULL)) { ThrowLocation throw_location(self->GetCurrentLocationForThrow()); @@ -42,7 +43,8 @@ extern "C" int artLockObjectFromCode(mirror::Object* obj, Thread* self, mirror:: } extern "C" int artUnlockObjectFromCode(mirror::Object* obj, Thread* self, mirror::ArtMethod** sp) - UNLOCK_FUNCTION(monitor_lock_) { + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + NO_THREAD_SAFETY_ANALYSIS /* UNLOCK_FUNCTION(Monitor::monitor_lock_) */ { FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); if (UNLIKELY(obj == NULL)) { ThrowLocation throw_location(self->GetCurrentLocationForThrow()); diff --git a/runtime/gc/accounting/card_table.h b/runtime/gc/accounting/card_table.h index bb4d1d7..8b7bfd3 100644 --- a/runtime/gc/accounting/card_table.h +++ b/runtime/gc/accounting/card_table.h @@ -17,8 +17,8 @@ #ifndef ART_RUNTIME_GC_ACCOUNTING_CARD_TABLE_H_ #define ART_RUNTIME_GC_ACCOUNTING_CARD_TABLE_H_ +#include "base/mutex.h" #include "globals.h" -#include "locks.h" #include "mem_map.h" #include "UniquePtr.h" diff --git a/runtime/gc/accounting/heap_bitmap.h b/runtime/gc/accounting/heap_bitmap.h index dde1425..7cfeb63 100644 --- a/runtime/gc/accounting/heap_bitmap.h +++ b/runtime/gc/accounting/heap_bitmap.h @@ -19,7 +19,6 @@ #include "base/logging.h" #include "gc_allocator.h" -#include "locks.h" #include "object_callbacks.h" #include "space_bitmap.h" diff --git a/runtime/gc/accounting/mod_union_table.cc b/runtime/gc/accounting/mod_union_table.cc index 06127c1..8871921 100644 --- a/runtime/gc/accounting/mod_union_table.cc +++ b/runtime/gc/accounting/mod_union_table.cc @@ -175,7 +175,6 @@ class CheckReferenceVisitor { } // Extra parameters are required since we use this same visitor signature for checking objects. - // TODO: Fixme when anotatalysis works with visitors. 
void operator()(Object* obj, Object* ref, const MemberOffset& /* offset */, bool /* is_static */) const SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) { diff --git a/runtime/gc/accounting/space_bitmap.h b/runtime/gc/accounting/space_bitmap.h index 3c4b674..5fd2bce 100644 --- a/runtime/gc/accounting/space_bitmap.h +++ b/runtime/gc/accounting/space_bitmap.h @@ -17,9 +17,9 @@ #ifndef ART_RUNTIME_GC_ACCOUNTING_SPACE_BITMAP_H_ #define ART_RUNTIME_GC_ACCOUNTING_SPACE_BITMAP_H_ +#include "base/mutex.h" #include "gc_allocator.h" #include "globals.h" -#include "locks.h" #include "mem_map.h" #include "object_callbacks.h" #include "UniquePtr.h" @@ -248,8 +248,7 @@ class ObjectSet { contained_ = space_set.contained_; } - void Walk(ObjectCallback* callback, void* arg) - SHARED_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_); + void Walk(ObjectCallback* callback, void* arg) SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); template <typename Visitor> void Visit(const Visitor& visitor) NO_THREAD_SAFETY_ANALYSIS { diff --git a/runtime/gc/collector/garbage_collector.h b/runtime/gc/collector/garbage_collector.h index 088f1d4..8d401b8 100644 --- a/runtime/gc/collector/garbage_collector.h +++ b/runtime/gc/collector/garbage_collector.h @@ -18,10 +18,10 @@ #define ART_RUNTIME_GC_COLLECTOR_GARBAGE_COLLECTOR_H_ #include "base/histogram.h" +#include "base/mutex.h" #include "base/timing_logger.h" #include "gc/gc_cause.h" #include "gc_type.h" -#include "locks.h" #include <stdint.h> #include <vector> diff --git a/runtime/gc/collector/mark_sweep.h b/runtime/gc/collector/mark_sweep.h index 5c0a233..8d40c34 100644 --- a/runtime/gc/collector/mark_sweep.h +++ b/runtime/gc/collector/mark_sweep.h @@ -114,7 +114,7 @@ class MarkSweep : public GarbageCollector { EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - bool IsImmuneSpace(const space::ContinuousSpace* space) const; + bool IsImmuneSpace(const space::ContinuousSpace* space) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Bind the live bits to the mark bits of bitmaps for spaces that are never collected, ie @@ -152,6 +152,7 @@ class MarkSweep : public GarbageCollector { // Sweep only pointers within an array. WARNING: Trashes objects. void SweepArray(accounting::ObjectStack* allocation_stack_, bool swap_bitmaps) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); // Blackens an object. diff --git a/runtime/gc/collector/partial_mark_sweep.h b/runtime/gc/collector/partial_mark_sweep.h index 44ae9e9..ac0d068 100644 --- a/runtime/gc/collector/partial_mark_sweep.h +++ b/runtime/gc/collector/partial_mark_sweep.h @@ -17,7 +17,6 @@ #ifndef ART_RUNTIME_GC_COLLECTOR_PARTIAL_MARK_SWEEP_H_ #define ART_RUNTIME_GC_COLLECTOR_PARTIAL_MARK_SWEEP_H_ -#include "locks.h" #include "mark_sweep.h" namespace art { diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc index a577f90..2da360f 100644 --- a/runtime/gc/collector/semi_space.cc +++ b/runtime/gc/collector/semi_space.cc @@ -678,13 +678,14 @@ void SemiSpace::DelayReferenceReferent(mirror::Class* klass, Object* obj) { heap_->DelayReferenceReferent(klass, obj, MarkedForwardingAddressCallback, this); } -// Visit all of the references of an object and update. 
-void SemiSpace::ScanObject(Object* obj) { - DCHECK(obj != NULL); - DCHECK(!from_space_->HasAddress(obj)) << "Scanning object " << obj << " in from space"; - MarkSweep::VisitObjectReferences(obj, [this](Object* obj, Object* ref, const MemberOffset& offset, - bool /* is_static */) ALWAYS_INLINE_LAMBDA NO_THREAD_SAFETY_ANALYSIS { - mirror::Object* new_address = MarkObject(ref); +class SemiSpaceMarkObjectVisitor { + public: + explicit SemiSpaceMarkObjectVisitor(SemiSpace* semi_space) : semi_space_(semi_space) { + } + + void operator()(Object* obj, Object* ref, const MemberOffset& offset, bool /* is_static */) + const ALWAYS_INLINE NO_THREAD_SAFETY_ANALYSIS /* EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) */ { + mirror::Object* new_address = semi_space_->MarkObject(ref); if (new_address != ref) { DCHECK(new_address != nullptr); // Don't need to mark the card since we updating the object address and not changing the @@ -694,7 +695,17 @@ void SemiSpace::ScanObject(Object* obj) { // disable check as we could run inside a transaction. obj->SetFieldObjectWithoutWriteBarrier<false, false, kVerifyNone>(offset, new_address, false); } - }, kMovingClasses); + } + private: + SemiSpace* const semi_space_; +}; + +// Visit all of the references of an object and update. +void SemiSpace::ScanObject(Object* obj) { + DCHECK(obj != NULL); + DCHECK(!from_space_->HasAddress(obj)) << "Scanning object " << obj << " in from space"; + SemiSpaceMarkObjectVisitor visitor(this); + MarkSweep::VisitObjectReferences(obj, visitor, kMovingClasses); mirror::Class* klass = obj->GetClass<kVerifyNone>(); if (UNLIKELY(klass->IsReferenceClass<kVerifyNone>())) { DelayReferenceReferent(klass, obj); diff --git a/runtime/gc/collector/sticky_mark_sweep.h b/runtime/gc/collector/sticky_mark_sweep.h index 98f2b59..934b1bd 100644 --- a/runtime/gc/collector/sticky_mark_sweep.h +++ b/runtime/gc/collector/sticky_mark_sweep.h @@ -18,7 +18,6 @@ #define ART_RUNTIME_GC_COLLECTOR_STICKY_MARK_SWEEP_H_ #include "base/macros.h" -#include "locks.h" #include "partial_mark_sweep.h" namespace art { @@ -43,7 +42,9 @@ class StickyMarkSweep FINAL : public PartialMarkSweep { SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); - void Sweep(bool swap_bitmaps) OVERRIDE EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); + void Sweep(bool swap_bitmaps) OVERRIDE + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); // Don't need to do anything special here since we scan all the cards which may have references // to the newly allocated objects. diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h index e5c8ff1..4c4e943 100644 --- a/runtime/gc/heap.h +++ b/runtime/gc/heap.h @@ -31,7 +31,6 @@ #include "globals.h" #include "gtest/gtest.h" #include "jni.h" -#include "locks.h" #include "object_callbacks.h" #include "offsets.h" #include "reference_queue.h" @@ -684,7 +683,7 @@ class Heap { void RemoveSpace(space::Space* space) LOCKS_EXCLUDED(Locks::heap_bitmap_lock_); static void VerificationCallback(mirror::Object* obj, void* arg) - SHARED_LOCKS_REQUIRED(GlobalSychronization::heap_bitmap_lock_); + SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); // Swap the allocation stack with the live stack. 
void SwapStacks(Thread* self); diff --git a/runtime/gc/reference_queue.h b/runtime/gc/reference_queue.h index e12a95f..99314ba 100644 --- a/runtime/gc/reference_queue.h +++ b/runtime/gc/reference_queue.h @@ -26,7 +26,6 @@ #include "globals.h" #include "gtest/gtest.h" #include "jni.h" -#include "locks.h" #include "object_callbacks.h" #include "offsets.h" #include "thread_pool.h" diff --git a/runtime/gc/space/bump_pointer_space.h b/runtime/gc/space/bump_pointer_space.h index 2c9d35f..031fccd 100644 --- a/runtime/gc/space/bump_pointer_space.h +++ b/runtime/gc/space/bump_pointer_space.h @@ -146,9 +146,6 @@ class BumpPointerSpace FINAL : public ContinuousMemMapAllocSpace { byte* AllocBlock(size_t bytes) EXCLUSIVE_LOCKS_REQUIRED(block_lock_); void RevokeThreadLocalBuffersLocked(Thread* thread) EXCLUSIVE_LOCKS_REQUIRED(block_lock_); - mirror::Object* AllocWithoutGrowthLocked(size_t num_bytes, size_t* bytes_allocated) - EXCLUSIVE_LOCKS_REQUIRED(lock_); - // The main block is an unbounded block where objects go when there are no other blocks. This // enables us to maintain tightly packed objects when you are not using thread local buffers for // allocation. The main block starts at the space Begin(). diff --git a/runtime/gc/space/space_test.h b/runtime/gc/space/space_test.h index 413fc1d..ea0d290 100644 --- a/runtime/gc/space/space_test.h +++ b/runtime/gc/space/space_test.h @@ -75,7 +75,7 @@ class SpaceTest : public CommonRuntimeTest { void SizeFootPrintGrowthLimitAndTrimDriver(size_t object_size, CreateSpaceFn create_space); }; -static size_t test_rand(size_t* seed) { +static inline size_t test_rand(size_t* seed) { *seed = *seed * 1103515245 + 12345; return *seed; } diff --git a/runtime/indirect_reference_table.cc b/runtime/indirect_reference_table.cc index 82b53f6..c8855e3 100644 --- a/runtime/indirect_reference_table.cc +++ b/runtime/indirect_reference_table.cc @@ -27,6 +27,32 @@ namespace art { +template<typename T> +class MutatorLockedDumpable { + public: + explicit MutatorLockedDumpable(T& value) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) : value_(value) { + } + + void Dump(std::ostream& os) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + value_.Dump(os); + } + + private: + T& value_; + + DISALLOW_COPY_AND_ASSIGN(MutatorLockedDumpable); +}; + +template<typename T> +std::ostream& operator<<(std::ostream& os, const MutatorLockedDumpable<T>& rhs) +// TODO: should be SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) however annotalysis +// currently fails for this. + NO_THREAD_SAFETY_ANALYSIS { + rhs.Dump(os); + return os; +} + static void AbortMaybe() { // If -Xcheck:jni is on, it'll give a more detailed error before aborting. 
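// Aside: MutatorLockedDumpable, moved here from logging.h (this file appears to be
// its only user), adapts a value whose Dump() requires a share of the mutator lock
// into something streamable. Hypothetical usage (DumpTable and its annotation are
// illustrative, not from this change):
//
//   void DumpTable(std::ostream& os, IndirectReferenceTable& irt)
//       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
//     os << MutatorLockedDumpable<IndirectReferenceTable>(irt);  // calls irt.Dump(os)
//   }
//
// operator<< itself is NO_THREAD_SAFETY_ANALYSIS because annotalysis cannot yet
// check the lock requirement through the template (per the TODO above).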
if (!Runtime::Current()->GetJavaVM()->check_jni) { diff --git a/runtime/indirect_reference_table.h b/runtime/indirect_reference_table.h index 9d2fa35..9a8e4f2 100644 --- a/runtime/indirect_reference_table.h +++ b/runtime/indirect_reference_table.h @@ -23,6 +23,7 @@ #include <string> #include "base/logging.h" +#include "base/mutex.h" #include "object_callbacks.h" #include "offsets.h" diff --git a/runtime/instrumentation.h b/runtime/instrumentation.h index e04d7b2..e9356e0 100644 --- a/runtime/instrumentation.h +++ b/runtime/instrumentation.h @@ -19,7 +19,7 @@ #include "atomic.h" #include "base/macros.h" -#include "locks.h" +#include "base/mutex.h" #include <stdint.h> #include <set> diff --git a/runtime/intern_table.h b/runtime/intern_table.h index fd921f3..7dd06c6 100644 --- a/runtime/intern_table.h +++ b/runtime/intern_table.h @@ -17,12 +17,11 @@ #ifndef ART_RUNTIME_INTERN_TABLE_H_ #define ART_RUNTIME_INTERN_TABLE_H_ +#include <map> + #include "base/mutex.h" -#include "locks.h" #include "object_callbacks.h" -#include <map> - namespace art { enum VisitRootFlags : uint8_t; diff --git a/runtime/interpreter/interpreter.h b/runtime/interpreter/interpreter.h index efe11fc..0750eb5 100644 --- a/runtime/interpreter/interpreter.h +++ b/runtime/interpreter/interpreter.h @@ -17,8 +17,8 @@ #ifndef ART_RUNTIME_INTERPRETER_INTERPRETER_H_ #define ART_RUNTIME_INTERPRETER_INTERPRETER_H_ +#include "base/mutex.h" #include "dex_file.h" -#include "locks.h" namespace art { namespace mirror { diff --git a/runtime/jdwp/jdwp.h b/runtime/jdwp/jdwp.h index fdbdfeb..fec0e31 100644 --- a/runtime/jdwp/jdwp.h +++ b/runtime/jdwp/jdwp.h @@ -31,11 +31,13 @@ struct iovec; namespace art { - union JValue; + +union JValue; +class Thread; + namespace mirror { class ArtMethod; } // namespace mirror -class Thread; namespace JDWP { @@ -156,7 +158,7 @@ struct JdwpState { // ObjectId GetWaitForEventThread(); void SetWaitForEventThread(ObjectId threadId) LOCKS_EXCLUDED(event_thread_lock_, process_request_lock_); - void ClearWaitForEventThread() LOCKS_EXCLUDED(event_thread_lock); + void ClearWaitForEventThread() LOCKS_EXCLUDED(event_thread_lock_); /* * These notify the debug code that something interesting has happened. This @@ -334,6 +336,7 @@ struct JdwpState { // Linked list of events requested by the debugger (breakpoints, class prep, etc). Mutex event_list_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER; + JdwpEvent* event_list_ GUARDED_BY(event_list_lock_); size_t event_list_size_ GUARDED_BY(event_list_lock_); // Number of elements in event_list_. 
size_t full_deoptimization_requests_ GUARDED_BY(event_list_lock_); // Number of events requiring diff --git a/runtime/jni_internal.cc b/runtime/jni_internal.cc index 1bcb8dd..4fad5c9 100644 --- a/runtime/jni_internal.cc +++ b/runtime/jni_internal.cc @@ -2466,8 +2466,7 @@ class JNI { return JNI_OK; } - static jint MonitorEnter(JNIEnv* env, jobject java_object) - EXCLUSIVE_LOCK_FUNCTION(monitor_lock_) { + static jint MonitorEnter(JNIEnv* env, jobject java_object) NO_THREAD_SAFETY_ANALYSIS { CHECK_NON_NULL_ARGUMENT(MonitorEnter, java_object); ScopedObjectAccess soa(env); mirror::Object* o = soa.Decode<mirror::Object*>(java_object); @@ -2479,8 +2478,7 @@ class JNI { return JNI_OK; } - static jint MonitorExit(JNIEnv* env, jobject java_object) - UNLOCK_FUNCTION(monitor_lock_) { + static jint MonitorExit(JNIEnv* env, jobject java_object) NO_THREAD_SAFETY_ANALYSIS { CHECK_NON_NULL_ARGUMENT(MonitorExit, java_object); ScopedObjectAccess soa(env); mirror::Object* o = soa.Decode<mirror::Object*>(java_object); diff --git a/runtime/locks.cc b/runtime/locks.cc deleted file mode 100644 index 246e339..0000000 --- a/runtime/locks.cc +++ /dev/null @@ -1,86 +0,0 @@ -/* - * Copyright (C) 2012 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "locks.h" - -#include "base/mutex.h" - -namespace art { - -Mutex* Locks::abort_lock_ = NULL; -Mutex* Locks::breakpoint_lock_ = NULL; -Mutex* Locks::deoptimization_lock_ = NULL; -ReaderWriterMutex* Locks::classlinker_classes_lock_ = NULL; -ReaderWriterMutex* Locks::heap_bitmap_lock_ = NULL; -Mutex* Locks::logging_lock_ = NULL; -ReaderWriterMutex* Locks::mutator_lock_ = NULL; -Mutex* Locks::runtime_shutdown_lock_ = NULL; -Mutex* Locks::thread_list_lock_ = NULL; -Mutex* Locks::thread_suspend_count_lock_ = NULL; -Mutex* Locks::trace_lock_ = NULL; -Mutex* Locks::profiler_lock_ = NULL; -Mutex* Locks::unexpected_signal_lock_ = NULL; -Mutex* Locks::intern_table_lock_ = NULL; - -void Locks::Init() { - if (logging_lock_ != NULL) { - // Already initialized. 
- DCHECK(abort_lock_ != NULL); - DCHECK(breakpoint_lock_ != NULL); - DCHECK(deoptimization_lock_ != NULL); - DCHECK(classlinker_classes_lock_ != NULL); - DCHECK(heap_bitmap_lock_ != NULL); - DCHECK(logging_lock_ != NULL); - DCHECK(mutator_lock_ != NULL); - DCHECK(thread_list_lock_ != NULL); - DCHECK(thread_suspend_count_lock_ != NULL); - DCHECK(trace_lock_ != NULL); - DCHECK(profiler_lock_ != NULL); - DCHECK(unexpected_signal_lock_ != NULL); - DCHECK(intern_table_lock_ != NULL); - } else { - logging_lock_ = new Mutex("logging lock", kLoggingLock, true); - abort_lock_ = new Mutex("abort lock", kAbortLock, true); - - DCHECK(breakpoint_lock_ == NULL); - breakpoint_lock_ = new Mutex("breakpoint lock", kBreakpointLock); - DCHECK(deoptimization_lock_ == NULL); - deoptimization_lock_ = new Mutex("deoptimization lock", kDeoptimizationLock); - DCHECK(classlinker_classes_lock_ == NULL); - classlinker_classes_lock_ = new ReaderWriterMutex("ClassLinker classes lock", - kClassLinkerClassesLock); - DCHECK(heap_bitmap_lock_ == NULL); - heap_bitmap_lock_ = new ReaderWriterMutex("heap bitmap lock", kHeapBitmapLock); - DCHECK(mutator_lock_ == NULL); - mutator_lock_ = new ReaderWriterMutex("mutator lock", kMutatorLock); - DCHECK(runtime_shutdown_lock_ == NULL); - runtime_shutdown_lock_ = new Mutex("runtime shutdown lock", kRuntimeShutdownLock); - DCHECK(thread_list_lock_ == NULL); - thread_list_lock_ = new Mutex("thread list lock", kThreadListLock); - DCHECK(thread_suspend_count_lock_ == NULL); - thread_suspend_count_lock_ = new Mutex("thread suspend count lock", kThreadSuspendCountLock); - DCHECK(trace_lock_ == NULL); - trace_lock_ = new Mutex("trace lock", kTraceLock); - DCHECK(profiler_lock_ == NULL); - profiler_lock_ = new Mutex("profiler lock", kProfilerLock); - DCHECK(unexpected_signal_lock_ == NULL); - unexpected_signal_lock_ = new Mutex("unexpected signal lock", kUnexpectedSignalLock, true); - DCHECK(intern_table_lock_ == NULL); - intern_table_lock_ = new Mutex("InternTable lock", kInternTableLock); - } -} - -} // namespace art diff --git a/runtime/locks.h b/runtime/locks.h deleted file mode 100644 index 4343ab4..0000000 --- a/runtime/locks.h +++ /dev/null @@ -1,188 +0,0 @@ -/* - * Copyright (C) 2012 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef ART_RUNTIME_LOCKS_H_ -#define ART_RUNTIME_LOCKS_H_ - -#include <ostream> - -#include "base/macros.h" - -namespace art { - -class LOCKABLE Mutex; -class LOCKABLE ReaderWriterMutex; - -// LockLevel is used to impose a lock hierarchy [1] where acquisition of a Mutex at a higher or -// equal level to a lock a thread holds is invalid. The lock hierarchy achieves a cycle free -// partial ordering and thereby cause deadlock situations to fail checks. 
-// -// [1] http://www.drdobbs.com/parallel/use-lock-hierarchies-to-avoid-deadlock/204801163 -enum LockLevel { - kLoggingLock = 0, - kUnexpectedSignalLock, - kThreadSuspendCountLock, - kAbortLock, - kJdwpSocketLock, - kRosAllocGlobalLock, - kRosAllocBracketLock, - kRosAllocBulkFreeLock, - kAllocSpaceLock, - kDexFileMethodInlinerLock, - kDexFileToMethodInlinerMapLock, - kMarkSweepMarkStackLock, - kTransactionLogLock, - kInternTableLock, - kMonitorPoolLock, - kDefaultMutexLevel, - kMarkSweepLargeObjectLock, - kPinTableLock, - kLoadLibraryLock, - kJdwpObjectRegistryLock, - kClassLinkerClassesLock, - kBreakpointLock, - kMonitorLock, - kThreadListLock, - kBreakpointInvokeLock, - kDeoptimizationLock, - kTraceLock, - kProfilerLock, - kJdwpEventListLock, - kJdwpAttachLock, - kJdwpStartLock, - kRuntimeShutdownLock, - kHeapBitmapLock, - kMutatorLock, - kZygoteCreationLock, - - kLockLevelCount // Must come last. -}; -std::ostream& operator<<(std::ostream& os, const LockLevel& rhs); - -// Global mutexes corresponding to the levels above. -class Locks { - public: - static void Init(); - - // The mutator_lock_ is used to allow mutators to execute in a shared (reader) mode or to block - // mutators by having an exclusive (writer) owner. In normal execution each mutator thread holds - // a share on the mutator_lock_. The garbage collector may also execute with shared access but - // at times requires exclusive access to the heap (not to be confused with the heap meta-data - // guarded by the heap_lock_ below). When the garbage collector requires exclusive access it asks - // the mutators to suspend themselves which also involves usage of the thread_suspend_count_lock_ - // to cover weaknesses in using ReaderWriterMutexes with ConditionVariables. We use a condition - // variable to wait upon in the suspension logic as releasing and then re-acquiring a share on - // the mutator lock doesn't necessarily allow the exclusive user (e.g the garbage collector) - // chance to acquire the lock. - // - // Thread suspension: - // Shared users | Exclusive user - // (holding mutator lock and in kRunnable state) | .. running .. - // .. running .. | Request thread suspension by: - // .. running .. | - acquiring thread_suspend_count_lock_ - // .. running .. | - incrementing Thread::suspend_count_ on - // .. running .. | all mutator threads - // .. running .. | - releasing thread_suspend_count_lock_ - // .. running .. | Block trying to acquire exclusive mutator lock - // Poll Thread::suspend_count_ and enter full | .. blocked .. - // suspend code. | .. blocked .. - // Change state to kSuspended | .. blocked .. - // x: Release share on mutator_lock_ | Carry out exclusive access - // Acquire thread_suspend_count_lock_ | .. exclusive .. - // while Thread::suspend_count_ > 0 | .. exclusive .. - // - wait on Thread::resume_cond_ | .. exclusive .. - // (releases thread_suspend_count_lock_) | .. exclusive .. - // .. waiting .. | Release mutator_lock_ - // .. waiting .. | Request thread resumption by: - // .. waiting .. | - acquiring thread_suspend_count_lock_ - // .. waiting .. | - decrementing Thread::suspend_count_ on - // .. waiting .. | all mutator threads - // .. waiting .. | - notifying on Thread::resume_cond_ - // - re-acquire thread_suspend_count_lock_ | - releasing thread_suspend_count_lock_ - // Release thread_suspend_count_lock_ | .. running .. - // Acquire share on mutator_lock_ | .. running .. - // - This could block but the thread still | .. running .. - // has a state of kSuspended and so this | .. running .. 
- // isn't an issue. | .. running .. - // Acquire thread_suspend_count_lock_ | .. running .. - // - we poll here as we're transitioning into | .. running .. - // kRunnable and an individual thread suspend | .. running .. - // request (e.g for debugging) won't try | .. running .. - // to acquire the mutator lock (which would | .. running .. - // block as we hold the mutator lock). This | .. running .. - // poll ensures that if the suspender thought | .. running .. - // we were suspended by incrementing our | .. running .. - // Thread::suspend_count_ and then reading | .. running .. - // our state we go back to waiting on | .. running .. - // Thread::resume_cond_. | .. running .. - // can_go_runnable = Thread::suspend_count_ == 0 | .. running .. - // Release thread_suspend_count_lock_ | .. running .. - // if can_go_runnable | .. running .. - // Change state to kRunnable | .. running .. - // else | .. running .. - // Goto x | .. running .. - // .. running .. | .. running .. - static ReaderWriterMutex* mutator_lock_; - - // Allow reader-writer mutual exclusion on the mark and live bitmaps of the heap. - static ReaderWriterMutex* heap_bitmap_lock_ ACQUIRED_AFTER(mutator_lock_); - - // Guards shutdown of the runtime. - static Mutex* runtime_shutdown_lock_ ACQUIRED_AFTER(heap_bitmap_lock_); - - // The thread_list_lock_ guards ThreadList::list_. It is also commonly held to stop threads - // attaching and detaching. - static Mutex* thread_list_lock_ ACQUIRED_AFTER(runtime_shutdown_lock_); - - // Guards breakpoints. - static Mutex* breakpoint_lock_ ACQUIRED_AFTER(thread_list_lock_); - - // Guards deoptimization requests. - static Mutex* deoptimization_lock_ ACQUIRED_AFTER(breakpoint_lock_); - - // Guards trace requests. - static Mutex* trace_lock_ ACQUIRED_AFTER(deoptimization_lock_); - - // Guards profile objects. - static Mutex* profiler_lock_ ACQUIRED_AFTER(trace_lock_); - - // Guards lists of classes within the class linker. - static ReaderWriterMutex* classlinker_classes_lock_ ACQUIRED_AFTER(profiler_lock_); - - // When declaring any Mutex add DEFAULT_MUTEX_ACQUIRED_AFTER to use annotalysis to check the code - // doesn't try to hold a higher level Mutex. - #define DEFAULT_MUTEX_ACQUIRED_AFTER ACQUIRED_AFTER(classlinker_classes_lock_) - - // Guards intern table. - static Mutex* intern_table_lock_ ACQUIRED_AFTER(classlinker_classes_lock_); - - // Have an exclusive aborting thread. - static Mutex* abort_lock_ ACQUIRED_AFTER(classlinker_classes_lock_); - - // Allow mutual exclusion when manipulating Thread::suspend_count_. - // TODO: Does the trade-off of a per-thread lock make sense? - static Mutex* thread_suspend_count_lock_ ACQUIRED_AFTER(abort_lock_); - - // One unexpected signal at a time lock. - static Mutex* unexpected_signal_lock_ ACQUIRED_AFTER(thread_suspend_count_lock_); - - // Have an exclusive logging thread. 
- static Mutex* logging_lock_ ACQUIRED_AFTER(unexpected_signal_lock_); -}; - -} // namespace art - -#endif // ART_RUNTIME_LOCKS_H_ diff --git a/runtime/mirror/art_method.h b/runtime/mirror/art_method.h index a18e171..a61698d 100644 --- a/runtime/mirror/art_method.h +++ b/runtime/mirror/art_method.h @@ -20,7 +20,6 @@ #include "class.h" #include "dex_file.h" #include "invoke_type.h" -#include "locks.h" #include "modifiers.h" #include "object.h" #include "object_callbacks.h" diff --git a/runtime/mirror/object.h b/runtime/mirror/object.h index ded4e0a..4e2c624 100644 --- a/runtime/mirror/object.h +++ b/runtime/mirror/object.h @@ -21,6 +21,7 @@ #include "base/logging.h" #include "base/macros.h" #include "cutils/atomic-inline.h" +#include "monitor.h" #include "object_reference.h" #include "offsets.h" #include "runtime.h" @@ -30,7 +31,6 @@ namespace art { class ImageWriter; class LockWord; -class Monitor; struct ObjectOffsets; class Thread; template <typename T> class SirtRef; @@ -64,7 +64,7 @@ class Throwable; static constexpr bool kCheckFieldAssignments = false; // C++ mirror of java.lang.Object -class MANAGED Object { +class MANAGED LOCKABLE Object { public: static MemberOffset ClassOffset() { return OFFSET_OF_OBJECT_MEMBER(Object, klass_); @@ -104,9 +104,9 @@ class MANAGED Object { uint32_t GetLockOwnerThreadId(); mirror::Object* MonitorEnter(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCK_FUNCTION(monitor_lock_); + EXCLUSIVE_LOCK_FUNCTION(); bool MonitorExit(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - UNLOCK_FUNCTION(monitor_lock_); + UNLOCK_FUNCTION(); void Notify(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void NotifyAll(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void Wait(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); diff --git a/runtime/mirror/object_reference.h b/runtime/mirror/object_reference.h index 94869e5..72f281d 100644 --- a/runtime/mirror/object_reference.h +++ b/runtime/mirror/object_reference.h @@ -17,8 +17,8 @@ #ifndef ART_RUNTIME_MIRROR_OBJECT_REFERENCE_H_ #define ART_RUNTIME_MIRROR_OBJECT_REFERENCE_H_ +#include "base/mutex.h" #include "globals.h" -#include "locks.h" namespace art { namespace mirror { diff --git a/runtime/monitor.cc b/runtime/monitor.cc index 64794fe..332aef0 100644 --- a/runtime/monitor.cc +++ b/runtime/monitor.cc @@ -650,9 +650,22 @@ void Monitor::InflateThinLocked(Thread* self, SirtRef<mirror::Object>& obj, Lock } } +// Fool annotalysis into thinking that the lock on obj is acquired. +static mirror::Object* FakeLock(mirror::Object* obj) + EXCLUSIVE_LOCK_FUNCTION(obj) NO_THREAD_SAFETY_ANALYSIS { + return obj; +} + +// Fool annotalysis into thinking that the lock on obj is released. +static mirror::Object* FakeUnlock(mirror::Object* obj) + UNLOCK_FUNCTION(obj) NO_THREAD_SAFETY_ANALYSIS { + return obj; +} + mirror::Object* Monitor::MonitorEnter(Thread* self, mirror::Object* obj) { DCHECK(self != NULL); DCHECK(obj != NULL); + obj = FakeLock(obj); uint32_t thread_id = self->GetThreadId(); size_t contention_count = 0; SirtRef<mirror::Object> sirt_obj(self, obj); @@ -698,24 +711,22 @@ mirror::Object* Monitor::MonitorEnter(Thread* self, mirror::Object* obj) { mon->Lock(self); return sirt_obj.get(); // Success! } - case LockWord::kHashCode: { + case LockWord::kHashCode: // Inflate with the existing hashcode. Inflate(self, nullptr, sirt_obj.get(), lock_word.GetHashCode()); - break; - } + continue; // Start from the beginning.
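The FakeLock/FakeUnlock pair above is the standard trick for a lock the analysis cannot follow: EXCLUSIVE_LOCK_FUNCTION(obj) asserts that obj itself (now LOCKABLE, per the object.h hunk) is held on return, and NO_THREAD_SAFETY_ANALYSIS keeps the checker from demanding that the body prove it. The monitor.h hunk below annotates MonitorEnter/MonitorExit the same way, against obj rather than the fictitious monitor_lock_. A compilable sketch under those assumptions (stand-in types, ART macro spellings):

#define LOCKABLE __attribute__((lockable))
#define EXCLUSIVE_LOCK_FUNCTION(...) __attribute__((exclusive_lock_function(__VA_ARGS__)))
#define UNLOCK_FUNCTION(...) __attribute__((unlock_function(__VA_ARGS__)))
#define NO_THREAD_SAFETY_ANALYSIS __attribute__((no_thread_safety_analysis))

class LOCKABLE Object {};  // Stand-in for mirror::Object.

// Tells the checker obj's monitor is held from here on; the real acquisition
// goes through lock words and inflation, which static analysis cannot track.
static Object* FakeLock(Object* obj) EXCLUSIVE_LOCK_FUNCTION(obj) NO_THREAD_SAFETY_ANALYSIS {
  return obj;
}

// The matching release-side assertion.
static Object* FakeUnlock(Object* obj) UNLOCK_FUNCTION(obj) NO_THREAD_SAFETY_ANALYSIS {
  return obj;
}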
default: { LOG(FATAL) << "Invalid monitor state " << lock_word.GetState(); return sirt_obj.get(); } } } - return sirt_obj.get(); } bool Monitor::MonitorExit(Thread* self, mirror::Object* obj) { DCHECK(self != NULL); DCHECK(obj != NULL); - + obj = FakeUnlock(obj); LockWord lock_word = obj->GetLockWord(); SirtRef<mirror::Object> sirt_obj(self, obj); switch (lock_word.GetState()) { diff --git a/runtime/monitor.h b/runtime/monitor.h index d0a3a2e..eb07196 100644 --- a/runtime/monitor.h +++ b/runtime/monitor.h @@ -27,7 +27,6 @@ #include "atomic.h" #include "base/mutex.h" #include "object_callbacks.h" -#include "sirt_ref.h" #include "thread_state.h" namespace art { @@ -37,6 +36,7 @@ namespace mirror { class Object; } // namespace mirror class LockWord; +template<class T> class SirtRef; class Thread; class StackVisitor; @@ -58,11 +58,11 @@ class Monitor { NO_THREAD_SAFETY_ANALYSIS; // TODO: Reading lock owner without holding lock is racy. static mirror::Object* MonitorEnter(Thread* thread, mirror::Object* obj) - EXCLUSIVE_LOCK_FUNCTION(monitor_lock_) + EXCLUSIVE_LOCK_FUNCTION(obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static bool MonitorExit(Thread* thread, mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - UNLOCK_FUNCTION(monitor_lock_); + UNLOCK_FUNCTION(obj); static void Notify(Thread* self, mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { @@ -178,6 +178,7 @@ class Monitor { static uint32_t lock_profiling_threshold_; Mutex monitor_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER; + ConditionVariable monitor_contenders_ GUARDED_BY(monitor_lock_); // Number of people waiting on the condition. diff --git a/runtime/nth_caller_visitor.h b/runtime/nth_caller_visitor.h index 794878a..374a80e 100644 --- a/runtime/nth_caller_visitor.h +++ b/runtime/nth_caller_visitor.h @@ -17,8 +17,8 @@ #ifndef ART_RUNTIME_NTH_CALLER_VISITOR_H_ #define ART_RUNTIME_NTH_CALLER_VISITOR_H_ +#include "base/mutex.h" #include "mirror/art_method.h" -#include "locks.h" #include "stack.h" namespace art { diff --git a/runtime/profiler.h b/runtime/profiler.h index e3af47c..6ea6c84 100644 --- a/runtime/profiler.h +++ b/runtime/profiler.h @@ -22,15 +22,14 @@ #include <string> #include <vector> +#include "barrier.h" #include "base/macros.h" +#include "base/mutex.h" #include "globals.h" #include "instrumentation.h" #include "os.h" #include "safe_map.h" -#include "base/mutex.h" -#include "locks.h" #include "UniquePtr.h" -#include "barrier.h" namespace art { diff --git a/runtime/reference_table.h b/runtime/reference_table.h index c9f5bc5..45309c9 100644 --- a/runtime/reference_table.h +++ b/runtime/reference_table.h @@ -22,8 +22,8 @@ #include <string> #include <vector> +#include "base/mutex.h" #include "object_callbacks.h" -#include "locks.h" namespace art { namespace mirror { diff --git a/runtime/runtime.cc b/runtime/runtime.cc index de06fb8..fdbf245 100644 --- a/runtime/runtime.cc +++ b/runtime/runtime.cc @@ -94,7 +94,7 @@ Runtime::Runtime() default_imt_(nullptr), fault_message_lock_("Fault message lock"), fault_message_(""), - method_verifiers_lock_("Method verifiers lock"), + method_verifier_lock_("Method verifiers lock"), threads_being_born_(0), shutdown_cond_(new ConditionVariable("Runtime shutdown", *Locks::runtime_shutdown_lock_)), shutting_down_(false), @@ -851,7 +851,7 @@ void Runtime::VisitNonThreadRoots(RootCallback* callback, void* arg) { } } { - MutexLock mu(Thread::Current(), method_verifiers_lock_); + MutexLock mu(Thread::Current(), method_verifier_lock_); for (verifier::MethodVerifier* 
verifier : method_verifiers_) { verifier->VisitRoots(callback, arg); } @@ -1043,13 +1043,13 @@ void Runtime::SetCompileTimeClassPath(jobject class_loader, void Runtime::AddMethodVerifier(verifier::MethodVerifier* verifier) { DCHECK(verifier != nullptr); - MutexLock mu(Thread::Current(), method_verifiers_lock_); + MutexLock mu(Thread::Current(), method_verifier_lock_); method_verifiers_.insert(verifier); } void Runtime::RemoveMethodVerifier(verifier::MethodVerifier* verifier) { DCHECK(verifier != nullptr); - MutexLock mu(Thread::Current(), method_verifiers_lock_); + MutexLock mu(Thread::Current(), method_verifier_lock_); auto it = method_verifiers_.find(verifier); CHECK(it != method_verifiers_.end()); method_verifiers_.erase(it); diff --git a/runtime/runtime.h b/runtime/runtime.h index 87307ae..65d296a 100644 --- a/runtime/runtime.h +++ b/runtime/runtime.h @@ -33,7 +33,6 @@ #include "instruction_set.h" #include "instrumentation.h" #include "jobject_comparator.h" -#include "locks.h" #include "object_callbacks.h" #include "runtime_stats.h" #include "safe_map.h" @@ -471,7 +470,7 @@ class Runtime { std::string fault_message_ GUARDED_BY(fault_message_lock_); // Method verifier set, used so that we can update their GC roots. - Mutex method_verifiers_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER; + Mutex method_verifier_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER; std::set<verifier::MethodVerifier*> method_verifiers_; // A non-zero value indicates that a thread has been created but not yet initialized. Guarded by diff --git a/runtime/sirt_ref.h b/runtime/sirt_ref.h index b22e816..38e652a 100644 --- a/runtime/sirt_ref.h +++ b/runtime/sirt_ref.h @@ -20,6 +20,7 @@ #include "base/casts.h" #include "base/logging.h" #include "base/macros.h" +#include "stack_indirect_reference_table.h" #include "thread.h" namespace art { diff --git a/runtime/thread-inl.h b/runtime/thread-inl.h index f7e88cc..66077f9 100644 --- a/runtime/thread-inl.h +++ b/runtime/thread-inl.h @@ -146,9 +146,10 @@ inline ThreadState Thread::TransitionFromSuspendedToRunnable() { if (UNLIKELY(!done)) { // Failed to transition to Runnable. Release shared mutator_lock_ access and try again. Locks::mutator_lock_->SharedUnlock(this); + } else { + return static_cast<ThreadState>(old_state); } - } while (UNLIKELY(!done)); - return static_cast<ThreadState>(old_state); + } while (true); } inline void Thread::VerifyStack() { diff --git a/runtime/thread.h b/runtime/thread.h index c7ab735..2ebc107 100644 --- a/runtime/thread.h +++ b/runtime/thread.h @@ -24,13 +24,13 @@ #include <string> #include "base/macros.h" +#include "base/mutex.h" #include "entrypoints/interpreter/interpreter_entrypoints.h" #include "entrypoints/jni/jni_entrypoints.h" #include "entrypoints/portable/portable_entrypoints.h" #include "entrypoints/quick/quick_entrypoints.h" #include "globals.h" #include "jvalue.h" -#include "locks.h" #include "object_callbacks.h" #include "offsets.h" #include "runtime_stats.h" diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc index 7745a19..bddebbd 100644 --- a/runtime/thread_list.cc +++ b/runtime/thread_list.cc @@ -151,7 +151,8 @@ void ThreadList::AssertThreadsAreSuspended(Thread* self, Thread* ignore1, Thread #if HAVE_TIMED_RWLOCK // Attempt to rectify locks so that we dump thread list with required locks before exiting. 
-static void UnsafeLogFatalForThreadSuspendAllTimeout(Thread* self) NO_THREAD_SAFETY_ANALYSIS { +static void UnsafeLogFatalForThreadSuspendAllTimeout(Thread* self) NO_THREAD_SAFETY_ANALYSIS __attribute__((noreturn)); +static void UnsafeLogFatalForThreadSuspendAllTimeout(Thread* self) { Runtime* runtime = Runtime::Current(); std::ostringstream ss; ss << "Thread suspend timeout\n"; @@ -159,6 +160,7 @@ static void UnsafeLogFatalForThreadSuspendAllTimeout(Thread* self) NO_THREAD_SAF ss << "\n"; runtime->GetThreadList()->DumpLocked(ss); LOG(FATAL) << ss.str(); + exit(0); } #endif @@ -297,7 +299,7 @@ void ThreadList::SuspendAll() { // Block on the mutator lock until all Runnable threads release their share of access. #if HAVE_TIMED_RWLOCK // Timeout if we wait more than 30 seconds. - if (UNLIKELY(!Locks::mutator_lock_->ExclusiveLockWithTimeout(self, 30 * 1000, 0))) { + if (!Locks::mutator_lock_->ExclusiveLockWithTimeout(self, 30 * 1000, 0)) { UnsafeLogFatalForThreadSuspendAllTimeout(self); } #else diff --git a/runtime/thread_list.h b/runtime/thread_list.h index e98aed9..1a76705 100644 --- a/runtime/thread_list.h +++ b/runtime/thread_list.h @@ -86,7 +86,7 @@ class ThreadList { // Run a checkpoint on threads, running threads are not suspended but run the checkpoint inside // of the suspend check. Returns how many checkpoints we should expect to run. - size_t RunCheckpoint(Closure* checkpoint_function); + size_t RunCheckpoint(Closure* checkpoint_function) LOCKS_EXCLUDED(Locks::thread_list_lock_, Locks::thread_suspend_count_lock_); diff --git a/runtime/thread_pool.h b/runtime/thread_pool.h index e8f9afe..b8735a3 100644 --- a/runtime/thread_pool.h +++ b/runtime/thread_pool.h @@ -23,7 +23,6 @@ #include "barrier.h" #include "base/mutex.h" #include "closure.h" -#include "locks.h" #include "mem_map.h" namespace art { diff --git a/runtime/throw_location.h b/runtime/throw_location.h index f30aa4e..c171b07 100644 --- a/runtime/throw_location.h +++ b/runtime/throw_location.h @@ -19,6 +19,7 @@ #include "object_callbacks.h" #include "base/macros.h" +#include "base/mutex.h" #include <stdint.h> #include <string> diff --git a/runtime/transaction.h b/runtime/transaction.h index 68f9540..cf696de 100644 --- a/runtime/transaction.h +++ b/runtime/transaction.h @@ -19,10 +19,9 @@ #include "base/macros.h" #include "base/mutex.h" -#include "locks.h" +#include "object_callbacks.h" #include "offsets.h" #include "primitive.h" -#include "object_callbacks.h" #include "safe_map.h" #include <list> diff --git a/runtime/utf.h b/runtime/utf.h index 5b2289e..29f8499 100644 --- a/runtime/utf.h +++ b/runtime/utf.h @@ -18,6 +18,7 @@ #define ART_RUNTIME_UTF_H_ #include "base/macros.h" +#include "base/mutex.h" #include <stddef.h> #include <stdint.h> diff --git a/runtime/verify_object.h b/runtime/verify_object.h index b39df4a..6640e0d 100644 --- a/runtime/verify_object.h +++ b/runtime/verify_object.h @@ -17,10 +17,10 @@ #ifndef ART_RUNTIME_VERIFY_OBJECT_H_ #define ART_RUNTIME_VERIFY_OBJECT_H_ -#include "locks.h" - #include <stdint.h> +#include "base/macros.h" + namespace art { namespace mirror { @@ -52,10 +52,10 @@ static constexpr VerifyObjectFlags kDefaultVerifyFlags = kVerifyNone; static constexpr VerifyObjectMode kVerifyObjectSupport = kDefaultVerifyFlags != 0 ? kVerifyObjectModeFast : kVerifyObjectModeDisabled; -ALWAYS_INLINE inline void VerifyObject(mirror::Object* obj) NO_THREAD_SAFETY_ANALYSIS; +void VerifyObject(mirror::Object* obj) ALWAYS_INLINE NO_THREAD_SAFETY_ANALYSIS; // Check that c.getClass() == c.getClass().getClass(). 
-ALWAYS_INLINE inline bool VerifyClassClass(mirror::Class* c) NO_THREAD_SAFETY_ANALYSIS; +bool VerifyClassClass(mirror::Class* c) ALWAYS_INLINE NO_THREAD_SAFETY_ANALYSIS; } // namespace art
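Two smaller changes in this patch deserve a note. In thread-inl.h, TransitionFromSuspendedToRunnable now returns from inside the retry loop instead of breaking out on a done flag: the success path exits while still holding the shared mutator_lock_, the failure path releases it before retrying, so every loop exit carries one well-defined lock state. Schematically, with a hypothetical compare-and-swap standing in for the real state transition:

#include <atomic>

enum ThreadState { kSuspended, kRunnable };

// Retry until the state flips to kRunnable; the only way out of the loop is
// the return on the success path, a shape both readers and annotalysis can
// follow more easily than a flag-controlled do/while.
ThreadState TransitionSketch(std::atomic<int>* state_flags) {
  while (true) {
    int old_state = state_flags->load();
    int expected = old_state;
    if (state_flags->compare_exchange_weak(expected, kRunnable)) {
      return static_cast<ThreadState>(old_state);  // Success: keep the share.
    }
    // Failed: release shared access here and loop to try again (elided).
  }
}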
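Second, the thread_list.cc hunk moves __attribute__((noreturn)) onto a separate forward declaration, presumably because GCC rejects this attribute syntax on a function definition, and appends an unreachable exit(0) after LOG(FATAL), since the compiler cannot otherwise see that LOG(FATAL) never returns. A minimal sketch of the same pattern, with a stand-in logger:

#include <cstdio>
#include <cstdlib>

// Stand-in for LOG(FATAL): it aborts at runtime, but in ART the abort happens
// in another translation unit, so the compiler cannot assume it.
static void LogFatal(const char* msg) {
  std::fprintf(stderr, "%s\n", msg);
  std::abort();
}

// The attribute lives on a forward declaration; the definition stays clean.
static void DieWithTimeoutReport() __attribute__((noreturn));

static void DieWithTimeoutReport() {
  LogFatal("thread suspend timeout");
  std::exit(0);  // Unreachable, but lets the compiler verify 'noreturn'.
}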