summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorMathieu Chartier <mathieuc@google.com>2013-10-25 10:05:23 -0700
committerMathieu Chartier <mathieuc@google.com>2013-10-29 12:14:36 -0700
commitad2541a59c00c2c69e8973088891a2b5257c9780 (patch)
tree523898cf039c5185352978e71a54fa3a2657a04c
parent9780099e445884d8bc9444c8c1261b02d80a26c7 (diff)
downloadart-ad2541a59c00c2c69e8973088891a2b5257c9780.zip
art-ad2541a59c00c2c69e8973088891a2b5257c9780.tar.gz
art-ad2541a59c00c2c69e8973088891a2b5257c9780.tar.bz2
Fix object identity hash.
The object identity hash is now stored in the monitor word after being computed. Hashes are computed by a pseudo random number generator. When we write the image, we eagerly compute object hashes to prevent pages getting dirtied. Bug: 8981901 Change-Id: Ic8edacbacb0afc7055fd740a52444929f88ed564
-rw-r--r--compiler/image_writer.cc26
-rw-r--r--runtime/arch/arm/quick_entrypoints_arm.S19
-rw-r--r--runtime/arch/x86/quick_entrypoints_x86.S6
-rw-r--r--runtime/gc/collector/mark_sweep.h6
-rw-r--r--runtime/gc/heap.h3
-rw-r--r--runtime/lock_word-inl.h9
-rw-r--r--runtime/lock_word.h63
-rw-r--r--runtime/locks.h2
-rw-r--r--runtime/mirror/object-inl.h2
-rw-r--r--runtime/mirror/object.cc48
-rw-r--r--runtime/mirror/object.h16
-rw-r--r--runtime/monitor.cc122
-rw-r--r--runtime/monitor.h21
-rw-r--r--runtime/native/java_lang_System.cc3
-rw-r--r--runtime/runtime.h3
15 files changed, 258 insertions, 91 deletions
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index 871cfd5..d60b544 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -36,6 +36,7 @@
#include "globals.h"
#include "image.h"
#include "intern_table.h"
+#include "lock_word.h"
#include "mirror/art_field-inl.h"
#include "mirror/art_method-inl.h"
#include "mirror/array-inl.h"
@@ -489,7 +490,30 @@ void ImageWriter::CopyAndFixupObjectsCallback(Object* object, void* arg) {
DCHECK_LT(offset + n, image_writer->image_->Size());
memcpy(dst, src, n);
Object* copy = reinterpret_cast<Object*>(dst);
- copy->SetField32(Object::MonitorOffset(), 0, false); // We may have inflated the lock during compilation.
+ // Preserve the identity hash code in the copy: an object with an inflated monitor gets the
+ // monitor's hash code written into its lock word; a hash code already in the lock word is kept.
+ LockWord lw(copy->GetLockWord());
+ switch (lw.GetState()) {
+ case LockWord::kFatLocked: {
+ Monitor* monitor = lw.FatLockMonitor();
+ CHECK(monitor != nullptr);
+ CHECK(!monitor->IsLocked());
+ copy->SetLockWord(LockWord::FromHashCode(monitor->GetHashCode()));
+ break;
+ }
+ case LockWord::kThinLocked: {
+ LOG(FATAL) << "Thin locked object " << obj << " found during object copy";
+ break;
+ }
+ case LockWord::kUnlocked:
+ // Fall-through.
+ case LockWord::kHashCode:
+ // Do nothing since we can just keep the same hash code.
+ break;
+ default:
+ LOG(FATAL) << "Unreachable.";
+ break;
+ }
image_writer->FixupObject(obj, copy);
}
diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S
index 736ce2f..c11349802 100644
--- a/runtime/arch/arm/quick_entrypoints_arm.S
+++ b/runtime/arch/arm/quick_entrypoints_arm.S
@@ -325,24 +325,25 @@ END art_quick_handle_fill_data
ENTRY art_quick_lock_object
cbz r0, slow_lock
retry_lock:
- ldrex r1, [r0, #LOCK_WORD_OFFSET]
ldrt r2, [r9, #THREAD_ID_OFFSET]
- cmp r1, #0
- bmi slow_lock @ lock word contains a monitor
- bne already_thin
+ ldrex r1, [r0, #LOCK_WORD_OFFSET]
+ cbnz r1, not_unlocked @ already thin locked
@ unlocked case - r2 holds thread id with count of 0
strex r3, r2, [r0, #LOCK_WORD_OFFSET]
cbnz r3, strex_fail @ store failed, retry
bx lr
strex_fail:
b retry_lock @ unlikely forward branch, need to reload and recheck r1/r2
-already_thin:
+not_unlocked:
+ lsr r3, r1, 30
+ cbnz r3, slow_lock @ if either of the top two bits are set, go slow path
eor r2, r1, r2 @ lock_word.ThreadId() ^ self->ThreadId()
uxth r2, r2 @ zero top 16 bits
cbnz r2, slow_lock @ thread ids don't match -> contention, go slow path
@ else thread ids match -> recursive lock, increment the count below
- adds r2, r1, #65536 @ increment count in lock word placing in r2 for storing
- bmi slow_lock @ if we overflow the count go slow
+ add r2, r1, #65536 @ increment count in lock word placing in r2 for storing
+ lsr r1, r2, 30 @ if either of the top two bits are set, we overflowed.
+ cbnz r1, slow_lock @ if we overflow the count go slow path
str r2, [r0, #LOCK_WORD_OFFSET] @ no need for strex as we hold the lock
bx lr
slow_lock:
@@ -363,9 +364,9 @@ END art_quick_lock_object
ENTRY art_quick_unlock_object
cbz r0, slow_unlock
ldr r1, [r0, #LOCK_WORD_OFFSET]
+ lsr r2, r1, 30
+ cbnz r2, slow_unlock @ if either of the top two bits are set, go slow path
ldr r2, [r9, #THREAD_ID_OFFSET]
- cmp r1, #0
- bmi slow_unlock @ lock word contains a monitor
eor r3, r1, r2 @ lock_word.ThreadId() ^ self->ThreadId()
uxth r3, r3 @ zero top 16 bits
cbnz r3, slow_unlock @ do lock word and self thread id's match?
diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S
index 805f6f4..4e79770 100644
--- a/runtime/arch/x86/quick_entrypoints_x86.S
+++ b/runtime/arch/x86/quick_entrypoints_x86.S
@@ -411,9 +411,10 @@ DEFINE_FUNCTION art_quick_lock_object
jz slow_lock
retry_lock:
movl LOCK_WORD_OFFSET(%eax), %ecx // ecx := lock word
+ test LITERAL(0xC0000000), %ecx // test the 2 high bits.
+ jne slow_lock // slow path if either of the two high bits are set.
movl %fs:THREAD_ID_OFFSET, %edx // edx := thread id
test %ecx, %ecx
- jb slow_lock // lock word contains a monitor
jnz already_thin // lock word contains a thin lock
// unlocked case - %edx holds thread id with count of 0
movl %eax, %ecx // remember object in case of retry
@@ -428,7 +429,8 @@ already_thin:
cmpw %ax, %dx // do we hold the lock already?
jne slow_lock
addl LITERAL(65536), %eax // increment recursion count
- jb slow_lock // count overflowed so go slow
+ test LITERAL(0xC0000000), %eax // overflowed if either of top two bits are set
+ jne slow_lock // count overflowed so go slow
movl %eax, LOCK_WORD_OFFSET(%ecx) // update lockword, cmpxchg not necessary as we hold lock
ret
slow_lock:
diff --git a/runtime/gc/collector/mark_sweep.h b/runtime/gc/collector/mark_sweep.h
index 19df2da..7e05136 100644
--- a/runtime/gc/collector/mark_sweep.h
+++ b/runtime/gc/collector/mark_sweep.h
@@ -69,7 +69,7 @@ class MarkSweep : public GarbageCollector {
virtual bool HandleDirtyObjectsPhase() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
virtual void MarkingPhase() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
virtual void ReclaimPhase() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- virtual void FinishPhase();
+ virtual void FinishPhase() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
virtual void MarkReachableObjects()
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
@@ -208,13 +208,13 @@ class MarkSweep : public GarbageCollector {
void SetImmuneRange(mirror::Object* begin, mirror::Object* end);
void SweepSystemWeaks()
- SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
static mirror::Object* VerifySystemWeakIsLiveCallback(mirror::Object* obj, void* arg)
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
void VerifySystemWeaks()
- SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
// Verify that an object is live, either in a live bitmap or in the allocation stack.
void VerifyIsLive(const mirror::Object* obj)
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 1c2b7ef..7d2441b 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -518,8 +518,9 @@ class Heap {
void PreGcVerification(collector::GarbageCollector* gc);
void PreSweepingGcVerification(collector::GarbageCollector* gc)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void PostGcVerification(collector::GarbageCollector* gc)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void PostGcVerification(collector::GarbageCollector* gc);
// Update the watermark for the native allocated bytes based on the current number of native
// bytes allocated and the target utilization ratio.
diff --git a/runtime/lock_word-inl.h b/runtime/lock_word-inl.h
index 30bf9bb..59947f5 100644
--- a/runtime/lock_word-inl.h
+++ b/runtime/lock_word-inl.h
@@ -33,7 +33,7 @@ inline uint32_t LockWord::ThinLockCount() const {
inline Monitor* LockWord::FatLockMonitor() const {
DCHECK_EQ(GetState(), kFatLocked);
- return reinterpret_cast<Monitor*>(value_ << 1);
+ return reinterpret_cast<Monitor*>(value_ << kStateSize);
}
inline LockWord::LockWord() : value_(0) {
@@ -41,10 +41,15 @@ inline LockWord::LockWord() : value_(0) {
}
inline LockWord::LockWord(Monitor* mon)
- : value_((reinterpret_cast<uint32_t>(mon) >> 1) | (kStateFat << kStateShift)) {
+ : value_((reinterpret_cast<uint32_t>(mon) >> kStateSize) | (kStateFat << kStateShift)) {
DCHECK_EQ(FatLockMonitor(), mon);
}
+inline uint32_t LockWord::GetHashCode() const {
+ DCHECK_EQ(GetState(), kHashCode);
+ return (value_ >> kHashShift) & kHashMask;
+}
+
} // namespace art
#endif // ART_RUNTIME_LOCK_WORD_INL_H_
diff --git a/runtime/lock_word.h b/runtime/lock_word.h
index cd4bfbb..9b6c64a 100644
--- a/runtime/lock_word.h
+++ b/runtime/lock_word.h
@@ -29,30 +29,37 @@ namespace mirror {
class Monitor;
-/* The lock value itself as stored in mirror::Object::monitor_. The MSB of the lock encodes its
- * state. When cleared, the lock is in the "thin" state and its bits are formatted as follows:
+/* The lock value itself as stored in mirror::Object::monitor_. The two most significant bits
+ * encode the state. The three possible states are fat locked, thin/unlocked, and hash code.
+ * When the lock word is in the "thin" state, its bits are formatted as follows:
*
- * |3|32222222222111|11111110000000000|
- * |1|09876543210987|65432109876543210|
- * |0| lock count | thread id |
+ * |33|22222222221111|1111110000000000|
+ * |10|98765432109876|5432109876543210|
+ * |00| lock count |thread id owner |
*
- * When set, the lock is in the "fat" state and its bits are formatted as follows:
+ * When the lock word is in the "fat" state, its bits are formatted as follows:
*
- * |3|3222222222211111111110000000000|
- * |1|0987654321098765432109876543210|
- * |1| Monitor* >> 1 |
+ * |33|222222222211111111110000000000|
+ * |10|987654321098765432109876543210|
+ * |01| Monitor* >> kStateSize |
+ *
+ * When the lock word is in the hash state, its bits are formatted as follows:
+ *
+ * |33|222222222211111111110000000000|
+ * |10|987654321098765432109876543210|
+ * |10| HashCode |
*/
class LockWord {
public:
enum {
- // Number of bits to encode the state, currently just fat or thin/unlocked.
- kStateSize = 1,
+ // Number of bits to encode the state, currently just fat or thin/unlocked or hash code.
+ kStateSize = 2,
// Number of bits to encode the thin lock owner.
kThinLockOwnerSize = 16,
// Remaining bits are the recursive lock count.
kThinLockCountSize = 32 - kThinLockOwnerSize - kStateSize,
-
// Thin lock bits. Owner in lowest bits.
+
kThinLockOwnerShift = 0,
kThinLockOwnerMask = (1 << kThinLockOwnerSize) - 1,
// Count in higher bits.
@@ -65,25 +72,42 @@ class LockWord {
kStateMask = (1 << kStateSize) - 1,
kStateThinOrUnlocked = 0,
kStateFat = 1,
+ kStateHash = 2,
+
+ // When the state is kHashCode, the non-state bits hold the hashcode.
+ kHashShift = 0,
+ kHashSize = 32 - kStateSize,
+ kHashMask = (1 << kHashSize) - 1,
};
static LockWord FromThinLockId(uint32_t thread_id, uint32_t count) {
CHECK_LE(thread_id, static_cast<uint32_t>(kThinLockOwnerMask));
- return LockWord((thread_id << kThinLockOwnerShift) | (count << kThinLockCountShift));
+ return LockWord((thread_id << kThinLockOwnerShift) | (count << kThinLockCountShift) |
+ (kStateThinOrUnlocked << kStateShift));
+ }
+
+ static LockWord FromHashCode(uint32_t hash_code) {
+ CHECK_LE(hash_code, static_cast<uint32_t>(kHashMask));
+ return LockWord((hash_code << kHashShift) | (kStateHash << kStateShift));
}
enum LockState {
kUnlocked, // No lock owners.
kThinLocked, // Single uncontended owner.
- kFatLocked // See associated monitor.
+ kFatLocked, // See associated monitor.
+ kHashCode, // Lock word contains an identity hash.
};
LockState GetState() const {
+ uint32_t internal_state = (value_ >> kStateShift) & kStateMask;
if (value_ == 0) {
return kUnlocked;
- } else if (((value_ >> kStateShift) & kStateMask) == kStateThinOrUnlocked) {
+ } else if (internal_state == kStateThinOrUnlocked) {
return kThinLocked;
+ } else if (internal_state == kStateHash) {
+ return kHashCode;
} else {
+ DCHECK_EQ(internal_state, static_cast<uint32_t>(kStateFat));
return kFatLocked;
}
}
@@ -103,17 +127,20 @@ class LockWord {
// Construct a lock word for inflation to use a Monitor.
explicit LockWord(Monitor* mon);
- bool operator==(const LockWord& rhs) {
+ bool operator==(const LockWord& rhs) const {
return GetValue() == rhs.GetValue();
}
- private:
- explicit LockWord(uint32_t val) : value_(val) {}
+ // Return the hash code stored in the lock word, must be kHashCode state.
+ uint32_t GetHashCode() const;
uint32_t GetValue() const {
return value_;
}
+ private:
+ explicit LockWord(uint32_t val) : value_(val) {}
+
// Only Object should be converting LockWords to/from uints.
friend class mirror::Object;
diff --git a/runtime/locks.h b/runtime/locks.h
index f63e2b1..2262218 100644
--- a/runtime/locks.h
+++ b/runtime/locks.h
@@ -53,8 +53,8 @@ enum LockLevel {
kJdwpAttachLock,
kJdwpStartLock,
kRuntimeShutdownLock,
- kHeapBitmapLock,
kMonitorLock,
+ kHeapBitmapLock,
kMutatorLock,
kZygoteCreationLock,
diff --git a/runtime/mirror/object-inl.h b/runtime/mirror/object-inl.h
index e460a8d..7ac2c8c 100644
--- a/runtime/mirror/object-inl.h
+++ b/runtime/mirror/object-inl.h
@@ -44,7 +44,7 @@ inline void Object::SetClass(Class* new_klass) {
SetFieldPtr(OFFSET_OF_OBJECT_MEMBER(Object, klass_), new_klass, false, false);
}
-inline LockWord Object::GetLockWord() {
+inline LockWord Object::GetLockWord() const {
return LockWord(GetField32(OFFSET_OF_OBJECT_MEMBER(Object, monitor_), true));
}
diff --git a/runtime/mirror/object.cc b/runtime/mirror/object.cc
index 92c05b2..49bad4c 100644
--- a/runtime/mirror/object.cc
+++ b/runtime/mirror/object.cc
@@ -14,6 +14,8 @@
* limitations under the License.
*/
+#include <ctime>
+
#include "object.h"
#include "art_field.h"
@@ -82,6 +84,52 @@ Object* Object::Clone(Thread* self) {
return copy.get();
}
+uint32_t Object::GenerateIdentityHashCode() {
+ static AtomicInteger seed(987654321 + std::time(nullptr));
+ uint32_t expected_value, new_value;
+ do {
+ expected_value = static_cast<uint32_t>(seed.load());
+ new_value = expected_value * 1103515245 + 12345;
+ } while (!seed.compare_and_swap(static_cast<int32_t>(expected_value),
+ static_cast<int32_t>(new_value)));
+ return expected_value & LockWord::kHashMask;
+}
+
+int32_t Object::IdentityHashCode() const {
+ while (true) {
+ LockWord lw = GetLockWord();
+ switch (lw.GetState()) {
+ case LockWord::kUnlocked: {
+ // Try to compare and swap in a new hash code. If the CAS succeeds, return the newly
+ // installed hash code immediately.
+ LockWord hash_word(LockWord::FromHashCode(GenerateIdentityHashCode()));
+ DCHECK_EQ(hash_word.GetState(), LockWord::kHashCode);
+ if (const_cast<Object*>(this)->CasLockWord(lw, hash_word)) {
+ return hash_word.GetHashCode();
+ }
+ break;
+ }
+ case LockWord::kThinLocked: {
+ // Inflate the thin lock to a monitor and stick the hash code inside of the monitor.
+ Thread* self = Thread::Current();
+ Monitor::InflateThinLocked(self, const_cast<Object*>(this), lw, GenerateIdentityHashCode());
+ break;
+ }
+ case LockWord::kFatLocked: {
+ // Already inflated, return the hash stored in the monitor.
+ Monitor* monitor = lw.FatLockMonitor();
+ DCHECK(monitor != nullptr);
+ return monitor->GetHashCode();
+ }
+ case LockWord::kHashCode: {
+ return lw.GetHashCode();
+ }
+ }
+ }
+ LOG(FATAL) << "Unreachable";
+ return 0;
+}
+
void Object::CheckFieldAssignmentImpl(MemberOffset field_offset, const Object* new_value) {
const Class* c = GetClass();
if (Runtime::Current()->GetClassLinker() == NULL ||
diff --git a/runtime/mirror/object.h b/runtime/mirror/object.h
index e3f5c10..11473cd 100644
--- a/runtime/mirror/object.h
+++ b/runtime/mirror/object.h
@@ -27,6 +27,7 @@ namespace art {
class ImageWriter;
class LockWord;
+class Monitor;
struct ObjectOffsets;
class Thread;
@@ -84,19 +85,13 @@ class MANAGED Object {
Object* Clone(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- int32_t IdentityHashCode() const {
-#ifdef MOVING_GARBAGE_COLLECTOR
- // TODO: we'll need to use the Object's internal concept of identity
- UNIMPLEMENTED(FATAL);
-#endif
- return reinterpret_cast<int32_t>(this);
- }
+ int32_t IdentityHashCode() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static MemberOffset MonitorOffset() {
return OFFSET_OF_OBJECT_MEMBER(Object, monitor_);
}
- LockWord GetLockWord();
+ LockWord GetLockWord() const;
void SetLockWord(LockWord new_val);
bool CasLockWord(LockWord old_val, LockWord new_val);
uint32_t GetLockOwnerThreadId();
@@ -243,7 +238,6 @@ class MANAGED Object {
private:
static void VerifyObject(const Object* obj) ALWAYS_INLINE;
-
// Verify the type correctness of stores to fields.
void CheckFieldAssignmentImpl(MemberOffset field_offset, const Object* new_value)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -254,6 +248,9 @@ class MANAGED Object {
}
}
+ // Generate an identity hash code.
+ static uint32_t GenerateIdentityHashCode();
+
// Write barrier called post update to a reference bearing field.
static void WriteBarrierField(const Object* dst, MemberOffset offset, const Object* new_value);
@@ -262,6 +259,7 @@ class MANAGED Object {
uint32_t monitor_;
friend class art::ImageWriter;
+ friend class art::Monitor;
friend struct art::ObjectOffsets; // for verifying offset information
DISALLOW_IMPLICIT_CONSTRUCTORS(Object);
};
diff --git a/runtime/monitor.cc b/runtime/monitor.cc
index a5605ff..b1bf84f 100644
--- a/runtime/monitor.cc
+++ b/runtime/monitor.cc
@@ -79,35 +79,49 @@ void Monitor::Init(uint32_t lock_profiling_threshold, bool (*is_sensitive_thread
is_sensitive_thread_hook_ = is_sensitive_thread_hook;
}
-Monitor::Monitor(Thread* owner, mirror::Object* obj)
+Monitor::Monitor(Thread* owner, mirror::Object* obj, uint32_t hash_code)
: monitor_lock_("a monitor lock", kMonitorLock),
monitor_contenders_("monitor contenders", monitor_lock_),
owner_(owner),
lock_count_(0),
obj_(obj),
wait_set_(NULL),
+ hash_code_(hash_code),
locking_method_(NULL),
locking_dex_pc_(0) {
// We should only inflate a lock if the owner is ourselves or suspended. This avoids a race
// with the owner unlocking the thin-lock.
- CHECK(owner == Thread::Current() || owner->IsSuspended());
+ CHECK(owner == nullptr || owner == Thread::Current() || owner->IsSuspended());
+ // The identity hash code is set for the lifetime of the monitor.
}
bool Monitor::Install(Thread* self) {
MutexLock mu(self, monitor_lock_); // Uncontended mutex acquisition as monitor isn't yet public.
- CHECK(owner_ == self || owner_->IsSuspended());
+ CHECK(owner_ == nullptr || owner_ == self || owner_->IsSuspended());
// Propagate the lock state.
- LockWord thin(obj_->GetLockWord());
- if (thin.GetState() != LockWord::kThinLocked) {
- // The owner_ is suspended but another thread beat us to install a monitor.
- CHECK_EQ(thin.GetState(), LockWord::kFatLocked);
- return false;
+ LockWord lw(obj_->GetLockWord());
+ switch (lw.GetState()) {
+ case LockWord::kThinLocked: {
+ CHECK_EQ(owner_->GetThreadId(), lw.ThinLockOwner());
+ lock_count_ = lw.ThinLockCount();
+ break;
+ }
+ case LockWord::kHashCode: {
+ CHECK_EQ(hash_code_, lw.GetHashCode());
+ break;
+ }
+ case LockWord::kFatLocked: {
+ // The owner_ is suspended but another thread beat us to install a monitor.
+ return false;
+ }
+ case LockWord::kUnlocked: {
+ LOG(FATAL) << "Inflating unlocked lock word";
+ break;
+ }
}
- CHECK_EQ(owner_->GetThreadId(), thin.ThinLockOwner());
- lock_count_ = thin.ThinLockCount();
LockWord fat(this);
// Publish the updated lock word, which may race with other threads.
- bool success = obj_->CasLockWord(thin, fat);
+ bool success = obj_->CasLockWord(lw, fat);
// Lock profiling.
if (success && lock_profiling_threshold_ != 0) {
locking_method_ = owner_->GetCurrentMethod(&locking_dex_pc_);
@@ -540,19 +554,46 @@ void Monitor::NotifyAll(Thread* self) {
* thread must own the lock or the owner must be suspended. There's a race with other threads
* inflating the lock and so the caller should read the monitor following the call.
*/
-void Monitor::Inflate(Thread* self, Thread* owner, mirror::Object* obj) {
+void Monitor::Inflate(Thread* self, Thread* owner, mirror::Object* obj, int32_t hash_code) {
DCHECK(self != NULL);
- DCHECK(owner != NULL);
DCHECK(obj != NULL);
-
// Allocate and acquire a new monitor.
- UniquePtr<Monitor> m(new Monitor(owner, obj));
+ UniquePtr<Monitor> m(new Monitor(owner, obj, hash_code));
if (m->Install(self)) {
VLOG(monitor) << "monitor: thread " << owner->GetThreadId()
<< " created monitor " << m.get() << " for object " << obj;
Runtime::Current()->GetMonitorList()->Add(m.release());
+ CHECK_EQ(obj->GetLockWord().GetState(), LockWord::kFatLocked);
+ }
+}
+
+void Monitor::InflateThinLocked(Thread* self, mirror::Object* obj, LockWord lock_word,
+ uint32_t hash_code) {
+ DCHECK_EQ(lock_word.GetState(), LockWord::kThinLocked);
+ uint32_t owner_thread_id = lock_word.ThinLockOwner();
+ if (owner_thread_id == self->GetThreadId()) {
+ // We own the monitor, we can easily inflate it.
+ Inflate(self, self, obj, hash_code);
+ } else {
+ ThreadList* thread_list = Runtime::Current()->GetThreadList();
+ // Suspend the owner, inflate. First change to blocked and give up mutator_lock_.
+ ScopedThreadStateChange tsc(self, kBlocked);
+ if (lock_word == obj->GetLockWord()) { // If lock word hasn't changed.
+ bool timed_out;
+ Thread* owner = thread_list->SuspendThreadByThreadId(lock_word.ThinLockOwner(), false,
+ &timed_out);
+ if (owner != nullptr) {
+ // We succeeded in suspending the thread, check the lock's status didn't change.
+ lock_word = obj->GetLockWord();
+ if (lock_word.GetState() == LockWord::kThinLocked &&
+ lock_word.ThinLockOwner() == owner_thread_id) {
+ // Go ahead and inflate the lock.
+ Inflate(self, owner, obj, hash_code);
+ }
+ thread_list->Resume(owner, false);
+ }
+ }
}
- CHECK_EQ(obj->GetLockWord().GetState(), LockWord::kFatLocked);
}
void Monitor::MonitorEnter(Thread* self, mirror::Object* obj) {
@@ -560,7 +601,6 @@ void Monitor::MonitorEnter(Thread* self, mirror::Object* obj) {
DCHECK(obj != NULL);
uint32_t thread_id = self->GetThreadId();
size_t contention_count = 0;
-
while (true) {
LockWord lock_word = obj->GetLockWord();
switch (lock_word.GetState()) {
@@ -582,33 +622,17 @@ void Monitor::MonitorEnter(Thread* self, mirror::Object* obj) {
return; // Success!
} else {
// We'd overflow the recursion count, so inflate the monitor.
- Inflate(self, self, obj);
+ InflateThinLocked(self, obj, lock_word, mirror::Object::GenerateIdentityHashCode());
}
} else {
// Contention.
contention_count++;
- if (contention_count <= Runtime::Current()->GetMaxSpinsBeforeThinkLockInflation()) {
+ Runtime* runtime = Runtime::Current();
+ if (contention_count <= runtime->GetMaxSpinsBeforeThinkLockInflation()) {
NanoSleep(1000); // Sleep for 1us and re-attempt.
} else {
contention_count = 0;
- // Suspend the owner, inflate. First change to blocked and give up mutator_lock_.
- ScopedThreadStateChange tsc(self, kBlocked);
- bool timed_out;
- ThreadList* thread_list = Runtime::Current()->GetThreadList();
- if (lock_word == obj->GetLockWord()) { // If lock word hasn't changed.
- Thread* owner = thread_list->SuspendThreadByThreadId(lock_word.ThinLockOwner(), false,
- &timed_out);
- if (owner != NULL) {
- // We succeeded in suspending the thread, check the lock's status didn't change.
- lock_word = obj->GetLockWord();
- if (lock_word.GetState() == LockWord::kThinLocked &&
- lock_word.ThinLockOwner() == owner_thread_id) {
- // Go ahead and inflate the lock.
- Inflate(self, owner, obj);
- }
- thread_list->Resume(owner, false);
- }
- }
+ InflateThinLocked(self, obj, lock_word, mirror::Object::GenerateIdentityHashCode());
}
}
continue; // Start from the beginning.
@@ -618,6 +642,11 @@ void Monitor::MonitorEnter(Thread* self, mirror::Object* obj) {
mon->Lock(self);
return; // Success!
}
+ case LockWord::kHashCode: {
+ // Inflate with the existing hashcode.
+ Inflate(self, nullptr, obj, lock_word.GetHashCode());
+ break;
+ }
}
}
}
@@ -628,6 +657,8 @@ bool Monitor::MonitorExit(Thread* self, mirror::Object* obj) {
LockWord lock_word = obj->GetLockWord();
switch (lock_word.GetState()) {
+ case LockWord::kHashCode:
+ // Fall-through.
case LockWord::kUnlocked:
FailedUnlock(obj, self, NULL, NULL);
return false; // Failure.
@@ -672,6 +703,8 @@ void Monitor::Wait(Thread* self, mirror::Object *obj, int64_t ms, int32_t ns,
LockWord lock_word = obj->GetLockWord();
switch (lock_word.GetState()) {
+ case LockWord::kHashCode:
+ // Fall-through.
case LockWord::kUnlocked:
ThrowIllegalMonitorStateExceptionF("object not locked by thread before wait()");
return; // Failure.
@@ -683,7 +716,7 @@ void Monitor::Wait(Thread* self, mirror::Object *obj, int64_t ms, int32_t ns,
return; // Failure.
} else {
// We own the lock, inflate to enqueue ourself on the Monitor.
- Inflate(self, self, obj);
+ Inflate(self, self, obj, mirror::Object::GenerateIdentityHashCode());
lock_word = obj->GetLockWord();
}
break;
@@ -701,6 +734,8 @@ void Monitor::DoNotify(Thread* self, mirror::Object* obj, bool notify_all) {
LockWord lock_word = obj->GetLockWord();
switch (lock_word.GetState()) {
+ case LockWord::kHashCode:
+ // Fall-through.
case LockWord::kUnlocked:
ThrowIllegalMonitorStateExceptionF("object not locked by thread before notify()");
return; // Failure.
@@ -732,6 +767,8 @@ uint32_t Monitor::GetLockOwnerThreadId(mirror::Object* obj) {
LockWord lock_word = obj->GetLockWord();
switch (lock_word.GetState()) {
+ case LockWord::kHashCode:
+ // Fall-through.
case LockWord::kUnlocked:
return ThreadList::kInvalidThreadId;
case LockWord::kThinLocked:
@@ -889,12 +926,19 @@ bool Monitor::IsValidLockWord(LockWord lock_word) {
}
return false; // Fail - unowned monitor in an object.
}
+ case LockWord::kHashCode:
+ return true;
default:
LOG(FATAL) << "Unreachable";
return false;
}
}
+bool Monitor::IsLocked() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ MutexLock mu(Thread::Current(), monitor_lock_);
+ return owner_ != nullptr;
+}
+
void Monitor::TranslateLocation(const mirror::ArtMethod* method, uint32_t dex_pc,
const char** source_file, uint32_t* line_number) const {
// If method is null, location is unknown
@@ -976,6 +1020,8 @@ MonitorInfo::MonitorInfo(mirror::Object* obj) : owner_(NULL), entry_count_(0) {
LockWord lock_word = obj->GetLockWord();
switch (lock_word.GetState()) {
case LockWord::kUnlocked:
+ // Fall-through.
+ case LockWord::kHashCode:
break;
case LockWord::kThinLocked:
owner_ = Runtime::Current()->GetThreadList()->FindThreadByThreadId(lock_word.ThinLockOwner());
diff --git a/runtime/monitor.h b/runtime/monitor.h
index 27124a2..c464400 100644
--- a/runtime/monitor.h
+++ b/runtime/monitor.h
@@ -88,8 +88,7 @@ class Monitor {
static bool IsValidLockWord(LockWord lock_word);
- // TODO: SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- mirror::Object* GetObject() const {
+ mirror::Object* GetObject() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return obj_;
}
@@ -99,8 +98,17 @@ class Monitor {
return owner_;
}
+ int32_t GetHashCode() const {
+ return hash_code_;
+ }
+
+ bool IsLocked() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ static void InflateThinLocked(Thread* self, mirror::Object* obj, LockWord lock_word,
+ uint32_t hash_code) NO_THREAD_SAFETY_ANALYSIS;
+
private:
- explicit Monitor(Thread* owner, mirror::Object* obj)
+ explicit Monitor(Thread* owner, mirror::Object* obj, uint32_t hash_code)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Install the monitor into its object, may fail if another thread installs a different monitor
@@ -112,7 +120,7 @@ class Monitor {
void AppendToWaitSet(Thread* thread) EXCLUSIVE_LOCKS_REQUIRED(monitor_lock_);
void RemoveFromWaitSet(Thread* thread) EXCLUSIVE_LOCKS_REQUIRED(monitor_lock_);
- static void Inflate(Thread* self, Thread* owner, mirror::Object* obj)
+ static void Inflate(Thread* self, Thread* owner, mirror::Object* obj, int32_t hash_code)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void LogContentionEvent(Thread* self, uint32_t wait_ms, uint32_t sample_percent,
@@ -171,6 +179,9 @@ class Monitor {
// Threads currently waiting on this monitor.
Thread* wait_set_ GUARDED_BY(monitor_lock_);
+ // Stored object hash code, always generated.
+ const uint32_t hash_code_;
+
// Method and dex pc where the lock owner acquired the lock, used when lock
// sampling is enabled. locking_method_ may be null if the lock is currently
// unlocked, or if the lock is acquired by the system when the stack is empty.
@@ -190,7 +201,7 @@ class MonitorList {
void Add(Monitor* m);
- void SweepMonitorList(RootVisitor visitor, void* arg);
+ void SweepMonitorList(RootVisitor visitor, void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void DisallowNewMonitors();
void AllowNewMonitors();
diff --git a/runtime/native/java_lang_System.cc b/runtime/native/java_lang_System.cc
index 6674db2..ea78e04 100644
--- a/runtime/native/java_lang_System.cc
+++ b/runtime/native/java_lang_System.cc
@@ -339,6 +339,9 @@ static void System_arraycopyCharUnchecked(JNIEnv* env, jclass, jobject javaSrc,
}
static jint System_identityHashCode(JNIEnv* env, jclass, jobject javaObject) {
+ if (javaObject == nullptr) {
+ return 0;
+ }
ScopedFastNativeObjectAccess soa(env);
mirror::Object* o = soa.Decode<mirror::Object*>(javaObject);
return static_cast<jint>(o->IdentityHashCode());
diff --git a/runtime/runtime.h b/runtime/runtime.h
index b6429b6..77da098 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -331,7 +331,8 @@ class Runtime {
// Sweep system weaks, the system weak is deleted if the visitor return nullptr. Otherwise, the
// system weak is updated to be the visitor's returned value.
- void SweepSystemWeaks(RootVisitor* visitor, void* arg);
+ void SweepSystemWeaks(RootVisitor* visitor, void* arg)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Returns a special method that calls into a trampoline for runtime method resolution
mirror::ArtMethod* GetResolutionMethod() const {