 -rw-r--r--  runtime/monitor.cc          | 15
 -rw-r--r--  runtime/monitor.h           |  8
 -rw-r--r--  runtime/read_barrier-inl.h  | 15
 -rw-r--r--  runtime/read_barrier.h      |  4
 4 files changed, 33 insertions(+), 9 deletions(-)
diff --git a/runtime/monitor.cc b/runtime/monitor.cc
index f783edb..7d297cb 100644
--- a/runtime/monitor.cc
+++ b/runtime/monitor.cc
@@ -111,7 +111,7 @@ bool Monitor::Install(Thread* self) {
MutexLock mu(self, monitor_lock_); // Uncontended mutex acquisition as monitor isn't yet public.
CHECK(owner_ == nullptr || owner_ == self || owner_->IsSuspended());
// Propagate the lock state.
- LockWord lw(obj_->GetLockWord(false));
+ LockWord lw(GetObject()->GetLockWord(false));
switch (lw.GetState()) {
case LockWord::kThinLocked: {
CHECK_EQ(owner_->GetThreadId(), lw.ThinLockOwner());
@@ -137,7 +137,7 @@ bool Monitor::Install(Thread* self) {
}
LockWord fat(this);
// Publish the updated lock word, which may race with other threads.
- bool success = obj_->CasLockWord(lw, fat);
+ bool success = GetObject()->CasLockWord(lw, fat);
// Lock profiling.
if (success && owner_ != nullptr && lock_profiling_threshold_ != 0) {
locking_method_ = owner_->GetCurrentMethod(&locking_dex_pc_);
@@ -228,7 +228,7 @@ void Monitor::Lock(Thread* self) {
monitor_lock_.Unlock(self); // Let go of locks in order.
{
ScopedThreadStateChange tsc(self, kBlocked); // Change to blocked and give up mutator_lock_.
- self->SetMonitorEnterObject(obj_);
+ self->SetMonitorEnterObject(GetObject());
MutexLock mu2(self, monitor_lock_); // Reacquire monitor_lock_ without mutator_lock_ for Wait.
if (owner_ != NULL) { // Did the owner_ give the lock up?
monitor_contenders_.Wait(self); // Still contended so wait.
@@ -363,7 +363,7 @@ bool Monitor::Unlock(Thread* self) {
// We don't own this, so we're not allowed to unlock it.
// The JNI spec says that we should throw IllegalMonitorStateException
// in this case.
- FailedUnlock(obj_, self, owner, this);
+ FailedUnlock(GetObject(), self, owner, this);
return false;
}
return true;
@@ -895,7 +895,7 @@ void Monitor::DescribeWait(std::ostream& os, const Thread* thread) {
MutexLock mu(self, *thread->GetWaitMutex());
Monitor* monitor = thread->GetWaitMonitor();
if (monitor != nullptr) {
- pretty_object = monitor->obj_;
+ pretty_object = monitor->GetObject();
}
} else if (state == kBlocked) {
wait_message = " - waiting to lock ";
@@ -1101,12 +1101,13 @@ void MonitorList::SweepMonitorList(IsMarkedCallback* callback, void* arg) {
MutexLock mu(Thread::Current(), monitor_list_lock_);
for (auto it = list_.begin(); it != list_.end(); ) {
Monitor* m = *it;
- mirror::Object* obj = m->GetObject();
+ // Disable the read barrier in GetObject() as this is called by GC.
+ mirror::Object* obj = m->GetObject<kWithoutReadBarrier>();
// The object of a monitor can be null if we have deflated it.
mirror::Object* new_obj = obj != nullptr ? callback(obj, arg) : nullptr;
if (new_obj == nullptr) {
VLOG(monitor) << "freeing monitor " << m << " belonging to unmarked object "
- << m->GetObject();
+ << obj;
delete m;
it = list_.erase(it);
} else {
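
The sweep hunk above is the one caller that must see the raw weak root: the IsMarkedCallback itself decides whether the referent is still live, so the read barrier has to stay out of the way. A minimal sketch of that pattern follows; the body of the else branch (updating the root and advancing the iterator) is an assumption about the usual sweep step and is not shown in the truncated hunk.

    // Sketch only: the weak-root sweep pattern used by SweepMonitorList.
    // Read the referent without a read barrier so the GC sees the raw root,
    // then let the mark callback report whether (and where) it survived.
    mirror::Object* obj = m->GetObject<kWithoutReadBarrier>();
    mirror::Object* new_obj = (obj != nullptr) ? callback(obj, arg) : nullptr;
    if (new_obj == nullptr) {
      delete m;                // Referent is unmarked: the monitor dies with it.
      it = list_.erase(it);
    } else {
      m->SetObject(new_obj);   // Assumed update step: referent was marked (and may have moved).
      ++it;
    }
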
diff --git a/runtime/monitor.h b/runtime/monitor.h
index bc1b2ed4..7af2d4c 100644
--- a/runtime/monitor.h
+++ b/runtime/monitor.h
@@ -27,6 +27,7 @@
#include "atomic.h"
#include "base/mutex.h"
#include "object_callbacks.h"
+#include "read_barrier.h"
#include "thread_state.h"
namespace art {
@@ -92,8 +93,9 @@ class Monitor {
static bool IsValidLockWord(LockWord lock_word);
+ template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
mirror::Object* GetObject() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return obj_;
+ return ReadBarrier::BarrierForWeakRoot<mirror::Object, kReadBarrierOption>(obj_);
}
void SetObject(mirror::Object* object);
@@ -190,7 +192,9 @@ class Monitor {
// Owner's recursive lock depth.
int lock_count_ GUARDED_BY(monitor_lock_);
- // What object are we part of.
+ // What object are we part of. This is a weak root. Do not access
+ // this directly, use GetObject() to read it so it will be guarded
+ // by a read barrier.
mirror::Object* obj_;
// Threads currently waiting on this monitor.
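
Because the template parameter defaults to kWithReadBarrier, existing callers of GetObject() keep their behavior, while GC-internal code opts out explicitly. A hypothetical pair of call sites (variable names invented for illustration) showing the two instantiations:

    // Mutator-side code: the default instantiation applies the weak-root barrier.
    mirror::Object* locked_obj = monitor->GetObject();

    // GC-side code (e.g. while sweeping): bypass the barrier and read the raw root.
    mirror::Object* raw_root = monitor->GetObject<kWithoutReadBarrier>();
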
diff --git a/runtime/read_barrier-inl.h b/runtime/read_barrier-inl.h
index 88e2f8f..4302c9e 100644
--- a/runtime/read_barrier-inl.h
+++ b/runtime/read_barrier-inl.h
@@ -43,6 +43,21 @@ inline MirrorType* ReadBarrier::Barrier(
}
}
+template <typename MirrorType, ReadBarrierOption kReadBarrierOption>
+inline MirrorType* ReadBarrier::BarrierForWeakRoot(MirrorType* ref) {
+ UNUSED(ref);
+ const bool with_read_barrier = kReadBarrierOption == kWithReadBarrier;
+ if (with_read_barrier && kUseBakerReadBarrier) {
+ // To be implemented.
+ return ref;
+ } else if (with_read_barrier && kUseBrooksReadBarrier) {
+ // To be implemented.
+ return ref;
+ } else {
+ return ref;
+ }
+}
+
} // namespace art
#endif // ART_RUNTIME_READ_BARRIER_INL_H_
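
BarrierForWeakRoot is a pass-through for now; the Baker and Brooks branches only reserve the spots where collector-specific handling will land. Since kReadBarrierOption and the kUseBakerReadBarrier/kUseBrooksReadBarrier flags are compile-time constants, each instantiation folds down to returning the pointer unchanged. A self-contained sketch of the same dispatch shape, using a hypothetical function name and assumed flag values:

    // Hypothetical, standalone illustration of compile-time barrier dispatch.
    enum ReadBarrierOption { kWithReadBarrier, kWithoutReadBarrier };
    constexpr bool kUseBakerReadBarrier = false;   // assumed build-time flag
    constexpr bool kUseBrooksReadBarrier = false;  // assumed build-time flag

    template <typename MirrorType, ReadBarrierOption kOption = kWithReadBarrier>
    MirrorType* WeakRootBarrier(MirrorType* ref) {
      const bool with_read_barrier = (kOption == kWithReadBarrier);
      if (with_read_barrier && kUseBakerReadBarrier) {
        return ref;  // Baker-style handling would slot in here.
      } else if (with_read_barrier && kUseBrooksReadBarrier) {
        return ref;  // Brooks-style handling would slot in here.
      }
      return ref;    // No barrier configured: plain load.
    }
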
diff --git a/runtime/read_barrier.h b/runtime/read_barrier.h
index 73c3d43..e40e8ea 100644
--- a/runtime/read_barrier.h
+++ b/runtime/read_barrier.h
@@ -37,6 +37,10 @@ class ReadBarrier {
ALWAYS_INLINE static MirrorType* Barrier(
mirror::Object* obj, MemberOffset offset, mirror::HeapReference<MirrorType>* ref_addr)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ template <typename MirrorType, ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
+ ALWAYS_INLINE static MirrorType* BarrierForWeakRoot(MirrorType* ref)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
};
} // namespace art