author     Mathieu Chartier <mathieuc@google.com>  2014-06-15 12:39:02 -0700
committer  Mathieu Chartier <mathieuc@google.com>  2014-06-16 14:01:52 -0700
commit     308351ada0008b0cbe1a5afc31c302c975554ee4 (patch)
tree       1447c36df5616515d6e7ac35c185721d3c829c4b
parent     97ed29f800c56a06fd6989e0883e4c97bedd2453 (diff)
Change reference processing to use heap references.

Removes several SetReferent calls that were only needed to update moved
referents. Cleaned up other aspects of the code.

Change-Id: Ibcb4d713fadea617efee7e936352ddf77ff4c370
-rw-r--r--  runtime/gc/collector/mark_sweep.cc  |  11
-rw-r--r--  runtime/gc/collector/mark_sweep.h   |   4
-rw-r--r--  runtime/gc/collector/semi_space.cc  |  20
-rw-r--r--  runtime/gc/collector/semi_space.h   |   4
-rw-r--r--  runtime/gc/heap.cc                  |   6
-rw-r--r--  runtime/gc/reference_processor.cc   |  99
-rw-r--r--  runtime/gc/reference_processor.h    |  13
-rw-r--r--  runtime/gc/reference_queue.cc       |  82
-rw-r--r--  runtime/gc/reference_queue.h        |  13
-rw-r--r--  runtime/mirror/reference.h          |  16
-rw-r--r--  runtime/object_callbacks.h          |   5
11 files changed, 150 insertions(+), 123 deletions(-)
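
For orientation before the per-file hunks: a minimal sketch (not from the patch; the mirror types below are simplified stand-ins) of the two callback shapes this change moves between. The old IsMarkedCallback hands back the forwarded object and leaves the write-back to each caller; the new IsHeapReferenceMarkedCallback answers "marked?" through its return value and fixes up the heap reference in place, which is why the SetReferent calls at the call sites can go away.

    // Sketch only: simplified stand-ins for the real mirror:: types.
    namespace mirror {
    class Object {};

    template <typename T>
    class HeapReference {
     public:
      T* AsMirrorPtr() const { return ptr_; }
      void Assign(T* obj) { ptr_ = obj; }  // the real class stores a compressed reference
     private:
      T* ptr_ = nullptr;
    };
    }  // namespace mirror

    // Old shape: null means "not marked"; a different non-null pointer means "marked and
    // moved", and every caller had to write that pointer back with SetReferent.
    typedef mirror::Object* (IsMarkedCallback)(mirror::Object* object, void* arg);

    // New shape (this patch): the bool answers "is the referent marked?", and when the
    // referent has moved the callback itself assigns the new address into the slot.
    typedef bool (IsHeapReferenceMarkedCallback)(mirror::HeapReference<mirror::Object>* object,
                                                 void* arg);
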
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 890036b..c72913a 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -176,7 +176,7 @@ void MarkSweep::ProcessReferences(Thread* self) {
TimingLogger::ScopedSplit split("ProcessReferences", &timings_);
WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
GetHeap()->GetReferenceProcessor()->ProcessReferences(
- true, &timings_, clear_soft_references_, &IsMarkedCallback, &MarkObjectCallback,
+ true, &timings_, clear_soft_references_, &HeapReferenceMarkedCallback, &MarkObjectCallback,
&ProcessMarkStackCallback, this);
}
@@ -374,6 +374,10 @@ void MarkSweep::MarkHeapReferenceCallback(mirror::HeapReference<mirror::Object>*
reinterpret_cast<MarkSweep*>(arg)->MarkObject(ref->AsMirrorPtr());
}
+bool MarkSweep::HeapReferenceMarkedCallback(mirror::HeapReference<mirror::Object>* ref, void* arg) {
+ return reinterpret_cast<MarkSweep*>(arg)->IsMarked(ref->AsMirrorPtr());
+}
+
class MarkSweepMarkObjectSlowPath {
public:
explicit MarkSweepMarkObjectSlowPath(MarkSweep* mark_sweep) : mark_sweep_(mark_sweep) {
@@ -1170,11 +1174,11 @@ void MarkSweep::SweepLargeObjects(bool swap_bitmaps) {
// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been
// marked, put it on the appropriate list in the heap for later processing.
void MarkSweep::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* ref) {
- DCHECK(klass != nullptr);
if (kCountJavaLangRefs) {
++reference_count_;
}
- heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, ref, IsMarkedCallback, this);
+ heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, ref, &HeapReferenceMarkedCallback,
+ this);
}
class MarkObjectVisitor {
@@ -1270,6 +1274,7 @@ void MarkSweep::ProcessMarkStack(bool paused) {
inline bool MarkSweep::IsMarked(const Object* object) const
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
+ DCHECK(object != nullptr);
if (immune_region_.ContainsObject(object)) {
return true;
}
diff --git a/runtime/gc/collector/mark_sweep.h b/runtime/gc/collector/mark_sweep.h
index a0a0dd8..a44d8a1 100644
--- a/runtime/gc/collector/mark_sweep.h
+++ b/runtime/gc/collector/mark_sweep.h
@@ -178,6 +178,10 @@ class MarkSweep : public GarbageCollector {
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+ static bool HeapReferenceMarkedCallback(mirror::HeapReference<mirror::Object>* ref, void* arg)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
static void MarkRootCallback(mirror::Object** root, void* arg, uint32_t thread_id,
RootType root_type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index e5bb1cc..badf8b3 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -164,7 +164,7 @@ void SemiSpace::ProcessReferences(Thread* self) {
TimingLogger::ScopedSplit split("ProcessReferences", &timings_);
WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
GetHeap()->GetReferenceProcessor()->ProcessReferences(
- false, &timings_, clear_soft_references_, &MarkedForwardingAddressCallback,
+ false, &timings_, clear_soft_references_, &HeapReferenceMarkedCallback,
&MarkObjectCallback, &ProcessMarkStackCallback, this);
}
@@ -649,6 +649,22 @@ void SemiSpace::MarkRoots() {
Runtime::Current()->VisitRoots(MarkRootCallback, this);
}
+bool SemiSpace::HeapReferenceMarkedCallback(mirror::HeapReference<mirror::Object>* object,
+ void* arg) {
+ mirror::Object* obj = object->AsMirrorPtr();
+ mirror::Object* new_obj =
+ reinterpret_cast<SemiSpace*>(arg)->GetMarkedForwardAddress(obj);
+ if (new_obj == nullptr) {
+ return false;
+ }
+ if (new_obj != obj) {
+ // Write barrier is not necessary since it still points to the same object, just at a different
+ // address.
+ object->Assign(new_obj);
+ }
+ return true;
+}
+
mirror::Object* SemiSpace::MarkedForwardingAddressCallback(mirror::Object* object, void* arg) {
return reinterpret_cast<SemiSpace*>(arg)->GetMarkedForwardAddress(object);
}
@@ -698,7 +714,7 @@ void SemiSpace::SweepLargeObjects(bool swap_bitmaps) {
// marked, put it on the appropriate list in the heap for later processing.
void SemiSpace::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference) {
heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, reference,
- MarkedForwardingAddressCallback, this);
+ &HeapReferenceMarkedCallback, this);
}
class SemiSpaceMarkObjectVisitor {
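
Not part of the patch: a self-contained toy of the contract SemiSpace::HeapReferenceMarkedCallback implements above, using stand-in types and a hypothetical Forwarder in place of the collector. It shows the callback returning false for an unmarked referent and fixing up a moved referent in place, so callers never touch the slot themselves.

    #include <cstdio>
    #include <map>

    struct Object {};

    // Stand-in for mirror::HeapReference<mirror::Object>.
    struct HeapReference {
      Object* AsMirrorPtr() const { return ptr; }
      void Assign(Object* obj) { ptr = obj; }
      Object* ptr = nullptr;
    };

    // Hypothetical stand-in for the copying collector: unmarked objects map to nullptr,
    // moved objects map to their to-space copy, unmoved marked objects map to themselves.
    struct Forwarder {
      std::map<Object*, Object*> forward;
      Object* GetMarkedForwardAddress(Object* obj) const {
        auto it = forward.find(obj);
        return it == forward.end() ? nullptr : it->second;
      }
    };

    // Same shape as the callback added in the hunk above.
    bool HeapReferenceMarkedCallback(HeapReference* ref, void* arg) {
      Object* obj = ref->AsMirrorPtr();
      Object* new_obj = static_cast<Forwarder*>(arg)->GetMarkedForwardAddress(obj);
      if (new_obj == nullptr) {
        return false;  // white referent: the caller may clear or enqueue the reference
      }
      if (new_obj != obj) {
        ref->Assign(new_obj);  // referent moved: fix up the slot in place, no SetReferent needed
      }
      return true;
    }

    int main() {
      Object from, to, dead;
      Forwarder fwd;
      fwd.forward[&from] = &to;  // "from" was marked and copied to "to"; "dead" is unmarked
      HeapReference live;  live.Assign(&from);
      HeapReference white; white.Assign(&dead);
      bool live_marked = HeapReferenceMarkedCallback(&live, &fwd);
      std::printf("live: marked=%d slot_updated=%d\n", live_marked, live.AsMirrorPtr() == &to);
      std::printf("dead: marked=%d\n", HeapReferenceMarkedCallback(&white, &fwd));
      return 0;
    }
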
diff --git a/runtime/gc/collector/semi_space.h b/runtime/gc/collector/semi_space.h
index a95abe4..bff0847 100644
--- a/runtime/gc/collector/semi_space.h
+++ b/runtime/gc/collector/semi_space.h
@@ -162,6 +162,10 @@ class SemiSpace : public GarbageCollector {
EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+ static bool HeapReferenceMarkedCallback(mirror::HeapReference<mirror::Object>* object, void* arg)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
+ SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
static mirror::Object* MarkedForwardingAddressCallback(mirror::Object* object, void* arg)
EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index f5f7a86..ba7969e 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -1411,7 +1411,7 @@ void Heap::TransitionCollector(CollectorType collector_type) {
ChangeCollector(collector_type);
tl->ResumeAll();
// Can't call into java code with all threads suspended.
- reference_processor_.EnqueueClearedReferences();
+ reference_processor_.EnqueueClearedReferences(self);
uint64_t duration = NanoTime() - start_time;
GrowForUtilization(semi_space_collector_);
FinishGC(self, collector::kGcTypeFull);
@@ -1814,7 +1814,7 @@ collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type, GcCaus
total_bytes_freed_ever_ += collector->GetFreedBytes();
RequestHeapTrim();
// Enqueue cleared references.
- reference_processor_.EnqueueClearedReferences();
+ reference_processor_.EnqueueClearedReferences(self);
// Grow the heap so that we know when to perform the next GC.
GrowForUtilization(collector);
const size_t duration = collector->GetDurationNs();
@@ -1840,7 +1840,7 @@ collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type, GcCaus
<< ((i != pause_times.size() - 1) ? "," : "");
}
LOG(INFO) << gc_cause << " " << collector->GetName()
- << " GC freed " << collector->GetFreedObjects() << "("
+ << " GC freed " << collector->GetFreedObjects() << "("
<< PrettySize(collector->GetFreedBytes()) << ") AllocSpace objects, "
<< collector->GetFreedLargeObjects() << "("
<< PrettySize(collector->GetFreedLargeObjectBytes()) << ") LOS objects, "
diff --git a/runtime/gc/reference_processor.cc b/runtime/gc/reference_processor.cc
index 7988af7..3ff9889 100644
--- a/runtime/gc/reference_processor.cc
+++ b/runtime/gc/reference_processor.cc
@@ -44,36 +44,35 @@ void ReferenceProcessor::DisableSlowPath(Thread* self) {
mirror::Object* ReferenceProcessor::GetReferent(Thread* self, mirror::Reference* reference) {
mirror::Object* const referent = reference->GetReferent();
- if (LIKELY(!slow_path_enabled_)) {
+ // If the referent is null then it is already cleared, we can just return null since there is no
+ // scenario where it becomes non-null during the reference processing phase.
+ if (LIKELY(!slow_path_enabled_) || referent == nullptr) {
return referent;
}
- // Another fast path, the referent is cleared, we can just return null since there is no scenario
- // where it becomes non-null.
- if (referent == nullptr) {
- return nullptr;
- }
MutexLock mu(self, lock_);
while (slow_path_enabled_) {
- mirror::Object* const referent = reference->GetReferent();
- // If the referent became cleared, return it.
- if (referent == nullptr) {
+ mirror::HeapReference<mirror::Object>* const referent_addr =
+ reference->GetReferentReferenceAddr();
+ // If the referent became cleared, return it. Don't need barrier since thread roots can't get
+ // updated until after we leave the function due to holding the mutator lock.
+ if (referent_addr->AsMirrorPtr() == nullptr) {
return nullptr;
}
// Try to see if the referent is already marked by using the is_marked_callback. We can return
- // it to the mutator as long as the GC is not preserving references. If the GC is
- IsMarkedCallback* const is_marked_callback = process_references_args_.is_marked_callback_;
+ // it to the mutator as long as the GC is not preserving references.
+ IsHeapReferenceMarkedCallback* const is_marked_callback =
+ process_references_args_.is_marked_callback_;
if (LIKELY(is_marked_callback != nullptr)) {
- mirror::Object* const obj = is_marked_callback(referent, process_references_args_.arg_);
// If it's null it means not marked, but it could become marked if the referent is reachable
// by finalizer referents. So we can not return in this case and must block. Otherwise, we
// can return it to the mutator as long as the GC is not preserving references, in which
// case only black nodes can be safely returned. If the GC is preserving references, the
// mutator could take a white field from a grey or white node and move it somewhere else
// in the heap causing corruption since this field would get swept.
- if (obj != nullptr) {
+ if (is_marked_callback(referent_addr, process_references_args_.arg_)) {
if (!preserving_references_ ||
(LIKELY(!reference->IsFinalizerReferenceInstance()) && !reference->IsEnqueued())) {
- return obj;
+ return referent_addr->AsMirrorPtr();
}
}
}
@@ -82,10 +81,14 @@ mirror::Object* ReferenceProcessor::GetReferent(Thread* self, mirror::Reference*
return reference->GetReferent();
}
-mirror::Object* ReferenceProcessor::PreserveSoftReferenceCallback(mirror::Object* obj, void* arg) {
+bool ReferenceProcessor::PreserveSoftReferenceCallback(mirror::HeapReference<mirror::Object>* obj,
+ void* arg) {
auto* const args = reinterpret_cast<ProcessReferencesArgs*>(arg);
- // TODO: Not preserve all soft references.
- return args->mark_callback_(obj, args->arg_);
+ // TODO: Add smarter logic for preserving soft references.
+ mirror::Object* new_obj = args->mark_callback_(obj->AsMirrorPtr(), args->arg_);
+ DCHECK(new_obj != nullptr);
+ obj->Assign(new_obj);
+ return true;
}
void ReferenceProcessor::StartPreservingReferences(Thread* self) {
@@ -103,7 +106,7 @@ void ReferenceProcessor::StopPreservingReferences(Thread* self) {
// Process reference class instances and schedule finalizations.
void ReferenceProcessor::ProcessReferences(bool concurrent, TimingLogger* timings,
bool clear_soft_references,
- IsMarkedCallback* is_marked_callback,
+ IsHeapReferenceMarkedCallback* is_marked_callback,
MarkObjectCallback* mark_object_callback,
ProcessMarkStackCallback* process_mark_stack_callback,
void* arg) {
@@ -132,8 +135,8 @@ void ReferenceProcessor::ProcessReferences(bool concurrent, TimingLogger* timing
}
}
// Clear all remaining soft and weak references with white referents.
- soft_reference_queue_.ClearWhiteReferences(cleared_references_, is_marked_callback, arg);
- weak_reference_queue_.ClearWhiteReferences(cleared_references_, is_marked_callback, arg);
+ soft_reference_queue_.ClearWhiteReferences(&cleared_references_, is_marked_callback, arg);
+ weak_reference_queue_.ClearWhiteReferences(&cleared_references_, is_marked_callback, arg);
{
TimingLogger::ScopedSplit split(concurrent ? "EnqueueFinalizerReferences" :
"(Paused)EnqueueFinalizerReferences", timings);
@@ -141,7 +144,7 @@ void ReferenceProcessor::ProcessReferences(bool concurrent, TimingLogger* timing
StartPreservingReferences(self);
}
// Preserve all white objects with finalize methods and schedule them for finalization.
- finalizer_reference_queue_.EnqueueFinalizerReferences(cleared_references_, is_marked_callback,
+ finalizer_reference_queue_.EnqueueFinalizerReferences(&cleared_references_, is_marked_callback,
mark_object_callback, arg);
process_mark_stack_callback(arg);
if (concurrent) {
@@ -149,10 +152,10 @@ void ReferenceProcessor::ProcessReferences(bool concurrent, TimingLogger* timing
}
}
// Clear all finalizer referent reachable soft and weak references with white referents.
- soft_reference_queue_.ClearWhiteReferences(cleared_references_, is_marked_callback, arg);
- weak_reference_queue_.ClearWhiteReferences(cleared_references_, is_marked_callback, arg);
+ soft_reference_queue_.ClearWhiteReferences(&cleared_references_, is_marked_callback, arg);
+ weak_reference_queue_.ClearWhiteReferences(&cleared_references_, is_marked_callback, arg);
// Clear all phantom references with white referents.
- phantom_reference_queue_.ClearWhiteReferences(cleared_references_, is_marked_callback, arg);
+ phantom_reference_queue_.ClearWhiteReferences(&cleared_references_, is_marked_callback, arg);
// At this point all reference queues other than the cleared references should be empty.
DCHECK(soft_reference_queue_.IsEmpty());
DCHECK(weak_reference_queue_.IsEmpty());
@@ -176,39 +179,33 @@ void ReferenceProcessor::ProcessReferences(bool concurrent, TimingLogger* timing
// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been
// marked, put it on the appropriate list in the heap for later processing.
void ReferenceProcessor::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* ref,
- IsMarkedCallback is_marked_callback, void* arg) {
+ IsHeapReferenceMarkedCallback* is_marked_callback,
+ void* arg) {
// klass can be the class of the old object if the visitor already updated the class of ref.
+ DCHECK(klass != nullptr);
DCHECK(klass->IsReferenceClass());
- mirror::Object* referent = ref->GetReferent<kWithoutReadBarrier>();
- if (referent != nullptr) {
- mirror::Object* forward_address = is_marked_callback(referent, arg);
- // Null means that the object is not currently marked.
- if (forward_address == nullptr) {
- Thread* self = Thread::Current();
- // TODO: Remove these locks, and use atomic stacks for storing references?
- // We need to check that the references haven't already been enqueued since we can end up
- // scanning the same reference multiple times due to dirty cards.
- if (klass->IsSoftReferenceClass()) {
- soft_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
- } else if (klass->IsWeakReferenceClass()) {
- weak_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
- } else if (klass->IsFinalizerReferenceClass()) {
- finalizer_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
- } else if (klass->IsPhantomReferenceClass()) {
- phantom_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
- } else {
- LOG(FATAL) << "Invalid reference type " << PrettyClass(klass) << " " << std::hex
- << klass->GetAccessFlags();
- }
- } else if (referent != forward_address) {
- // Referent is already marked and we need to update it.
- ref->SetReferent<false>(forward_address);
+ mirror::HeapReference<mirror::Object>* referent = ref->GetReferentReferenceAddr();
+ if (referent->AsMirrorPtr() != nullptr && !is_marked_callback(referent, arg)) {
+ Thread* self = Thread::Current();
+ // TODO: Remove these locks, and use atomic stacks for storing references?
+ // We need to check that the references haven't already been enqueued since we can end up
+ // scanning the same reference multiple times due to dirty cards.
+ if (klass->IsSoftReferenceClass()) {
+ soft_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
+ } else if (klass->IsWeakReferenceClass()) {
+ weak_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
+ } else if (klass->IsFinalizerReferenceClass()) {
+ finalizer_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
+ } else if (klass->IsPhantomReferenceClass()) {
+ phantom_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
+ } else {
+ LOG(FATAL) << "Invalid reference type " << PrettyClass(klass) << " " << std::hex
+ << klass->GetAccessFlags();
}
}
}
-void ReferenceProcessor::EnqueueClearedReferences() {
- Thread* self = Thread::Current();
+void ReferenceProcessor::EnqueueClearedReferences(Thread* self) {
Locks::mutator_lock_->AssertNotHeld(self);
if (!cleared_references_.IsEmpty()) {
// When a runtime isn't started there are no reference queues to care about so ignore.
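
Not part of the patch: PreserveSoftReferenceCallback above is essentially an adapter, presenting the new heap-reference interface while still driving an old-style MarkObjectCallback underneath. Below is a minimal sketch of that adapter shape with stand-in types and a toy mark callback; the assert mirrors the patch's DCHECK that marking never returns null.

    #include <cassert>
    #include <cstdio>

    struct Object {};

    struct HeapReference {
      Object* AsMirrorPtr() const { return ptr; }
      void Assign(Object* obj) { ptr = obj; }
      Object* ptr = nullptr;
    };

    // Old-style mark callback: marks the object and returns its (possibly moved) address.
    typedef Object* (MarkObjectCallback)(Object* obj, void* arg);

    struct ProcessReferencesArgs {
      MarkObjectCallback* mark_callback;
      void* arg;
    };

    // Adapter: mark the referent unconditionally, write the returned address back into the
    // slot (it may have moved), and report the slot as marked.
    bool PreserveSoftReferenceCallback(HeapReference* ref, void* arg) {
      ProcessReferencesArgs* args = static_cast<ProcessReferencesArgs*>(arg);
      Object* new_obj = args->mark_callback(ref->AsMirrorPtr(), args->arg);
      assert(new_obj != nullptr);
      ref->Assign(new_obj);
      return true;
    }

    static Object moved;
    static Object* MarkAndMove(Object*, void*) { return &moved; }  // toy mark callback

    int main() {
      Object original;
      HeapReference slot; slot.Assign(&original);
      ProcessReferencesArgs args{&MarkAndMove, nullptr};
      bool marked = PreserveSoftReferenceCallback(&slot, &args);
      std::printf("marked=%d slot_moved=%d\n", marked, slot.AsMirrorPtr() == &moved);
      return 0;
    }
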
diff --git a/runtime/gc/reference_processor.h b/runtime/gc/reference_processor.h
index f082a9e..ff7da52 100644
--- a/runtime/gc/reference_processor.h
+++ b/runtime/gc/reference_processor.h
@@ -40,9 +40,10 @@ class Heap;
class ReferenceProcessor {
public:
explicit ReferenceProcessor();
- static mirror::Object* PreserveSoftReferenceCallback(mirror::Object* obj, void* arg);
+ static bool PreserveSoftReferenceCallback(mirror::HeapReference<mirror::Object>* obj, void* arg)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void ProcessReferences(bool concurrent, TimingLogger* timings, bool clear_soft_references,
- IsMarkedCallback* is_marked_callback,
+ IsHeapReferenceMarkedCallback* is_marked_callback,
MarkObjectCallback* mark_object_callback,
ProcessMarkStackCallback* process_mark_stack_callback, void* arg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
@@ -54,21 +55,21 @@ class ReferenceProcessor {
// Decode the referent, may block if references are being processed.
mirror::Object* GetReferent(Thread* self, mirror::Reference* reference)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(lock_);
- void EnqueueClearedReferences() LOCKS_EXCLUDED(Locks::mutator_lock_);
+ void EnqueueClearedReferences(Thread* self) LOCKS_EXCLUDED(Locks::mutator_lock_);
void DelayReferenceReferent(mirror::Class* klass, mirror::Reference* ref,
- IsMarkedCallback is_marked_callback, void* arg)
+ IsHeapReferenceMarkedCallback* is_marked_callback, void* arg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
private:
class ProcessReferencesArgs {
public:
- ProcessReferencesArgs(IsMarkedCallback* is_marked_callback,
+ ProcessReferencesArgs(IsHeapReferenceMarkedCallback* is_marked_callback,
MarkObjectCallback* mark_callback, void* arg)
: is_marked_callback_(is_marked_callback), mark_callback_(mark_callback), arg_(arg) {
}
// The is marked callback is null when the args aren't set up.
- IsMarkedCallback* is_marked_callback_;
+ IsHeapReferenceMarkedCallback* is_marked_callback_;
MarkObjectCallback* mark_callback_;
void* arg_;
};
diff --git a/runtime/gc/reference_queue.cc b/runtime/gc/reference_queue.cc
index 3910c29..19476e6 100644
--- a/runtime/gc/reference_queue.cc
+++ b/runtime/gc/reference_queue.cc
@@ -26,8 +26,7 @@ namespace art {
namespace gc {
ReferenceQueue::ReferenceQueue()
- : lock_("reference queue lock"),
- list_(nullptr) {
+ : lock_("reference queue lock"), list_(nullptr) {
}
void ReferenceQueue::AtomicEnqueueIfNotEnqueued(Thread* self, mirror::Reference* ref) {
@@ -104,76 +103,61 @@ void ReferenceQueue::Dump(std::ostream& os) const {
}
}
-void ReferenceQueue::ClearWhiteReferences(ReferenceQueue& cleared_references,
- IsMarkedCallback* preserve_callback,
+void ReferenceQueue::ClearWhiteReferences(ReferenceQueue* cleared_references,
+ IsHeapReferenceMarkedCallback* preserve_callback,
void* arg) {
while (!IsEmpty()) {
mirror::Reference* ref = DequeuePendingReference();
- mirror::Object* referent = ref->GetReferent<kWithoutReadBarrier>();
- if (referent != nullptr) {
- mirror::Object* forward_address = preserve_callback(referent, arg);
- if (forward_address == nullptr) {
- // Referent is white, clear it.
- if (Runtime::Current()->IsActiveTransaction()) {
- ref->ClearReferent<true>();
- } else {
- ref->ClearReferent<false>();
- }
- if (ref->IsEnqueuable()) {
- cleared_references.EnqueuePendingReference(ref);
- }
- } else if (referent != forward_address) {
- // Object moved, need to updated the referent.
- ref->SetReferent<false>(forward_address);
+ mirror::HeapReference<mirror::Object>* referent_addr = ref->GetReferentReferenceAddr();
+ if (referent_addr->AsMirrorPtr() != nullptr && !preserve_callback(referent_addr, arg)) {
+ // Referent is white, clear it.
+ if (Runtime::Current()->IsActiveTransaction()) {
+ ref->ClearReferent<true>();
+ } else {
+ ref->ClearReferent<false>();
+ }
+ if (ref->IsEnqueuable()) {
+ cleared_references->EnqueuePendingReference(ref);
}
}
}
}
-void ReferenceQueue::EnqueueFinalizerReferences(ReferenceQueue& cleared_references,
- IsMarkedCallback* is_marked_callback,
+void ReferenceQueue::EnqueueFinalizerReferences(ReferenceQueue* cleared_references,
+ IsHeapReferenceMarkedCallback* is_marked_callback,
MarkObjectCallback* mark_object_callback,
void* arg) {
while (!IsEmpty()) {
mirror::FinalizerReference* ref = DequeuePendingReference()->AsFinalizerReference();
- mirror::Object* referent = ref->GetReferent<kWithoutReadBarrier>();
- if (referent != nullptr) {
- mirror::Object* forward_address = is_marked_callback(referent, arg);
- // If the referent isn't marked, mark it and update the
- if (forward_address == nullptr) {
- forward_address = mark_object_callback(referent, arg);
- // If the referent is non-null the reference must queuable.
- DCHECK(ref->IsEnqueuable());
- // Move the updated referent to the zombie field.
- if (Runtime::Current()->IsActiveTransaction()) {
- ref->SetZombie<true>(forward_address);
- ref->ClearReferent<true>();
- } else {
- ref->SetZombie<false>(forward_address);
- ref->ClearReferent<false>();
- }
- cleared_references.EnqueueReference(ref);
- } else if (referent != forward_address) {
- ref->SetReferent<false>(forward_address);
+ mirror::HeapReference<mirror::Object>* referent_addr = ref->GetReferentReferenceAddr();
+ if (referent_addr->AsMirrorPtr() != nullptr && !is_marked_callback(referent_addr, arg)) {
+ mirror::Object* forward_address = mark_object_callback(referent_addr->AsMirrorPtr(), arg);
+ // If the referent is non-null the reference must be enqueuable.
+ DCHECK(ref->IsEnqueuable());
+ // Move the updated referent to the zombie field.
+ if (Runtime::Current()->IsActiveTransaction()) {
+ ref->SetZombie<true>(forward_address);
+ ref->ClearReferent<true>();
+ } else {
+ ref->SetZombie<false>(forward_address);
+ ref->ClearReferent<false>();
}
+ cleared_references->EnqueueReference(ref);
}
}
}
-void ReferenceQueue::ForwardSoftReferences(IsMarkedCallback* preserve_callback,
- void* arg) {
+void ReferenceQueue::ForwardSoftReferences(IsHeapReferenceMarkedCallback* preserve_callback,
+ void* arg) {
if (UNLIKELY(IsEmpty())) {
return;
}
mirror::Reference* const head = list_;
mirror::Reference* ref = head;
do {
- mirror::Object* referent = ref->GetReferent<kWithoutReadBarrier>();
- if (referent != nullptr) {
- mirror::Object* forward_address = preserve_callback(referent, arg);
- if (forward_address != nullptr && forward_address != referent) {
- ref->SetReferent<false>(forward_address);
- }
+ mirror::HeapReference<mirror::Object>* referent_addr = ref->GetReferentReferenceAddr();
+ if (referent_addr->AsMirrorPtr() != nullptr) {
+ UNUSED(preserve_callback(referent_addr, arg));
}
ref = ref->GetPendingNext();
} while (LIKELY(ref != head));
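
Not part of the patch: the pending-reference list walked by ForwardSoftReferences is circular, so the loop starts at the head and stops when it wraps back around, and the callback is now invoked purely for its side effect (mark and fix up the referent) with its boolean result deliberately discarded. A toy version of that walk with stand-in types:

    #include <cstdio>

    // Stand-in for a pending reference on the circular singly linked list.
    struct Ref {
      int* referent;      // stand-in for the HeapReference<Object> referent slot
      Ref* pending_next;  // the last node points back at the head
    };

    // Stand-in preserve callback: "mark" the referent in place and report it as marked.
    static bool Preserve(int* referent, void*) {
      ++*referent;
      return true;
    }

    // Same walk shape as ForwardSoftReferences above.
    static void ForwardSoftReferences(Ref* head, void* arg) {
      if (head == nullptr) {
        return;  // empty queue
      }
      Ref* ref = head;
      do {
        if (ref->referent != nullptr) {
          (void)Preserve(ref->referent, arg);  // result deliberately ignored, as in the patch
        }
        ref = ref->pending_next;
      } while (ref != head);
    }

    int main() {
      int a = 0, c = 0;
      Ref r1{&a, nullptr}, r2{nullptr, nullptr}, r3{&c, nullptr};
      r1.pending_next = &r2; r2.pending_next = &r3; r3.pending_next = &r1;  // close the ring
      ForwardSoftReferences(&r1, nullptr);
      std::printf("a=%d c=%d\n", a, c);  // each live referent preserved exactly once
      return 0;
    }
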
diff --git a/runtime/gc/reference_queue.h b/runtime/gc/reference_queue.h
index 1d8cc1a..8ef0d20 100644
--- a/runtime/gc/reference_queue.h
+++ b/runtime/gc/reference_queue.h
@@ -58,23 +58,22 @@ class ReferenceQueue {
mirror::Reference* DequeuePendingReference() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Enqueues finalizer references with white referents. White referents are blackened, moved to the
// zombie field, and the referent field is cleared.
- void EnqueueFinalizerReferences(ReferenceQueue& cleared_references,
- IsMarkedCallback* is_marked_callback,
+ void EnqueueFinalizerReferences(ReferenceQueue* cleared_references,
+ IsHeapReferenceMarkedCallback* is_marked_callback,
MarkObjectCallback* mark_object_callback, void* arg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Walks the reference list marking any references subject to the reference clearing policy.
// References with a black referent are removed from the list. References with white referents
// biased toward saving are blackened and also removed from the list.
- void ForwardSoftReferences(IsMarkedCallback* preserve_callback, void* arg)
+ void ForwardSoftReferences(IsHeapReferenceMarkedCallback* preserve_callback, void* arg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Unlink the reference list clearing references objects with white referents. Cleared references
// registered to a reference queue are scheduled for appending by the heap worker thread.
- void ClearWhiteReferences(ReferenceQueue& cleared_references,
- IsMarkedCallback* is_marked_callback,
- void* arg)
+ void ClearWhiteReferences(ReferenceQueue* cleared_references,
+ IsHeapReferenceMarkedCallback* is_marked_callback, void* arg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void Dump(std::ostream& os) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool IsEmpty() const {
return list_ == nullptr;
}
diff --git a/runtime/mirror/reference.h b/runtime/mirror/reference.h
index 0b6e759..9c9d87b 100644
--- a/runtime/mirror/reference.h
+++ b/runtime/mirror/reference.h
@@ -21,6 +21,13 @@
namespace art {
+namespace gc {
+
+class ReferenceProcessor;
+class ReferenceQueue;
+
+} // namespace gc
+
struct ReferenceOffsets;
struct FinalizerReferenceOffsets;
@@ -41,7 +48,6 @@ class MANAGED Reference : public Object {
static MemberOffset ReferentOffset() {
return OFFSET_OF_OBJECT_MEMBER(Reference, referent_);
}
-
template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
Object* GetReferent() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return GetFieldObjectVolatile<Object, kDefaultVerifyFlags, kReadBarrierOption>(
@@ -55,7 +61,6 @@ class MANAGED Reference : public Object {
void ClearReferent() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
SetFieldObjectVolatile<kTransactionActive>(ReferentOffset(), nullptr);
}
-
// Volatile read/write is not necessary since the java pending next is only accessed from
// the java threads for cleared references. Once these cleared references have a null referent,
// we never end up reading their pending next from the GC again.
@@ -76,6 +81,11 @@ class MANAGED Reference : public Object {
bool IsEnqueuable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
private:
+ // Note: This avoids a read barrier; it should only be used by the GC.
+ HeapReference<Object>* GetReferentReferenceAddr() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return GetFieldObjectReferenceAddr<kDefaultVerifyFlags>(ReferentOffset());
+ }
+
// Field order required by test "ValidateFieldOrderOfJavaCppUnionClasses".
HeapReference<Reference> pending_next_; // Note this is Java volatile:
HeapReference<Object> queue_; // Note this is Java volatile:
@@ -83,6 +93,8 @@ class MANAGED Reference : public Object {
HeapReference<Object> referent_; // Note this is Java volatile:
friend struct art::ReferenceOffsets; // for verifying offset information
+ friend class gc::ReferenceProcessor;
+ friend class gc::ReferenceQueue;
DISALLOW_IMPLICIT_CONSTRUCTORS(Reference);
};
diff --git a/runtime/object_callbacks.h b/runtime/object_callbacks.h
index dd8ce16..d8c1c40 100644
--- a/runtime/object_callbacks.h
+++ b/runtime/object_callbacks.h
@@ -70,6 +70,11 @@ typedef void (DelayReferenceReferentCallback)(mirror::Class* klass, mirror::Refe
// address the object (if the object didn't move, returns the object input parameter).
typedef mirror::Object* (IsMarkedCallback)(mirror::Object* object, void* arg)
__attribute__((warn_unused_result));
+
+// Returns true if the object in the heap reference is marked. If it is marked and has moved, the
+// callback updates the heap reference to contain the new value.
+typedef bool (IsHeapReferenceMarkedCallback)(mirror::HeapReference<mirror::Object>* object,
+ void* arg) __attribute__((warn_unused_result));
typedef void (ProcessMarkStackCallback)(void* arg);
} // namespace art