From 8fa2dad7fe7909c8335101d6c8904ae997cdf29f Mon Sep 17 00:00:00 2001
From: Mathieu Chartier <mathieuc@google.com>
Date: Thu, 13 Mar 2014 12:22:56 -0700
Subject: Refactor reference code into mirror namespace.

Added two new files: mirror/reference.h and mirror/reference-inl.h.

Change-Id: Ibe3ff6379aef7096ff130594535b7f7c0b7dabce
---
 runtime/gc/collector/mark_sweep.cc |  5 +-
 runtime/gc/collector/semi_space.cc |  3 +-
 runtime/gc/heap.cc                 | 78 +++++------------------------
 runtime/gc/heap.h                  | 44 +----
 runtime/gc/reference_queue.cc      | 98 +++++++++++++++++-------------------
 runtime/gc/reference_queue.h       | 20 ++++----
 6 files changed, 76 insertions(+), 172 deletions(-)

(limited to 'runtime/gc')

diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 8b9f60e..4f3ad32 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -44,6 +44,7 @@
 #include "mirror/class-inl.h"
 #include "mirror/class_loader.h"
 #include "mirror/dex_cache.h"
+#include "mirror/reference-inl.h"
 #include "mirror/object-inl.h"
 #include "mirror/object_array.h"
 #include "mirror/object_array-inl.h"
@@ -1189,9 +1190,7 @@ void MarkSweep::SweepLargeObjects(bool swap_bitmaps) {
 // the heap for later processing.
 void MarkSweep::DelayReferenceReferent(mirror::Class* klass, Object* obj) {
   DCHECK(klass != nullptr);
-  DCHECK(klass->IsReferenceClass());
-  DCHECK(obj != NULL);
-  heap_->DelayReferenceReferent(klass, obj, IsMarkedCallback, this);
+  heap_->DelayReferenceReferent(klass, obj->AsReference(), IsMarkedCallback, this);
 }
 
 class MarkObjectVisitor {
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index 89694d4..23b155c 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -44,6 +44,7 @@
 #include "mirror/class-inl.h"
 #include "mirror/class_loader.h"
 #include "mirror/dex_cache.h"
+#include "mirror/reference-inl.h"
 #include "mirror/object-inl.h"
 #include "mirror/object_array.h"
 #include "mirror/object_array-inl.h"
@@ -633,7 +634,7 @@ void SemiSpace::SweepLargeObjects(bool swap_bitmaps) {
 // Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been
 // marked, put it on the appropriate list in the heap for later processing.
 void SemiSpace::DelayReferenceReferent(mirror::Class* klass, Object* obj) {
-  heap_->DelayReferenceReferent(klass, obj, MarkedForwardingAddressCallback, this);
+  heap_->DelayReferenceReferent(klass, obj->AsReference(), MarkedForwardingAddressCallback, this);
 }
 
 class SemiSpaceMarkObjectVisitor {
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 13dd90e..76b94fd 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -54,6 +54,7 @@
 #include "mirror/object.h"
 #include "mirror/object-inl.h"
 #include "mirror/object_array-inl.h"
+#include "mirror/reference-inl.h"
 #include "object_utils.h"
 #include "os.h"
 #include "runtime.h"
@@ -103,11 +104,6 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max
       ignore_max_footprint_(ignore_max_footprint),
       have_zygote_space_(false),
       large_object_threshold_(std::numeric_limits<size_t>::max()),  // Starts out disabled.
-      soft_reference_queue_(this),
-      weak_reference_queue_(this),
-      finalizer_reference_queue_(this),
-      phantom_reference_queue_(this),
-      cleared_references_(this),
       collector_type_running_(kCollectorTypeNone),
       last_gc_type_(collector::kGcTypeNone),
       next_gc_type_(collector::kGcTypePartial),
@@ -144,11 +140,6 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max
       current_non_moving_allocator_(kAllocatorTypeNonMoving),
       bump_pointer_space_(nullptr),
       temp_space_(nullptr),
-      reference_referent_offset_(0),
-      reference_queue_offset_(0),
-      reference_queueNext_offset_(0),
-      reference_pendingNext_offset_(0),
-      finalizer_reference_zombie_offset_(0),
       min_free_(min_free),
       max_free_(max_free),
       target_utilization_(target_utilization),
@@ -792,29 +783,12 @@ void Heap::ProcessReferences(TimingLogger& timings, bool clear_soft,
   timings.EndSplit();
 }
 
-bool Heap::IsEnqueued(mirror::Object* ref) const {
-  // Since the references are stored as cyclic lists it means that once enqueued, the pending next
-  // will always be non-null.
-  return ref->GetFieldObject<mirror::Object*>(GetReferencePendingNextOffset(), false) != nullptr;
-}
-
-bool Heap::IsEnqueuable(mirror::Object* ref) const {
-  DCHECK(ref != nullptr);
-  const mirror::Object* queue =
-      ref->GetFieldObject<mirror::Object*>(GetReferenceQueueOffset(), false);
-  const mirror::Object* queue_next =
-      ref->GetFieldObject<mirror::Object*>(GetReferenceQueueNextOffset(), false);
-  return queue != nullptr && queue_next == nullptr;
-}
-
 // Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been
 // marked, put it on the appropriate list in the heap for later processing.
-void Heap::DelayReferenceReferent(mirror::Class* klass, mirror::Object* obj,
+void Heap::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* ref,
                                   IsMarkedCallback is_marked_callback, void* arg) {
-  DCHECK(klass != nullptr);
-  DCHECK(klass->IsReferenceClass());
-  DCHECK(obj != nullptr);
-  mirror::Object* referent = GetReferenceReferent(obj);
+  DCHECK_EQ(klass, ref->GetClass());
+  mirror::Object* referent = ref->GetReferent();
   if (referent != nullptr) {
     mirror::Object* forward_address = is_marked_callback(referent, arg);
     // Null means that the object is not currently marked.
@@ -824,20 +798,20 @@ void Heap::DelayReferenceReferent(mirror::Class* klass, mirror::Object* obj,
       // We need to check that the references haven't already been enqueued since we can end up
       // scanning the same reference multiple times due to dirty cards.
       if (klass->IsSoftReferenceClass()) {
-        soft_reference_queue_.AtomicEnqueueIfNotEnqueued(self, obj);
+        soft_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
       } else if (klass->IsWeakReferenceClass()) {
-        weak_reference_queue_.AtomicEnqueueIfNotEnqueued(self, obj);
+        weak_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
       } else if (klass->IsFinalizerReferenceClass()) {
-        finalizer_reference_queue_.AtomicEnqueueIfNotEnqueued(self, obj);
+        finalizer_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
       } else if (klass->IsPhantomReferenceClass()) {
-        phantom_reference_queue_.AtomicEnqueueIfNotEnqueued(self, obj);
+        phantom_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
       } else {
        LOG(FATAL) << "Invalid reference type " << PrettyClass(klass) << " " << std::hex
                   << klass->GetAccessFlags();
      }
    } else if (referent != forward_address) {
      // Referent is already marked and we need to update it.
-      SetReferenceReferent(obj, forward_address);
+      ref->SetReferent(forward_address);
    }
  }
}
@@ -2013,8 +1987,9 @@ class VerifyObjectVisitor {
     VerifyReferenceVisitor visitor(heap_);
     // The class doesn't count as a reference but we should verify it anyways.
     collector::MarkSweep::VisitObjectReferences(obj, visitor, true);
-    if (obj->GetClass()->IsReferenceClass()) {
-      visitor(obj, heap_->GetReferenceReferent(obj), MemberOffset(0), false);
+    if (obj->IsReferenceInstance()) {
+      mirror::Reference* ref = obj->AsReference();
+      visitor(obj, ref->GetReferent(), mirror::Reference::ReferentOffset(), false);
     }
     failed_ = failed_ || visitor.Failed();
   }
@@ -2476,35 +2451,6 @@ void Heap::ClearGrowthLimit() {
   non_moving_space_->ClearGrowthLimit();
 }
 
-void Heap::SetReferenceOffsets(MemberOffset reference_referent_offset,
-                               MemberOffset reference_queue_offset,
-                               MemberOffset reference_queueNext_offset,
-                               MemberOffset reference_pendingNext_offset,
-                               MemberOffset finalizer_reference_zombie_offset) {
-  reference_referent_offset_ = reference_referent_offset;
-  reference_queue_offset_ = reference_queue_offset;
-  reference_queueNext_offset_ = reference_queueNext_offset;
-  reference_pendingNext_offset_ = reference_pendingNext_offset;
-  finalizer_reference_zombie_offset_ = finalizer_reference_zombie_offset;
-  CHECK_NE(reference_referent_offset_.Uint32Value(), 0U);
-  CHECK_NE(reference_queue_offset_.Uint32Value(), 0U);
-  CHECK_NE(reference_queueNext_offset_.Uint32Value(), 0U);
-  CHECK_NE(reference_pendingNext_offset_.Uint32Value(), 0U);
-  CHECK_NE(finalizer_reference_zombie_offset_.Uint32Value(), 0U);
-}
-
-void Heap::SetReferenceReferent(mirror::Object* reference, mirror::Object* referent) {
-  DCHECK(reference != NULL);
-  DCHECK_NE(reference_referent_offset_.Uint32Value(), 0U);
-  reference->SetFieldObject(reference_referent_offset_, referent, true);
-}
-
-mirror::Object* Heap::GetReferenceReferent(mirror::Object* reference) {
-  DCHECK(reference != NULL);
-  DCHECK_NE(reference_referent_offset_.Uint32Value(), 0U);
-  return reference->GetFieldObject<mirror::Object*>(reference_referent_offset_, true);
-}
-
 void Heap::AddFinalizerReference(Thread* self, mirror::Object* object) {
   ScopedObjectAccess soa(self);
   JValue result;
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 12c55c4..1e0a596 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -312,26 +312,6 @@ class Heap {
     return discontinuous_spaces_;
   }
 
-  void SetReferenceOffsets(MemberOffset reference_referent_offset,
-                           MemberOffset reference_queue_offset,
-                           MemberOffset reference_queueNext_offset,
-                           MemberOffset reference_pendingNext_offset,
-                           MemberOffset finalizer_reference_zombie_offset);
-  MemberOffset GetReferenceReferentOffset() const {
-    return reference_referent_offset_;
-  }
-  MemberOffset GetReferenceQueueOffset() const {
-    return reference_queue_offset_;
-  }
-  MemberOffset GetReferenceQueueNextOffset() const {
-    return reference_queueNext_offset_;
-  }
-  MemberOffset GetReferencePendingNextOffset() const {
-    return reference_pendingNext_offset_;
-  }
-  MemberOffset GetFinalizerReferenceZombieOffset() const {
-    return finalizer_reference_zombie_offset_;
-  }
   static mirror::Object* PreserveSoftReferenceCallback(mirror::Object* obj, void* arg);
   void ProcessReferences(TimingLogger& timings, bool clear_soft,
                          IsMarkedCallback* is_marked_callback,
@@ -624,20 +604,9 @@ class Heap {
   bool IsValidContinuousSpaceObjectAddress(const mirror::Object* obj) const
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  // Pushes a list of cleared references out to the managed heap.
-  void SetReferenceReferent(mirror::Object* reference, mirror::Object* referent)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  mirror::Object* GetReferenceReferent(mirror::Object* reference)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  void ClearReferenceReferent(mirror::Object* reference)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    SetReferenceReferent(reference, nullptr);
-  }
   void EnqueueClearedReferences();
   // Returns true if the reference object has not yet been enqueued.
-  bool IsEnqueuable(mirror::Object* ref) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  bool IsEnqueued(mirror::Object* ref) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  void DelayReferenceReferent(mirror::Class* klass, mirror::Object* obj,
+  void DelayReferenceReferent(mirror::Class* klass, mirror::Reference* ref,
                               IsMarkedCallback is_marked_callback, void* arg)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
@@ -918,17 +887,6 @@ class Heap {
   // Temp space is the space which the semispace collector copies to.
   space::BumpPointerSpace* temp_space_;
 
-  // offset of java.lang.ref.Reference.referent
-  MemberOffset reference_referent_offset_;
-  // offset of java.lang.ref.Reference.queue
-  MemberOffset reference_queue_offset_;
-  // offset of java.lang.ref.Reference.queueNext
-  MemberOffset reference_queueNext_offset_;
-  // offset of java.lang.ref.Reference.pendingNext
-  MemberOffset reference_pendingNext_offset_;
-  // offset of java.lang.ref.FinalizerReference.zombie
-  MemberOffset finalizer_reference_zombie_offset_;
-
   // Minimum free guarantees that you always have at least min_free_ free bytes after growing for
   // utilization, regardless of target utilization ratio.
   size_t min_free_;
diff --git a/runtime/gc/reference_queue.cc b/runtime/gc/reference_queue.cc
index 203701f..aee7891 100644
--- a/runtime/gc/reference_queue.cc
+++ b/runtime/gc/reference_queue.cc
@@ -20,91 +20,84 @@
 #include "heap.h"
 #include "mirror/class-inl.h"
 #include "mirror/object-inl.h"
+#include "mirror/reference-inl.h"
 
 namespace art {
 namespace gc {
 
-ReferenceQueue::ReferenceQueue(Heap* heap)
+ReferenceQueue::ReferenceQueue()
     : lock_("reference queue lock"),
-      heap_(heap),
       list_(nullptr) {
 }
 
-void ReferenceQueue::AtomicEnqueueIfNotEnqueued(Thread* self, mirror::Object* ref) {
+void ReferenceQueue::AtomicEnqueueIfNotEnqueued(Thread* self, mirror::Reference* ref) {
   DCHECK(ref != NULL);
   MutexLock mu(self, lock_);
-  if (!heap_->IsEnqueued(ref)) {
+  if (!ref->IsEnqueued()) {
     EnqueuePendingReference(ref);
   }
 }
 
-void ReferenceQueue::EnqueueReference(mirror::Object* ref) {
-  CHECK(heap_->IsEnqueuable(ref));
+void ReferenceQueue::EnqueueReference(mirror::Reference* ref) {
+  CHECK(ref->IsEnqueuable());
   EnqueuePendingReference(ref);
 }
 
-void ReferenceQueue::EnqueuePendingReference(mirror::Object* ref) {
+void ReferenceQueue::EnqueuePendingReference(mirror::Reference* ref) {
   DCHECK(ref != NULL);
-  MemberOffset pending_next_offset = heap_->GetReferencePendingNextOffset();
-  DCHECK_NE(pending_next_offset.Uint32Value(), 0U);
   if (IsEmpty()) {
     // 1 element cyclic queue, i.e.: Reference ref = ..; ref.pendingNext = ref;
-    if (Runtime::Current()->IsActiveTransaction()) {
-      ref->SetFieldObject<true>(pending_next_offset, ref, false);
-    } else {
-      ref->SetFieldObject<false>(pending_next_offset, ref, false);
-    }
     list_ = ref;
   } else {
-    mirror::Object* head = list_->GetFieldObject<mirror::Object*>(pending_next_offset, false);
+    mirror::Reference* head = list_->GetPendingNext();
     if (Runtime::Current()->IsActiveTransaction()) {
-      ref->SetFieldObject<true>(pending_next_offset, head, false);
-      list_->SetFieldObject<true>(pending_next_offset, ref, false);
+      ref->SetPendingNext<true>(head);
     } else {
-      ref->SetFieldObject<false>(pending_next_offset, head, false);
-      list_->SetFieldObject<false>(pending_next_offset, ref, false);
+      ref->SetPendingNext<false>(head);
    }
  }
+  if (Runtime::Current()->IsActiveTransaction()) {
+    list_->SetPendingNext<true>(ref);
+  } else {
+    list_->SetPendingNext<false>(ref);
+  }
}
 
-mirror::Object* ReferenceQueue::DequeuePendingReference() {
+mirror::Reference* ReferenceQueue::DequeuePendingReference() {
   DCHECK(!IsEmpty());
-  MemberOffset pending_next_offset = heap_->GetReferencePendingNextOffset();
-  mirror::Object* head = list_->GetFieldObject<mirror::Object*>(pending_next_offset, false);
+  mirror::Reference* head = list_->GetPendingNext();
   DCHECK(head != nullptr);
-  mirror::Object* ref;
+  mirror::Reference* ref;
   // Note: the following code is thread-safe because it is only called from ProcessReferences which
   // is single threaded.
   if (list_ == head) {
     ref = list_;
     list_ = nullptr;
   } else {
-    mirror::Object* next = head->GetFieldObject<mirror::Object*>(pending_next_offset, false);
+    mirror::Reference* next = head->GetPendingNext();
     if (Runtime::Current()->IsActiveTransaction()) {
-      list_->SetFieldObject<true>(pending_next_offset, next, false);
+      list_->SetPendingNext<true>(next);
     } else {
-      list_->SetFieldObject<false>(pending_next_offset, next, false);
+      list_->SetPendingNext<false>(next);
     }
     ref = head;
   }
   if (Runtime::Current()->IsActiveTransaction()) {
-    ref->SetFieldObject<true>(pending_next_offset, nullptr, false);
+    ref->SetPendingNext<true>(nullptr);
   } else {
-    ref->SetFieldObject<false>(pending_next_offset, nullptr, false);
+    ref->SetPendingNext<false>(nullptr);
   }
   return ref;
 }
 
 void ReferenceQueue::Dump(std::ostream& os) const {
-  mirror::Object* cur = list_;
+  mirror::Reference* cur = list_;
   os << "Reference starting at list_=" << list_ << "\n";
   while (cur != nullptr) {
-    mirror::Object* pending_next =
-        cur->GetFieldObject<mirror::Object*>(heap_->GetReferencePendingNextOffset(), false);
+    mirror::Reference* pending_next = cur->GetPendingNext();
     os << "PendingNext=" << pending_next;
-    if (cur->GetClass()->IsFinalizerReferenceClass()) {
-      os << " Zombie=" <<
-          cur->GetFieldObject<mirror::Object*>(heap_->GetFinalizerReferenceZombieOffset(), false);
+    if (cur->IsFinalizerReferenceInstance()) {
+      os << " Zombie=" << cur->AsFinalizerReference()->GetZombie();
     }
     os << "\n";
     cur = pending_next;
@@ -115,19 +108,23 @@ void ReferenceQueue::ClearWhiteReferences(ReferenceQueue& cleared_references,
                                           IsMarkedCallback* preserve_callback,
                                           void* arg) {
   while (!IsEmpty()) {
-    mirror::Object* ref = DequeuePendingReference();
-    mirror::Object* referent = heap_->GetReferenceReferent(ref);
+    mirror::Reference* ref = DequeuePendingReference();
+    mirror::Object* referent = ref->GetReferent();
     if (referent != nullptr) {
       mirror::Object* forward_address = preserve_callback(referent, arg);
       if (forward_address == nullptr) {
         // Referent is white, clear it.
-        heap_->ClearReferenceReferent(ref);
-        if (heap_->IsEnqueuable(ref)) {
+        if (Runtime::Current()->IsActiveTransaction()) {
+          ref->ClearReferent<true>();
+        } else {
+          ref->ClearReferent<false>();
+        }
+        if (ref->IsEnqueuable()) {
          cleared_references.EnqueuePendingReference(ref);
        }
      } else if (referent != forward_address) {
        // Object moved, need to update the referent.
-        heap_->SetReferenceReferent(ref, forward_address);
+        ref->SetReferent(forward_address);
      }
    }
  }
@@ -138,42 +135,43 @@ void ReferenceQueue::EnqueueFinalizerReferences(ReferenceQueue& cleared_referenc
                                                 MarkObjectCallback recursive_mark_callback,
                                                 void* arg) {
   while (!IsEmpty()) {
-    mirror::Object* ref = DequeuePendingReference();
-    mirror::Object* referent = heap_->GetReferenceReferent(ref);
+    mirror::FinalizerReference* ref = DequeuePendingReference()->AsFinalizerReference();
+    mirror::Object* referent = ref->GetReferent();
     if (referent != nullptr) {
       mirror::Object* forward_address = is_marked_callback(referent, arg);
       // If the referent isn't marked, mark it and update the zombie field.
       if (forward_address == nullptr) {
         forward_address = recursive_mark_callback(referent, arg);
         // If the referent is non-null the reference must be enqueuable.
-        DCHECK(heap_->IsEnqueuable(ref));
+        DCHECK(ref->IsEnqueuable());
         // Move the updated referent to the zombie field.
         if (Runtime::Current()->IsActiveTransaction()) {
-          ref->SetFieldObject<true>(heap_->GetFinalizerReferenceZombieOffset(), forward_address, false);
+          ref->SetZombie<true>(forward_address);
+          ref->ClearReferent<true>();
         } else {
-          ref->SetFieldObject<false>(heap_->GetFinalizerReferenceZombieOffset(), forward_address, false);
+          ref->SetZombie<false>(forward_address);
+          ref->ClearReferent<false>();
         }
-        heap_->ClearReferenceReferent(ref);
         cleared_references.EnqueueReference(ref);
       } else if (referent != forward_address) {
-        heap_->SetReferenceReferent(ref, forward_address);
+        ref->SetReferent(forward_address);
       }
     }
   }
 }
 
 void ReferenceQueue::PreserveSomeSoftReferences(IsMarkedCallback preserve_callback, void* arg) {
-  ReferenceQueue cleared(heap_);
+  ReferenceQueue cleared;
   while (!IsEmpty()) {
-    mirror::Object* ref = DequeuePendingReference();
-    mirror::Object* referent = heap_->GetReferenceReferent(ref);
+    mirror::Reference* ref = DequeuePendingReference();
+    mirror::Object* referent = ref->GetReferent();
     if (referent != nullptr) {
       mirror::Object* forward_address = preserve_callback(referent, arg);
       if (forward_address == nullptr) {
         // Either the reference isn't marked or we don't wish to preserve it.
         cleared.EnqueuePendingReference(ref);
       } else if (forward_address != referent) {
-        heap_->SetReferenceReferent(ref, forward_address);
+        ref->SetReferent(forward_address);
      }
    }
  }
diff --git a/runtime/gc/reference_queue.h b/runtime/gc/reference_queue.h
index 99314ba..8d392ba 100644
--- a/runtime/gc/reference_queue.h
+++ b/runtime/gc/reference_queue.h
@@ -31,6 +31,10 @@
 #include "thread_pool.h"
 
 namespace art {
+namespace mirror {
+class Reference;
+}  // namespace mirror
+
 namespace gc {
 
 class Heap;
@@ -40,18 +44,18 @@ class Heap;
 // java.lang.ref.Reference objects.
 class ReferenceQueue {
  public:
-  explicit ReferenceQueue(Heap* heap);
+  explicit ReferenceQueue();
   // Enqueue a reference if it is not already enqueued. Thread safe to call from multiple threads
   // since it uses a lock to avoid a race between checking for the reference's presence and adding
   // it.
-  void AtomicEnqueueIfNotEnqueued(Thread* self, mirror::Object* ref)
+  void AtomicEnqueueIfNotEnqueued(Thread* self, mirror::Reference* ref)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(lock_);
   // Enqueue a reference. Unlike EnqueuePendingReference, EnqueueReference checks that the
   // reference IsEnqueuable. Not thread safe, used when mutators are paused to minimize lock
   // overhead.
-  void EnqueueReference(mirror::Object* ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  void EnqueuePendingReference(mirror::Object* ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  mirror::Object* DequeuePendingReference() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void EnqueueReference(mirror::Reference* ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void EnqueuePendingReference(mirror::Reference* ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  mirror::Reference* DequeuePendingReference() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   // Enqueues finalizer references with white referents. White referents are blackened, moved to the
   // zombie field, and the referent field is cleared.
   void EnqueueFinalizerReferences(ReferenceQueue& cleared_references,
@@ -76,7 +80,7 @@ class ReferenceQueue {
   void Clear() {
     list_ = nullptr;
   }
-  mirror::Object* GetList() {
+  mirror::Reference* GetList() {
     return list_;
   }
 
@@ -84,10 +88,8 @@ class ReferenceQueue {
   // Lock, used for parallel GC reference enqueuing. It allows for multiple threads simultaneously
   // calling AtomicEnqueueIfNotEnqueued.
   Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
-  // The heap contains the reference offsets.
-  Heap* const heap_;
   // The actual reference list. Not a root since it will be nullptr when the GC is not running.
-  mirror::Object* list_;
+  mirror::Reference* list_;
 };
 
 }  // namespace gc
-- 
cgit v1.1
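
Editor's note: the two new files named in the commit message, mirror/reference.h and
mirror/reference-inl.h, are not shown above because this cgit view is filtered to
'runtime/gc'. For orientation only, the sketch below reconstructs the mirror::Reference
interface implied by the call sites in this patch. It is an assumption, not the actual
header: method names and the kTransactionActive template parameter are inferred from
usage above, and the real file's field offsets, read-barrier plumbing, and lock
annotations will differ.

// Hypothetical sketch of the interface implied by this patch -- NOT the real
// runtime/mirror/reference.h. Compiles standalone as declarations only.
namespace art {

class MemberOffset;  // real type lives in offsets.h

namespace mirror {

class Object { /* stand-in for the real mirror/object.h */ };

// Mirrors java.lang.ref.Reference; replaces the Heap-side accessors this patch
// deletes (Heap::GetReferenceReferent, Heap::IsEnqueued, Heap::IsEnqueuable, ...).
class Reference : public Object {
 public:
  static MemberOffset ReferentOffset();  // offset of Reference.referent

  Object* GetReferent();
  void SetReferent(Object* referent);
  template<bool kTransactionActive> void ClearReferent();

  // The GC threads pending references into a cyclic singly linked list through
  // pendingNext, so a reference counts as enqueued once pendingNext is non-null.
  Reference* GetPendingNext();
  template<bool kTransactionActive> void SetPendingNext(Reference* pending_next);
  bool IsEnqueued();
  bool IsEnqueuable();  // queue != null && queueNext == null
};

// Mirrors java.lang.ref.FinalizerReference and its zombie field.
class FinalizerReference : public Reference {
 public:
  Object* GetZombie();
  template<bool kTransactionActive> void SetZombie(Object* zombie);
};

}  // namespace mirror
}  // namespace art

The companion Object::IsReferenceInstance / AsReference / AsFinalizerReference helpers
used by the collectors above live in mirror/object.h, likewise outside this filtered view.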