author     Mathieu Chartier <mathieuc@google.com>  2013-09-18 14:27:04 -0700
committer  Android Git Automerger <android-git-automerger@android.com>  2013-09-18 14:27:04 -0700
commit     b5a80a1eb89f621950aa9a93c0c6655a24c7cc96 (patch)
tree       62e4e5f59729e47d024aa883a48eca2458ddcbf2
parent     5b0daf5ce04f58e08a5bc38a85480b816e3812b2 (diff)
parent     ba5c7a7eed2911d41946ef96e8bc2cfc11442e85 (diff)
am ba5c7a7e: am 3c60d137: Merge "Fix soft reference clearing issue." into klp-dev
* commit 'ba5c7a7eed2911d41946ef96e8bc2cfc11442e85': Fix soft reference clearing issue.
-rw-r--r--  runtime/gc/collector/mark_sweep.cc  19
-rw-r--r--  runtime/gc/heap.cc                  27
-rw-r--r--  runtime/gc/heap.h                    7
3 files changed, 34 insertions(+), 19 deletions(-)
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 953fbf9..92bfc4e 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -1342,18 +1342,28 @@ void MarkSweep::DelayReferenceReferent(mirror::Class* klass, Object* obj) {
}
Thread* self = Thread::Current();
// TODO: Remove these locks, and use atomic stacks for storing references?
+ // We need to check that the references haven't already been enqueued since we can end up
+ // scanning the same reference multiple times due to dirty cards.
if (klass->IsSoftReferenceClass()) {
MutexLock mu(self, *heap_->GetSoftRefQueueLock());
- heap_->EnqueuePendingReference(obj, &soft_reference_list_);
+ if (!heap_->IsEnqueued(obj)) {
+ heap_->EnqueuePendingReference(obj, &soft_reference_list_);
+ }
} else if (klass->IsWeakReferenceClass()) {
MutexLock mu(self, *heap_->GetWeakRefQueueLock());
- heap_->EnqueuePendingReference(obj, &weak_reference_list_);
+ if (!heap_->IsEnqueued(obj)) {
+ heap_->EnqueuePendingReference(obj, &weak_reference_list_);
+ }
} else if (klass->IsFinalizerReferenceClass()) {
MutexLock mu(self, *heap_->GetFinalizerRefQueueLock());
- heap_->EnqueuePendingReference(obj, &finalizer_reference_list_);
+ if (!heap_->IsEnqueued(obj)) {
+ heap_->EnqueuePendingReference(obj, &finalizer_reference_list_);
+ }
} else if (klass->IsPhantomReferenceClass()) {
MutexLock mu(self, *heap_->GetPhantomRefQueueLock());
- heap_->EnqueuePendingReference(obj, &phantom_reference_list_);
+ if (!heap_->IsEnqueued(obj)) {
+ heap_->EnqueuePendingReference(obj, &phantom_reference_list_);
+ }
} else {
LOG(FATAL) << "Invalid reference type " << PrettyClass(klass)
<< " " << std::hex << klass->GetAccessFlags();
@@ -1499,7 +1509,6 @@ inline bool MarkSweep::IsMarked(const Object* object) const
return heap_->GetMarkBitmap()->Test(object);
}
-
// Unlink the reference list clearing references objects with white
// referents. Cleared references registered to a reference queue are
// scheduled for appending by the heap worker thread.
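
The guard added above matters because the pending lists are cyclic singly linked lists threaded through the references' pendingNext fields: unconditionally re-enqueuing a reference that a dirty card delivered a second time would rewire the anchor's next pointer and silently drop every other queued reference. A minimal standalone sketch of the guarded pattern (PendingRef and the helpers below are illustrative stand-ins that mirror the patched logic, not the real ART types):

#include <cassert>
#include <cstddef>

struct PendingRef {
  PendingRef* pending_next = nullptr;  // nullptr <=> not yet enqueued
};

// Same shape as the simplified Heap::EnqueuePendingReference in this change:
// insert ref into the cyclic list right after the anchor element *list.
void EnqueuePendingReference(PendingRef* ref, PendingRef** list) {
  if (*list == nullptr) {
    ref->pending_next = ref;  // one-element cycle
    *list = ref;
  } else {
    PendingRef* head = (*list)->pending_next;
    ref->pending_next = head;
    (*list)->pending_next = ref;
  }
}

// Same check as the new Heap::IsEnqueued: the list is cyclic, so an
// enqueued reference always has a non-null pending_next.
bool IsEnqueued(const PendingRef* ref) { return ref->pending_next != nullptr; }

int main() {
  PendingRef a, b;
  PendingRef* list = nullptr;
  EnqueuePendingReference(&a, &list);
  EnqueuePendingReference(&b, &list);  // cycle: a -> b -> a
  // Guarded re-enqueue, as in the patched DelayReferenceReferent: a dirty
  // card delivering the same reference twice is now a no-op.
  if (!IsEnqueued(&a)) {
    EnqueuePendingReference(&a, &list);
  }
  assert(a.pending_next == &b && b.pending_next == &a);  // cycle intact
  // Without the guard, the re-enqueue would set a.pending_next back to a,
  // collapsing the cycle to {a} and silently dropping b from the queue.
  return 0;
}
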
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index e0048a0..63f0405 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -1858,21 +1858,24 @@ void Heap::EnqueueReference(mirror::Object* ref, mirror::Object** cleared_refere
EnqueuePendingReference(ref, cleared_reference_list);
}
+bool Heap::IsEnqueued(mirror::Object* ref) {
+ // Since the references are stored as cyclic lists it means that once enqueued, the pending next
+ // will always be non-null.
+ return ref->GetFieldObject<mirror::Object*>(GetReferencePendingNextOffset(), false) != nullptr;
+}
+
void Heap::EnqueuePendingReference(mirror::Object* ref, mirror::Object** list) {
DCHECK(ref != NULL);
DCHECK(list != NULL);
- mirror::Object* pending =
- ref->GetFieldObject<mirror::Object*>(reference_pendingNext_offset_, false);
- if (pending == NULL) {
- if (*list == NULL) {
- ref->SetFieldObject(reference_pendingNext_offset_, ref, false);
- *list = ref;
- } else {
- mirror::Object* head =
- (*list)->GetFieldObject<mirror::Object*>(reference_pendingNext_offset_, false);
- ref->SetFieldObject(reference_pendingNext_offset_, head, false);
- (*list)->SetFieldObject(reference_pendingNext_offset_, ref, false);
- }
+ if (*list == NULL) {
+ // 1 element cyclic queue, ie: Reference ref = ..; ref.pendingNext = ref;
+ ref->SetFieldObject(reference_pendingNext_offset_, ref, false);
+ *list = ref;
+ } else {
+ mirror::Object* head =
+ (*list)->GetFieldObject<mirror::Object*>(reference_pendingNext_offset_, false);
+ ref->SetFieldObject(reference_pendingNext_offset_, head, false);
+ (*list)->SetFieldObject(reference_pendingNext_offset_, ref, false);
}
}
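
Given the cyclic representation above, IsEnqueued reduces to a null check on pendingNext; the counterpart dequeue must restore that null so a reference can be enqueued again later. DequeuePendingReference is only declared in this change (see heap.h below), so the body here is an assumed continuation of the earlier PendingRef sketch, not quoted ART code:

// Assumed counterpart to Heap::DequeuePendingReference, continuing the
// PendingRef sketch above: pop the element after the anchor and clear its
// pending_next so IsEnqueued() returns false for it again.
PendingRef* DequeuePendingReference(PendingRef** list) {
  PendingRef* head = (*list)->pending_next;
  if (head == *list) {
    *list = nullptr;                             // last element: list empty
  } else {
    (*list)->pending_next = head->pending_next;  // unlink head from cycle
  }
  head->pending_next = nullptr;                  // restore not-enqueued state
  return head;
}

With the enqueue order from the earlier sketch (a, then b), the first dequeue returns b, the element most recently spliced in after the anchor.
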
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index c93dacb..6782a51 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -228,10 +228,13 @@ class Heap {
// Returns true if the reference object has not yet been enqueued.
bool IsEnqueuable(const mirror::Object* ref);
- void EnqueueReference(mirror::Object* ref, mirror::Object** list) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void EnqueueReference(mirror::Object* ref, mirror::Object** list)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool IsEnqueued(mirror::Object* ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void EnqueuePendingReference(mirror::Object* ref, mirror::Object** list)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- mirror::Object* DequeuePendingReference(mirror::Object** list) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ mirror::Object* DequeuePendingReference(mirror::Object** list)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
MemberOffset GetReferencePendingNextOffset() {
DCHECK_NE(reference_pendingNext_offset_.Uint32Value(), 0U);