author    Mathieu Chartier <mathieuc@google.com>  2013-09-06 16:40:01 -0700
committer Mathieu Chartier <mathieuc@google.com>  2013-09-19 09:47:39 -0700
commit    0f72e4136aecaf6976fdb55916bbd7b6d5c9c77b (patch)
tree      f66e033a77c0e9bfb39bde51305f8d155b2a73a3 /runtime
parent    9e452d1d097bc0f29a24e13ced5477fa3c9463f9 (diff)
Improve heap verification.
Re-enabled checking the allocation stack for heap verification. Added tracking
of recent frees in DlMallocSpace if debug spaces are enabled. This is useful
when you have heap corruption caused by a live object referencing a recently
freed object. Added various other sanity checks in the GC.

Bug: 10626133
Change-Id: I5ada11966336ae9a06615b16f4b933f05b5d0c32
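For illustration, the recent-free tracking described above amounts to a small ring buffer that records the address and class of each freed object, so a dangling reference can later be mapped back to the type it used to point at. A minimal standalone sketch of that idea follows; names such as RecentFreeBuffer are illustrative, not the ART types:

```cpp
// Illustrative sketch of recent-free tracking (not the ART implementation).
// A fixed-size ring buffer remembers (address, type name) pairs for the last
// N frees; on a suspected dangling reference we scan backwards from the most
// recent entry, since an address may have been freed and reused several times.
#include <array>
#include <cstddef>
#include <string>
#include <utility>

class RecentFreeBuffer {
 public:
  static constexpr size_t kCount = 1 << 4;      // must be a power of two
  static constexpr size_t kMask = kCount - 1;   // so the index wraps with a mask

  void RegisterFree(const void* ptr, std::string type_name) {
    entries_[pos_] = {ptr, std::move(type_name)};
    pos_ = (pos_ + 1) & kMask;                  // advance and wrap
  }

  // Returns the recorded type of the most recent free of `ptr`, or "" if
  // `ptr` was not freed recently.
  std::string FindRecentFree(const void* ptr) const {
    size_t pos = pos_;
    for (size_t i = 0; i < kCount; ++i) {
      pos = (pos != 0) ? pos - 1 : kMask;       // walk backwards in time
      if (entries_[pos].first == ptr) {
        return entries_[pos].second;
      }
    }
    return "";
  }

 private:
  std::array<std::pair<const void*, std::string>, kCount> entries_{};
  size_t pos_ = 0;
};
```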
Diffstat (limited to 'runtime')
-rw-r--r--  runtime/gc/accounting/card_table-inl.h   16
-rw-r--r--  runtime/gc/accounting/card_table.h         6
-rw-r--r--  runtime/gc/collector/mark_sweep.cc        74
-rw-r--r--  runtime/gc/collector/mark_sweep.h          6
-rw-r--r--  runtime/gc/heap.cc                       126
-rw-r--r--  runtime/gc/heap.h                          6
-rw-r--r--  runtime/gc/space/dlmalloc_space.cc        42
-rw-r--r--  runtime/gc/space/dlmalloc_space.h         12
-rw-r--r--  runtime/gc/space/space.h                   2
9 files changed, 213 insertions, 77 deletions
diff --git a/runtime/gc/accounting/card_table-inl.h b/runtime/gc/accounting/card_table-inl.h
index 0cfbd6f..45ab214 100644
--- a/runtime/gc/accounting/card_table-inl.h
+++ b/runtime/gc/accounting/card_table-inl.h
@@ -42,21 +42,22 @@ static inline bool byte_cas(byte old_value, byte new_value, byte* address) {
}
template <typename Visitor>
-inline void CardTable::Scan(SpaceBitmap* bitmap, byte* scan_begin, byte* scan_end,
- const Visitor& visitor, const byte minimum_age) const {
+inline size_t CardTable::Scan(SpaceBitmap* bitmap, byte* scan_begin, byte* scan_end,
+ const Visitor& visitor, const byte minimum_age) const {
DCHECK(bitmap->HasAddress(scan_begin));
DCHECK(bitmap->HasAddress(scan_end - 1)); // scan_end is the byte after the last byte we scan.
byte* card_cur = CardFromAddr(scan_begin);
byte* card_end = CardFromAddr(scan_end);
CheckCardValid(card_cur);
CheckCardValid(card_end);
+ size_t cards_scanned = 0;
// Handle any unaligned cards at the start.
while (!IsAligned<sizeof(word)>(card_cur) && card_cur < card_end) {
if (*card_cur >= minimum_age) {
uintptr_t start = reinterpret_cast<uintptr_t>(AddrFromCard(card_cur));
- uintptr_t end = start + kCardSize;
- bitmap->VisitMarkedRange(start, end, visitor);
+ bitmap->VisitMarkedRange(start, start + kCardSize, visitor);
+ ++cards_scanned;
}
++card_cur;
}
@@ -85,6 +86,7 @@ inline void CardTable::Scan(SpaceBitmap* bitmap, byte* scan_begin, byte* scan_en
DCHECK(*card == static_cast<byte>(start_word) || *card == kCardDirty)
<< "card " << static_cast<size_t>(*card) << " word " << (start_word & 0xFF);
bitmap->VisitMarkedRange(start, start + kCardSize, visitor);
+ ++cards_scanned;
}
start_word >>= 8;
start += kCardSize;
@@ -97,11 +99,13 @@ inline void CardTable::Scan(SpaceBitmap* bitmap, byte* scan_begin, byte* scan_en
while (card_cur < card_end) {
if (*card_cur >= minimum_age) {
uintptr_t start = reinterpret_cast<uintptr_t>(AddrFromCard(card_cur));
- uintptr_t end = start + kCardSize;
- bitmap->VisitMarkedRange(start, end, visitor);
+ bitmap->VisitMarkedRange(start, start + kCardSize, visitor);
+ ++cards_scanned;
}
++card_cur;
}
+
+ return cards_scanned;
}
/*
diff --git a/runtime/gc/accounting/card_table.h b/runtime/gc/accounting/card_table.h
index 37e719f..bb4d1d7 100644
--- a/runtime/gc/accounting/card_table.h
+++ b/runtime/gc/accounting/card_table.h
@@ -100,10 +100,10 @@ class CardTable {
const ModifiedVisitor& modified);
// For every dirty card at least minimum age between begin and end invoke the visitor with the
- // specified argument.
+ // specified argument. Returns how many cards the visitor was run on.
template <typename Visitor>
- void Scan(SpaceBitmap* bitmap, byte* scan_begin, byte* scan_end, const Visitor& visitor,
- const byte minimum_age = kCardDirty) const
+ size_t Scan(SpaceBitmap* bitmap, byte* scan_begin, byte* scan_end, const Visitor& visitor,
+ const byte minimum_age = kCardDirty) const
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
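The Scan() change above (void to size_t) lets callers tally how many cards a scan actually visited, which mark_sweep.cc uses below to cross-check the parallel card scan against a no-op re-scan in debug builds. A minimal sketch of that counting-and-cross-checking pattern, assuming a simplified card array rather than the real CardTable:

```cpp
// Illustrative sketch (not the ART API): a scan routine that invokes a visitor
// on every card at or above minimum_age and returns how many cards it visited,
// so a debug caller can cross-check two independent scans of the same range.
#include <cassert>
#include <cstddef>
#include <vector>

template <typename Visitor>
size_t ScanDirty(const std::vector<unsigned char>& cards,
                 unsigned char minimum_age, const Visitor& visitor) {
  size_t scanned = 0;
  for (size_t i = 0; i < cards.size(); ++i) {
    if (cards[i] >= minimum_age) {
      visitor(i);   // visit the card
      ++scanned;    // and count it
    }
  }
  return scanned;
}

int main() {
  std::vector<unsigned char> cards = {0, 0x70, 0, 0x70, 0x6F};  // arbitrary values
  size_t work_count = 0;
  size_t counted = ScanDirty(cards, 0x70, [&](size_t) { ++work_count; });
  // A no-op re-scan must report the same card count as the real scan,
  // mirroring the DCHECK_EQ(ref_card_count, cards_scanned_) added below.
  size_t recount = ScanDirty(cards, 0x70, [](size_t) {});
  assert(counted == work_count && counted == recount);
  return 0;
}
```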
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index fa1e4e8..167140c 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -208,17 +208,26 @@ bool MarkSweep::HandleDirtyObjectsPhase() {
ProcessReferences(self);
{
- WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
+ ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
SweepSystemWeaks();
}
// Only need to do this if we have the card mark verification on, and only during concurrent GC.
- if (GetHeap()->verify_missing_card_marks_) {
+ if (GetHeap()->verify_missing_card_marks_ || GetHeap()->verify_pre_gc_heap_||
+ GetHeap()->verify_post_gc_heap_) {
WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
// This second sweep makes sure that we don't have any objects in the live stack which point to
// freed objects. These cause problems since their references may be previously freed objects.
SweepArray(GetHeap()->allocation_stack_.get(), false);
}
+
+ timings_.StartSplit("PreSweepingGcVerification");
+ heap_->PreSweepingGcVerification(this);
+ timings_.EndSplit();
+
+ // Ensure that nobody inserted items in the live stack after we swapped the stacks.
+ ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
+ CHECK_GE(live_stack_freeze_size_, GetHeap()->GetLiveStack()->Size());
return true;
}
@@ -228,19 +237,18 @@ bool MarkSweep::IsConcurrent() const {
void MarkSweep::MarkingPhase() {
base::TimingLogger::ScopedSplit split("MarkingPhase", &timings_);
- Heap* heap = GetHeap();
Thread* self = Thread::Current();
BindBitmaps();
FindDefaultMarkBitmap();
// Process dirty cards and add dirty cards to mod union tables.
- heap->ProcessCards(timings_);
+ heap_->ProcessCards(timings_);
// Need to do this before the checkpoint since we don't want any threads to add references to
// the live stack during the recursive mark.
timings_.NewSplit("SwapStacks");
- heap->SwapStacks();
+ heap_->SwapStacks();
WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
if (Locks::mutator_lock_->IsExclusiveHeld(self)) {
@@ -248,11 +256,13 @@ void MarkSweep::MarkingPhase() {
MarkRoots();
} else {
MarkThreadRoots(self);
+ // At this point the live stack should no longer have any mutators which push into it.
MarkNonThreadRoots();
}
+ live_stack_freeze_size_ = heap_->GetLiveStack()->Size();
MarkConcurrentRoots();
- heap->UpdateAndMarkModUnion(this, timings_, GetGcType());
+ heap_->UpdateAndMarkModUnion(this, timings_, GetGcType());
MarkReachableObjects();
}
@@ -279,12 +289,17 @@ void MarkSweep::ReclaimPhase() {
if (!IsConcurrent()) {
ProcessReferences(self);
- WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
- SweepSystemWeaks();
+ {
+ ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
+ SweepSystemWeaks();
+ }
+ timings_.StartSplit("PreSweepingGcVerification");
+ heap_->PreSweepingGcVerification(this);
+ timings_.EndSplit();
} else {
base::TimingLogger::ScopedSplit split("UnMarkAllocStack", &timings_);
- accounting::ObjectStack* allocation_stack = GetHeap()->allocation_stack_.get();
WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
+ accounting::ObjectStack* allocation_stack = GetHeap()->allocation_stack_.get();
// The allocation stack contains things allocated since the start of the GC. These may have been
// marked during this GC meaning they won't be eligible for reclaiming in the next sticky GC.
// Remove these objects from the mark bitmaps so that they will be eligible for sticky
@@ -308,9 +323,6 @@ void MarkSweep::ReclaimPhase() {
ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
VerifyImageRoots();
}
- timings_.StartSplit("PreSweepingGcVerification");
- heap_->PreSweepingGcVerification(this);
- timings_.EndSplit();
{
WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
@@ -421,9 +433,9 @@ inline void MarkSweep::MarkObjectNonNull(const Object* obj) {
// This object was not previously marked.
if (!object_bitmap->Test(obj)) {
object_bitmap->Set(obj);
- // Lock is not needed but is here anyways to please annotalysis.
- MutexLock mu(Thread::Current(), mark_stack_lock_);
if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
+ // Lock is not needed but is here anyways to please annotalysis.
+ MutexLock mu(Thread::Current(), mark_stack_lock_);
ExpandMarkStack();
}
// The object must be pushed on to the mark stack.
@@ -748,7 +760,10 @@ class CardScanTask : public MarkStackTask<false> {
virtual void Run(Thread* self) NO_THREAD_SAFETY_ANALYSIS {
ScanObjectParallelVisitor visitor(this);
accounting::CardTable* card_table = mark_sweep_->GetHeap()->GetCardTable();
- card_table->Scan(bitmap_, begin_, end_, visitor, minimum_age_);
+ size_t cards_scanned = card_table->Scan(bitmap_, begin_, end_, visitor, minimum_age_);
+ mark_sweep_->cards_scanned_.fetch_add(cards_scanned);
+ VLOG(heap) << "Parallel scanning cards " << reinterpret_cast<void*>(begin_) << " - "
+ << reinterpret_cast<void*>(end_) << " = " << cards_scanned;
// Finish by emptying our local mark stack.
MarkStackTask::Run(self);
}
@@ -784,6 +799,8 @@ void MarkSweep::ScanGrayObjects(bool paused, byte minimum_age) {
DCHECK_NE(mark_stack_tasks, 0U);
const size_t mark_stack_delta = std::min(CardScanTask::kMaxSize / 2,
mark_stack_size / mark_stack_tasks + 1);
+ size_t ref_card_count = 0;
+ cards_scanned_ = 0;
for (const auto& space : GetHeap()->GetContinuousSpaces()) {
byte* card_begin = space->Begin();
byte* card_end = space->End();
@@ -810,11 +827,24 @@ void MarkSweep::ScanGrayObjects(bool paused, byte minimum_age) {
thread_pool->AddTask(self, task);
card_begin += card_increment;
}
+
+ if (paused && kIsDebugBuild) {
+ // Make sure we don't miss scanning any cards.
+ size_t scanned_cards = card_table->Scan(space->GetMarkBitmap(), space->Begin(),
+ space->End(), VoidFunctor(), minimum_age);
+ VLOG(heap) << "Scanning space cards " << reinterpret_cast<void*>(space->Begin()) << " - "
+ << reinterpret_cast<void*>(space->End()) << " = " << scanned_cards;
+ ref_card_count += scanned_cards;
+ }
}
+
thread_pool->SetMaxActiveWorkers(thread_count - 1);
thread_pool->StartWorkers(self);
thread_pool->Wait(self, true, true);
thread_pool->StopWorkers(self);
+ if (paused) {
+ DCHECK_EQ(ref_card_count, static_cast<size_t>(cards_scanned_.load()));
+ }
timings_.EndSplit();
} else {
for (const auto& space : GetHeap()->GetContinuousSpaces()) {
@@ -993,7 +1023,10 @@ bool MarkSweep::IsMarkedArrayCallback(const Object* object, void* arg) {
return true;
}
accounting::ObjectStack* live_stack = array_check->live_stack;
- return std::find(live_stack->Begin(), live_stack->End(), object) == live_stack->End();
+ if (std::find(live_stack->Begin(), live_stack->End(), object) == live_stack->End()) {
+ return true;
+ }
+ return false;
}
void MarkSweep::SweepSystemWeaksArray(accounting::ObjectStack* allocations) {
@@ -1557,10 +1590,11 @@ void MarkSweep::ProcessReferences(Object** soft_references, bool clear_soft,
Object** weak_references,
Object** finalizer_references,
Object** phantom_references) {
- DCHECK(soft_references != NULL);
- DCHECK(weak_references != NULL);
- DCHECK(finalizer_references != NULL);
- DCHECK(phantom_references != NULL);
+ CHECK(soft_references != NULL);
+ CHECK(weak_references != NULL);
+ CHECK(finalizer_references != NULL);
+ CHECK(phantom_references != NULL);
+ CHECK(mark_stack_->IsEmpty());
// Unless we are in the zygote or required to clear soft references
// with white references, preserve some white referents.
diff --git a/runtime/gc/collector/mark_sweep.h b/runtime/gc/collector/mark_sweep.h
index fdd0c86..435b086 100644
--- a/runtime/gc/collector/mark_sweep.h
+++ b/runtime/gc/collector/mark_sweep.h
@@ -421,7 +421,6 @@ class MarkSweep : public GarbageCollector {
// Parallel finger.
AtomicInteger atomic_finger_;
-
// Number of non large object bytes freed in this collection.
AtomicInteger freed_bytes_;
// Number of large object bytes freed.
@@ -443,6 +442,10 @@ class MarkSweep : public GarbageCollector {
AtomicInteger work_chunks_created_;
AtomicInteger work_chunks_deleted_;
AtomicInteger reference_count_;
+ AtomicInteger cards_scanned_;
+
+ // Verification.
+ size_t live_stack_freeze_size_;
UniquePtr<Barrier> gc_barrier_;
Mutex large_object_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
@@ -454,6 +457,7 @@ class MarkSweep : public GarbageCollector {
private:
friend class AddIfReachesAllocSpaceVisitor; // Used by mod-union table.
+ friend class CardScanTask;
friend class CheckBitmapVisitor;
friend class CheckReferenceVisitor;
friend class art::gc::Heap;
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 63f0405..1b46257 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -643,7 +643,8 @@ bool Heap::IsHeapAddress(const mirror::Object* obj) {
return FindSpaceFromObject(obj, true) != NULL;
}
-bool Heap::IsLiveObjectLocked(const mirror::Object* obj) {
+bool Heap::IsLiveObjectLocked(const mirror::Object* obj, bool search_allocation_stack,
+ bool search_live_stack, bool sorted) {
// Locks::heap_bitmap_lock_->AssertReaderHeld(Thread::Current());
if (obj == NULL || UNLIKELY(!IsAligned<kObjectAlignment>(obj))) {
return false;
@@ -663,12 +664,30 @@ bool Heap::IsLiveObjectLocked(const mirror::Object* obj) {
}
}
// This is covering the allocation/live stack swapping that is done without mutators suspended.
- for (size_t i = 0; i < 5; ++i) {
- if (allocation_stack_->Contains(const_cast<mirror::Object*>(obj)) ||
- live_stack_->Contains(const_cast<mirror::Object*>(obj))) {
- return true;
+ for (size_t i = 0; i < (sorted ? 1 : 5); ++i) {
+ if (i > 0) {
+ NanoSleep(MsToNs(10));
+ }
+
+ if (search_allocation_stack) {
+ if (sorted) {
+ if (allocation_stack_->ContainsSorted(const_cast<mirror::Object*>(obj))) {
+ return true;
+ }
+ } else if (allocation_stack_->Contains(const_cast<mirror::Object*>(obj))) {
+ return true;
+ }
+ }
+
+ if (search_live_stack) {
+ if (sorted) {
+ if (live_stack_->ContainsSorted(const_cast<mirror::Object*>(obj))) {
+ return true;
+ }
+ } else if (live_stack_->Contains(const_cast<mirror::Object*>(obj))) {
+ return true;
+ }
}
- NanoSleep(MsToNs(10));
}
// We need to check the bitmaps again since there is a race where we mark something as live and
// then clear the stack containing it.
@@ -707,10 +726,9 @@ void Heap::DumpSpaces() {
}
void Heap::VerifyObjectBody(const mirror::Object* obj) {
- if (UNLIKELY(!IsAligned<kObjectAlignment>(obj))) {
- LOG(FATAL) << "Object isn't aligned: " << obj;
- }
- if (UNLIKELY(GetObjectsAllocated() <= 10)) { // Ignore early dawn of the universe verifications.
+ CHECK(IsAligned<kObjectAlignment>(obj)) << "Object isn't aligned: " << obj;
+ // Ignore early dawn of the universe verifications.
+ if (UNLIKELY(static_cast<size_t>(num_bytes_allocated_.load()) < 10 * KB)) {
return;
}
const byte* raw_addr = reinterpret_cast<const byte*>(obj) +
@@ -1332,7 +1350,7 @@ static void RootMatchesObjectVisitor(const mirror::Object* root, void* arg) {
class ScanVisitor {
public:
void operator()(const mirror::Object* obj) const {
- LOG(INFO) << "Would have rescanned object " << obj;
+ LOG(ERROR) << "Would have rescanned object " << obj;
}
};
@@ -1358,12 +1376,42 @@ class VerifyReferenceVisitor {
accounting::ObjectStack* alloc_stack = heap_->allocation_stack_.get();
accounting::ObjectStack* live_stack = heap_->live_stack_.get();
- if (obj != NULL) {
+ if (!failed_) {
+ // Print message only on first failure to prevent spam.
+ LOG(ERROR) << "!!!!!!!!!!!!!!Heap corruption detected!!!!!!!!!!!!!!!!!!!";
+ failed_ = true;
+ }
+ if (obj != nullptr) {
byte* card_addr = card_table->CardFromAddr(obj);
- LOG(ERROR) << "Object " << obj << " references dead object " << ref << " at offset " << offset
- << "\nIsDirty = " << (*card_addr == accounting::CardTable::kCardDirty)
- << "\nObj type " << PrettyTypeOf(obj)
- << "\nRef type " << PrettyTypeOf(ref);
+ LOG(ERROR) << "Object " << obj << " references dead object " << ref << " at offset "
+ << offset << "\n card value = " << static_cast<int>(*card_addr);
+ if (heap_->IsHeapAddress(obj->GetClass())) {
+ LOG(ERROR) << "Obj type " << PrettyTypeOf(obj);
+ } else {
+ LOG(ERROR) << "Object " << obj << " class(" << obj->GetClass() << ") not a heap address";
+ }
+
+ // Attempt to find the class inside of the recently freed objects.
+ space::ContinuousSpace* ref_space = heap_->FindContinuousSpaceFromObject(ref, true);
+ if (ref_space->IsDlMallocSpace()) {
+ space::DlMallocSpace* space = ref_space->AsDlMallocSpace();
+ mirror::Class* ref_class = space->FindRecentFreedObject(ref);
+ if (ref_class != nullptr) {
+ LOG(ERROR) << "Reference " << ref << " found as a recently freed object with class "
+ << PrettyClass(ref_class);
+ } else {
+ LOG(ERROR) << "Reference " << ref << " not found as a recently freed object";
+ }
+ }
+
+ if (ref->GetClass() != nullptr && heap_->IsHeapAddress(ref->GetClass()) &&
+ ref->GetClass()->IsClass()) {
+ LOG(ERROR) << "Ref type " << PrettyTypeOf(ref);
+ } else {
+ LOG(ERROR) << "Ref " << ref << " class(" << ref->GetClass()
+ << ") is not a valid heap address";
+ }
+
card_table->CheckAddrIsInCardTable(reinterpret_cast<const byte*>(obj));
void* cover_begin = card_table->AddrFromCard(card_addr);
void* cover_end = reinterpret_cast<void*>(reinterpret_cast<size_t>(cover_begin) +
@@ -1376,12 +1424,18 @@ class VerifyReferenceVisitor {
if (bitmap != NULL && bitmap->Test(obj)) {
LOG(ERROR) << "Object " << obj << " found in live bitmap";
}
- if (alloc_stack->ContainsSorted(const_cast<mirror::Object*>(obj))) {
+ if (alloc_stack->Contains(const_cast<mirror::Object*>(obj))) {
LOG(ERROR) << "Object " << obj << " found in allocation stack";
}
- if (live_stack->ContainsSorted(const_cast<mirror::Object*>(obj))) {
+ if (live_stack->Contains(const_cast<mirror::Object*>(obj))) {
LOG(ERROR) << "Object " << obj << " found in live stack";
}
+ if (alloc_stack->Contains(const_cast<mirror::Object*>(ref))) {
+ LOG(ERROR) << "Ref " << ref << " found in allocation stack";
+ }
+ if (live_stack->Contains(const_cast<mirror::Object*>(ref))) {
+ LOG(ERROR) << "Ref " << ref << " found in live stack";
+ }
// Attempt to see if the card table missed the reference.
ScanVisitor scan_visitor;
byte* byte_cover_begin = reinterpret_cast<byte*>(card_table->AddrFromCard(card_addr));
@@ -1398,20 +1452,11 @@ class VerifyReferenceVisitor {
} else {
LOG(ERROR) << "Root references dead object " << ref << "\nRef type " << PrettyTypeOf(ref);
}
- if (alloc_stack->ContainsSorted(const_cast<mirror::Object*>(ref))) {
- LOG(ERROR) << "Reference " << ref << " found in allocation stack!";
- }
- if (live_stack->ContainsSorted(const_cast<mirror::Object*>(ref))) {
- LOG(ERROR) << "Reference " << ref << " found in live stack!";
- }
- heap_->image_mod_union_table_->Dump(LOG(ERROR) << "Image mod-union table: ");
- heap_->zygote_mod_union_table_->Dump(LOG(ERROR) << "Zygote mod-union table: ");
- failed_ = true;
}
}
bool IsLive(const mirror::Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
- return heap_->IsLiveObjectLocked(obj);
+ return heap_->IsLiveObjectLocked(obj, true, false, true);
}
static void VerifyRoots(const mirror::Object* root, void* arg) {
@@ -1434,6 +1479,8 @@ class VerifyObjectVisitor {
// Note: we are verifying the references in obj but not obj itself, this is because obj must
// be live or else how did we find it in the live bitmap?
VerifyReferenceVisitor visitor(heap_);
+ // The class doesn't count as a reference but we should verify it anyways.
+ visitor(obj, obj->GetClass(), MemberOffset(0), false);
collector::MarkSweep::VisitObjectReferences(obj, visitor);
failed_ = failed_ || visitor.Failed();
}
@@ -1457,9 +1504,18 @@ bool Heap::VerifyHeapReferences() {
VerifyObjectVisitor visitor(this);
Runtime::Current()->VisitRoots(VerifyReferenceVisitor::VerifyRoots, &visitor, false, false);
GetLiveBitmap()->Visit(visitor);
- // We don't want to verify the objects in the allocation stack since they themselves may be
+ // Verify objects in the allocation stack since these will be objects which were:
+ // 1. Allocated prior to the GC (pre GC verification).
+ // 2. Allocated during the GC (pre sweep GC verification).
+ for (mirror::Object** it = allocation_stack_->Begin(); it != allocation_stack_->End(); ++it) {
+ visitor(*it);
+ }
+ // We don't want to verify the objects in the live stack since they themselves may be
// pointing to dead objects if they are not reachable.
if (visitor.Failed()) {
+ // Dump mod-union tables.
+ image_mod_union_table_->Dump(LOG(ERROR) << "Image mod-union table: ");
+ zygote_mod_union_table_->Dump(LOG(ERROR) << "Zygote mod-union table: ");
DumpSpaces();
return false;
}
@@ -1580,11 +1636,6 @@ bool Heap::VerifyMissingCardMarks() {
void Heap::SwapStacks() {
allocation_stack_.swap(live_stack_);
-
- // Sort the live stack so that we can quickly binary search it later.
- if (verify_object_mode_ > kNoHeapVerification) {
- live_stack_->Sort();
- }
}
void Heap::ProcessCards(base::TimingLogger& timings) {
@@ -1650,22 +1701,17 @@ void Heap::PreSweepingGcVerification(collector::GarbageCollector* gc) {
// Called before sweeping occurs since we want to make sure we are not going to reclaim any
// reachable objects.
if (verify_post_gc_heap_) {
- ThreadList* thread_list = Runtime::Current()->GetThreadList();
Thread* self = Thread::Current();
CHECK_NE(self->GetState(), kRunnable);
- Locks::mutator_lock_->SharedUnlock(self);
- thread_list->SuspendAll();
{
WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
// Swapping bound bitmaps does nothing.
gc->SwapBitmaps();
if (!VerifyHeapReferences()) {
- LOG(FATAL) << "Post " << gc->GetName() << "GC verification failed";
+ LOG(FATAL) << "Pre sweeping " << gc->GetName() << " GC verification failed";
}
gc->SwapBitmaps();
}
- thread_list->ResumeAll();
- Locks::mutator_lock_->SharedLock(self);
}
}
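The heap.cc changes above extend VerifyHeapReferences() so that objects sitting in the allocation stack are verified in addition to those in the live bitmap. A simplified sketch of that verification shape, using stand-in types rather than the real Heap/ObjectStack classes:

```cpp
// Illustrative sketch of heap-reference verification (not the ART code):
// every reachable object's referents must themselves be live, where "live"
// means present in the live bitmap or in the allocation stack. Objects in
// the allocation stack are walked as well, mirroring the change above.
#include <algorithm>
#include <unordered_set>
#include <vector>

struct Obj { std::vector<Obj*> refs; };

bool VerifyHeapReferences(const std::unordered_set<Obj*>& live_bitmap,
                          const std::vector<Obj*>& allocation_stack) {
  auto is_live = [&](Obj* o) {
    return live_bitmap.count(o) != 0 ||
           std::find(allocation_stack.begin(), allocation_stack.end(), o) !=
               allocation_stack.end();
  };
  bool failed = false;
  auto verify = [&](Obj* obj) {
    for (Obj* ref : obj->refs) {
      if (ref != nullptr && !is_live(ref)) {
        failed = true;  // a live object references a dead/freed object
      }
    }
  };
  for (Obj* obj : live_bitmap) {
    verify(obj);
  }
  // Also verify objects allocated since the GC started (the allocation stack).
  for (Obj* obj : allocation_stack) {
    verify(obj);
  }
  return !failed;
}
```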
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 6782a51..0b64261 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -146,7 +146,8 @@ class Heap {
// Check sanity of all live references.
void VerifyHeap() LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
bool VerifyHeapReferences()
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool VerifyMissingCardMarks()
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -158,7 +159,8 @@ class Heap {
// Returns true if 'obj' is a live heap object, false otherwise (including for invalid addresses).
// Requires the heap lock to be held.
- bool IsLiveObjectLocked(const mirror::Object* obj)
+ bool IsLiveObjectLocked(const mirror::Object* obj, bool search_allocation_stack = true,
+ bool search_live_stack = true, bool sorted = false)
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
// Initiates an explicit garbage collection.
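IsLiveObjectLocked() now takes a sorted flag so that callers holding an already-sorted stack can use a binary search instead of a linear scan. A minimal sketch of the two lookup strategies (Contains vs. a ContainsSorted-style check), assuming a plain std::vector rather than the ObjectStack type:

```cpp
// Illustrative sketch (not the ObjectStack API): linear Contains() versus a
// ContainsSorted()-style binary search, which is only valid once the stack
// has been sorted.
#include <algorithm>
#include <vector>

bool Contains(const std::vector<const void*>& stack, const void* obj) {
  return std::find(stack.begin(), stack.end(), obj) != stack.end();
}

bool ContainsSorted(const std::vector<const void*>& stack, const void* obj) {
  // Precondition: stack is sorted, e.g. via std::sort(stack.begin(), stack.end()).
  return std::binary_search(stack.begin(), stack.end(), obj);
}
```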
diff --git a/runtime/gc/space/dlmalloc_space.cc b/runtime/gc/space/dlmalloc_space.cc
index 8b99e96..a9440d3 100644
--- a/runtime/gc/space/dlmalloc_space.cc
+++ b/runtime/gc/space/dlmalloc_space.cc
@@ -17,6 +17,7 @@
#include "dlmalloc_space-inl.h"
#include "gc/accounting/card_table.h"
#include "gc/heap.h"
+#include "mirror/object-inl.h"
#include "runtime.h"
#include "thread.h"
#include "utils.h"
@@ -118,8 +119,9 @@ size_t DlMallocSpace::bitmap_index_ = 0;
DlMallocSpace::DlMallocSpace(const std::string& name, MemMap* mem_map, void* mspace, byte* begin,
byte* end, size_t growth_limit)
: MemMapSpace(name, mem_map, end - begin, kGcRetentionPolicyAlwaysCollect),
- num_bytes_allocated_(0), num_objects_allocated_(0), total_bytes_allocated_(0),
- total_objects_allocated_(0), lock_("allocation space lock", kAllocSpaceLock), mspace_(mspace),
+ recent_free_pos_(0), num_bytes_allocated_(0), num_objects_allocated_(0),
+ total_bytes_allocated_(0), total_objects_allocated_(0),
+ lock_("allocation space lock", kAllocSpaceLock), mspace_(mspace),
growth_limit_(growth_limit) {
CHECK(mspace != NULL);
@@ -137,6 +139,11 @@ DlMallocSpace::DlMallocSpace(const std::string& name, MemMap* mem_map, void* msp
StringPrintf("allocspace %s mark-bitmap %d", name.c_str(), static_cast<int>(bitmap_index)),
Begin(), Capacity()));
DCHECK(live_bitmap_.get() != NULL) << "could not create allocspace mark bitmap #" << bitmap_index;
+
+ for (auto& freed : recent_freed_objects_) {
+ freed.first = nullptr;
+ freed.second = nullptr;
+ }
}
DlMallocSpace* DlMallocSpace::Create(const std::string& name, size_t initial_size, size_t
@@ -318,6 +325,27 @@ DlMallocSpace* DlMallocSpace::CreateZygoteSpace(const char* alloc_space_name) {
return alloc_space;
}
+mirror::Class* DlMallocSpace::FindRecentFreedObject(const mirror::Object* obj) {
+ size_t pos = recent_free_pos_;
+ // Start at the most recently freed object and work our way back since there may be duplicates
+ // caused by dlmalloc reusing memory.
+ if (kRecentFreeCount > 0) {
+ for (size_t i = 0; i + 1 < kRecentFreeCount + 1; ++i) {
+ pos = pos != 0 ? pos - 1 : kRecentFreeMask;
+ if (recent_freed_objects_[pos].first == obj) {
+ return recent_freed_objects_[pos].second;
+ }
+ }
+ }
+ return nullptr;
+}
+
+void DlMallocSpace::RegisterRecentFree(mirror::Object* ptr) {
+ recent_freed_objects_[recent_free_pos_].first = ptr;
+ recent_freed_objects_[recent_free_pos_].second = ptr->GetClass();
+ recent_free_pos_ = (recent_free_pos_ + 1) & kRecentFreeMask;
+}
+
size_t DlMallocSpace::Free(Thread* self, mirror::Object* ptr) {
MutexLock mu(self, lock_);
if (kDebugSpaces) {
@@ -327,6 +355,9 @@ size_t DlMallocSpace::Free(Thread* self, mirror::Object* ptr) {
const size_t bytes_freed = InternalAllocationSize(ptr);
num_bytes_allocated_ -= bytes_freed;
--num_objects_allocated_;
+ if (kRecentFreeCount > 0) {
+ RegisterRecentFree(ptr);
+ }
mspace_free(mspace_, ptr);
return bytes_freed;
}
@@ -346,6 +377,13 @@ size_t DlMallocSpace::FreeList(Thread* self, size_t num_ptrs, mirror::Object** p
bytes_freed += InternalAllocationSize(ptr);
}
+ if (kRecentFreeCount > 0) {
+ MutexLock mu(self, lock_);
+ for (size_t i = 0; i < num_ptrs; i++) {
+ RegisterRecentFree(ptrs[i]);
+ }
+ }
+
if (kDebugSpaces) {
size_t num_broken_ptrs = 0;
for (size_t i = 0; i < num_ptrs; i++) {
diff --git a/runtime/gc/space/dlmalloc_space.h b/runtime/gc/space/dlmalloc_space.h
index 6d52c26..b08da01 100644
--- a/runtime/gc/space/dlmalloc_space.h
+++ b/runtime/gc/space/dlmalloc_space.h
@@ -143,6 +143,9 @@ class DlMallocSpace : public MemMapSpace, public AllocSpace {
return total_objects_allocated_;
}
+ // Returns the class of a recently freed object.
+ mirror::Class* FindRecentFreedObject(const mirror::Object* obj);
+
protected:
DlMallocSpace(const std::string& name, MemMap* mem_map, void* mspace, byte* begin, byte* end,
size_t growth_limit);
@@ -151,15 +154,20 @@ class DlMallocSpace : public MemMapSpace, public AllocSpace {
size_t InternalAllocationSize(const mirror::Object* obj);
mirror::Object* AllocWithoutGrowthLocked(size_t num_bytes, size_t* bytes_allocated)
EXCLUSIVE_LOCKS_REQUIRED(lock_);
-
bool Init(size_t initial_size, size_t maximum_size, size_t growth_size, byte* requested_base);
-
+ void RegisterRecentFree(mirror::Object* ptr);
static void* CreateMallocSpace(void* base, size_t morecore_start, size_t initial_size);
UniquePtr<accounting::SpaceBitmap> live_bitmap_;
UniquePtr<accounting::SpaceBitmap> mark_bitmap_;
UniquePtr<accounting::SpaceBitmap> temp_bitmap_;
+ // Recent allocation buffer.
+ static constexpr size_t kRecentFreeCount = kDebugSpaces ? (1 << 16) : 0;
+ static constexpr size_t kRecentFreeMask = kRecentFreeCount - 1;
+ std::pair<const mirror::Object*, mirror::Class*> recent_freed_objects_[kRecentFreeCount];
+ size_t recent_free_pos_;
+
// Approximate number of bytes which have been allocated into the space.
size_t num_bytes_allocated_;
size_t num_objects_allocated_;
diff --git a/runtime/gc/space/space.h b/runtime/gc/space/space.h
index 231cabc..68df563 100644
--- a/runtime/gc/space/space.h
+++ b/runtime/gc/space/space.h
@@ -46,7 +46,7 @@ class DlMallocSpace;
class ImageSpace;
class LargeObjectSpace;
-static const bool kDebugSpaces = kIsDebugBuild;
+static constexpr bool kDebugSpaces = kIsDebugBuild;
// See Space::GetGcRetentionPolicy.
enum GcRetentionPolicy {