Diffstat (limited to 'runtime/gc')
-rw-r--r--  runtime/gc/accounting/space_bitmap-inl.h    | 124
-rw-r--r--  runtime/gc/accounting/space_bitmap.cc       |  27
-rw-r--r--  runtime/gc/accounting/space_bitmap.h        |  15
-rw-r--r--  runtime/gc/accounting/space_bitmap_test.cc  |  73
-rw-r--r--  runtime/gc/allocator/rosalloc.cc            |   2
-rw-r--r--  runtime/gc/heap-inl.h                       |  18
-rw-r--r--  runtime/gc/heap.h                           |  15
7 files changed, 190 insertions(+), 84 deletions(-)
diff --git a/runtime/gc/accounting/space_bitmap-inl.h b/runtime/gc/accounting/space_bitmap-inl.h
index d6d1b3e..880ff1f 100644
--- a/runtime/gc/accounting/space_bitmap-inl.h
+++ b/runtime/gc/accounting/space_bitmap-inl.h
@@ -29,10 +29,10 @@ inline bool SpaceBitmap::AtomicTestAndSet(const mirror::Object* obj) {
DCHECK_GE(addr, heap_begin_);
const uintptr_t offset = addr - heap_begin_;
const size_t index = OffsetToIndex(offset);
- const word mask = OffsetToMask(offset);
- word* const address = &bitmap_begin_[index];
+ const uword mask = OffsetToMask(offset);
+ uword* const address = &bitmap_begin_[index];
DCHECK_LT(index, bitmap_size_ / kWordSize) << " bitmap_size_ = " << bitmap_size_;
- word old_word;
+ uword old_word;
do {
old_word = *address;
// Fast path: The bit is already set.
@@ -58,8 +58,7 @@ template <typename Visitor>
void SpaceBitmap::VisitMarkedRange(uintptr_t visit_begin, uintptr_t visit_end,
const Visitor& visitor) const {
DCHECK_LT(visit_begin, visit_end);
-#ifdef __LP64__
- // TODO: make the optimized code below work in the 64bit case.
+#if 0
for (uintptr_t i = visit_begin; i < visit_end; i += kAlignment) {
mirror::Object* obj = reinterpret_cast<mirror::Object*>(i);
if (Test(obj)) {
@@ -67,63 +66,84 @@ void SpaceBitmap::VisitMarkedRange(uintptr_t visit_begin, uintptr_t visit_end,
}
}
#else
- const size_t bit_index_start = (visit_begin - heap_begin_) / kAlignment;
- const size_t bit_index_end = (visit_end - heap_begin_ - 1) / kAlignment;
+ DCHECK_LE(heap_begin_, visit_begin);
+ DCHECK_LE(visit_end, HeapLimit());
- size_t word_start = bit_index_start / kBitsPerWord;
- size_t word_end = bit_index_end / kBitsPerWord;
- DCHECK_LT(word_end * kWordSize, Size());
+ const uintptr_t offset_start = visit_begin - heap_begin_;
+ const uintptr_t offset_end = visit_end - heap_begin_;
- // Trim off left_bits of left bits.
- size_t edge_word = bitmap_begin_[word_start];
+ const uintptr_t index_start = OffsetToIndex(offset_start);
+ const uintptr_t index_end = OffsetToIndex(offset_end);
- // Handle bits on the left first as a special case
- size_t left_bits = bit_index_start & (kBitsPerWord - 1);
- if (left_bits != 0) {
- edge_word &= (1 << (kBitsPerWord - left_bits)) - 1;
- }
+ const size_t bit_start = (offset_start / kAlignment) % kBitsPerWord;
+ const size_t bit_end = (offset_end / kAlignment) % kBitsPerWord;
- // If word_start == word_end then handle this case at the same place we handle the right edge.
- if (edge_word != 0 && word_start < word_end) {
- uintptr_t ptr_base = IndexToOffset(word_start) + heap_begin_;
- do {
- const size_t shift = CLZ(edge_word);
- mirror::Object* obj = reinterpret_cast<mirror::Object*>(ptr_base + shift * kAlignment);
- visitor(obj);
- edge_word ^= static_cast<size_t>(kWordHighBitMask) >> shift;
- } while (edge_word != 0);
- }
- word_start++;
+ // Index(begin) ... Index(end)
+ // [xxxxx???][........][????yyyy]
+ // ^ ^
+ // | #---- Bit of visit_end
+ // #---- Bit of visit_begin
+ //
+
+ // Left edge.
+ uword left_edge = bitmap_begin_[index_start];
+ // Mask off the lower bits that are not in the range.
+ left_edge &= ~((static_cast<uword>(1) << bit_start) - 1);
+
+ // Right edge. Either unique, or left_edge.
+ uword right_edge;
+
+ if (index_start < index_end) {
+ // Left edge != right edge.
- for (size_t i = word_start; i < word_end; i++) {
- size_t w = bitmap_begin_[i];
- if (w != 0) {
- uintptr_t ptr_base = IndexToOffset(i) + heap_begin_;
+ // Traverse left edge.
+ if (left_edge != 0) {
+ const uintptr_t ptr_base = IndexToOffset(index_start) + heap_begin_;
do {
- const size_t shift = CLZ(w);
+ const size_t shift = CTZ(left_edge);
mirror::Object* obj = reinterpret_cast<mirror::Object*>(ptr_base + shift * kAlignment);
visitor(obj);
- w ^= static_cast<size_t>(kWordHighBitMask) >> shift;
- } while (w != 0);
+ left_edge ^= (static_cast<uword>(1)) << shift;
+ } while (left_edge != 0);
}
- }
- // Handle the right edge, and also the left edge if both edges are on the same word.
- size_t right_bits = bit_index_end & (kBitsPerWord - 1);
+ // Traverse the full words in the middle.
+ for (size_t i = index_start + 1; i < index_end; ++i) {
+ uword w = bitmap_begin_[i];
+ if (w != 0) {
+ const uintptr_t ptr_base = IndexToOffset(i) + heap_begin_;
+ do {
+ const size_t shift = CTZ(w);
+ mirror::Object* obj = reinterpret_cast<mirror::Object*>(ptr_base + shift * kAlignment);
+ visitor(obj);
+ w ^= (static_cast<uword>(1)) << shift;
+ } while (w != 0);
+ }
+ }
- // If word_start == word_end then we need to use the word which we removed the left bits.
- if (word_start <= word_end) {
- edge_word = bitmap_begin_[word_end];
+ // Right edge is unique.
+ // But maybe we don't have anything to do: visit_end starts in a new word...
+ if (bit_end == 0) {
+ // Do not read memory, as it could be after the end of the bitmap.
+ right_edge = 0;
+ } else {
+ right_edge = bitmap_begin_[index_end];
+ }
+ } else {
+ // Right edge = left edge.
+ right_edge = left_edge;
}
- // Bits that we trim off the right.
- edge_word &= ~((static_cast<size_t>(kWordHighBitMask) >> right_bits) - 1);
- uintptr_t ptr_base = IndexToOffset(word_end) + heap_begin_;
- while (edge_word != 0) {
- const size_t shift = CLZ(edge_word);
- mirror::Object* obj = reinterpret_cast<mirror::Object*>(ptr_base + shift * kAlignment);
- visitor(obj);
- edge_word ^= static_cast<size_t>(kWordHighBitMask) >> shift;
+ // Right edge handling.
+ right_edge &= ((static_cast<uword>(1) << bit_end) - 1);
+ if (right_edge != 0) {
+ const uintptr_t ptr_base = IndexToOffset(index_end) + heap_begin_;
+ do {
+ const size_t shift = CTZ(right_edge);
+ mirror::Object* obj = reinterpret_cast<mirror::Object*>(ptr_base + shift * kAlignment);
+ visitor(obj);
+ right_edge ^= (static_cast<uword>(1)) << shift;
+ } while (right_edge != 0);
}
#endif
}
@@ -133,10 +153,10 @@ inline bool SpaceBitmap::Modify(const mirror::Object* obj, bool do_set) {
DCHECK_GE(addr, heap_begin_);
const uintptr_t offset = addr - heap_begin_;
const size_t index = OffsetToIndex(offset);
- const word mask = OffsetToMask(offset);
+ const uword mask = OffsetToMask(offset);
DCHECK_LT(index, bitmap_size_ / kWordSize) << " bitmap_size_ = " << bitmap_size_;
- word* address = &bitmap_begin_[index];
- word old_word = *address;
+ uword* address = &bitmap_begin_[index];
+ uword old_word = *address;
if (do_set) {
*address = old_word | mask;
} else {
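
Note: the rewritten VisitMarkedRange above splits the requested range into a left-edge word, a run of full middle words, and a right-edge word, and walks the set bits of each word with CTZ so that objects come out in increasing address order. Below is a minimal standalone sketch of that traversal pattern, assuming 8-byte object alignment and using GCC/Clang's __builtin_ctzll in place of ART's CTZ; the names are illustrative and this is not the patched code itself.

#include <cstddef>
#include <cstdint>
#include <vector>

// Standalone sketch of the edge/middle/edge traversal, not the ART implementation.
// Bits are packed low-bit-first, so counting trailing zeros yields set bits in
// increasing address order.
using uword = uintptr_t;
static constexpr size_t kAlignment = 8;
static constexpr size_t kBitsPerWord = sizeof(uword) * 8;

template <typename Visitor>
void VisitRangeSketch(const std::vector<uword>& bitmap, uintptr_t heap_begin,
                      uintptr_t visit_begin, uintptr_t visit_end, Visitor&& visitor) {
  const uintptr_t offset_start = visit_begin - heap_begin;
  const uintptr_t offset_end = visit_end - heap_begin;
  const size_t index_start = offset_start / kAlignment / kBitsPerWord;
  const size_t index_end = offset_end / kAlignment / kBitsPerWord;
  const size_t bit_start = (offset_start / kAlignment) % kBitsPerWord;
  const size_t bit_end = (offset_end / kAlignment) % kBitsPerWord;

  // Visit every set bit of w, interpreting bit k of word 'index' as the object at
  // heap_begin + (index * kBitsPerWord + k) * kAlignment.
  auto visit_word = [&](size_t index, uword w) {
    const uintptr_t ptr_base = index * kBitsPerWord * kAlignment + heap_begin;
    while (w != 0) {
      const size_t shift = __builtin_ctzll(w);   // lowest set bit == lowest address
      visitor(ptr_base + shift * kAlignment);
      w ^= static_cast<uword>(1) << shift;       // clear it and keep going
    }
  };

  // Left edge: drop the bits below visit_begin.
  const uword left_edge =
      bitmap[index_start] & ~((static_cast<uword>(1) << bit_start) - 1);
  if (index_start < index_end) {
    visit_word(index_start, left_edge);
    for (size_t i = index_start + 1; i < index_end; ++i) {  // full middle words
      visit_word(i, bitmap[i]);
    }
    // Right edge: bit_end == 0 means the range ends exactly on a word boundary,
    // so do not read bitmap[index_end] (it may be one past the end of the bitmap).
    const uword right_edge = (bit_end == 0) ? 0 : bitmap[index_end];
    visit_word(index_end, right_edge & ((static_cast<uword>(1) << bit_end) - 1));
  } else {
    // Begin and end fall inside the same word: apply both masks to it.
    visit_word(index_start, left_edge & ((static_cast<uword>(1) << bit_end) - 1));
  }
}

A caller passes something like [&](uintptr_t addr) { ++count; }, which is essentially what the SimpleCounter functor added by the test further down in this patch does.
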
diff --git a/runtime/gc/accounting/space_bitmap.cc b/runtime/gc/accounting/space_bitmap.cc
index ad4ff1b..1957c21 100644
--- a/runtime/gc/accounting/space_bitmap.cc
+++ b/runtime/gc/accounting/space_bitmap.cc
@@ -53,7 +53,7 @@ void ObjectSet::Walk(ObjectCallback* callback, void* arg) {
SpaceBitmap* SpaceBitmap::CreateFromMemMap(const std::string& name, MemMap* mem_map,
byte* heap_begin, size_t heap_capacity) {
CHECK(mem_map != nullptr);
- word* bitmap_begin = reinterpret_cast<word*>(mem_map->Begin());
+ uword* bitmap_begin = reinterpret_cast<uword*>(mem_map->Begin());
size_t bitmap_size = OffsetToIndex(RoundUp(heap_capacity, kAlignment * kBitsPerWord)) * kWordSize;
return new SpaceBitmap(name, mem_map, bitmap_begin, bitmap_size, heap_begin);
}
@@ -107,16 +107,16 @@ void SpaceBitmap::Walk(ObjectCallback* callback, void* arg) {
CHECK(callback != NULL);
uintptr_t end = OffsetToIndex(HeapLimit() - heap_begin_ - 1);
- word* bitmap_begin = bitmap_begin_;
+ uword* bitmap_begin = bitmap_begin_;
for (uintptr_t i = 0; i <= end; ++i) {
- word w = bitmap_begin[i];
+ uword w = bitmap_begin[i];
if (w != 0) {
uintptr_t ptr_base = IndexToOffset(i) + heap_begin_;
do {
- const size_t shift = CLZ(w);
+ const size_t shift = CTZ(w);
mirror::Object* obj = reinterpret_cast<mirror::Object*>(ptr_base + shift * kAlignment);
(*callback)(obj, arg);
- w ^= static_cast<size_t>(kWordHighBitMask) >> shift;
+ w ^= (static_cast<uword>(1)) << shift;
} while (w != 0);
}
}
@@ -150,15 +150,15 @@ void SpaceBitmap::SweepWalk(const SpaceBitmap& live_bitmap,
size_t start = OffsetToIndex(sweep_begin - live_bitmap.heap_begin_);
size_t end = OffsetToIndex(sweep_end - live_bitmap.heap_begin_ - 1);
CHECK_LT(end, live_bitmap.Size() / kWordSize);
- word* live = live_bitmap.bitmap_begin_;
- word* mark = mark_bitmap.bitmap_begin_;
+ uword* live = live_bitmap.bitmap_begin_;
+ uword* mark = mark_bitmap.bitmap_begin_;
for (size_t i = start; i <= end; i++) {
- word garbage = live[i] & ~mark[i];
+ uword garbage = live[i] & ~mark[i];
if (UNLIKELY(garbage != 0)) {
uintptr_t ptr_base = IndexToOffset(i) + live_bitmap.heap_begin_;
do {
- const size_t shift = CLZ(garbage);
- garbage ^= static_cast<size_t>(kWordHighBitMask) >> shift;
+ const size_t shift = CTZ(garbage);
+ garbage ^= (static_cast<uword>(1)) << shift;
*pb++ = reinterpret_cast<mirror::Object*>(ptr_base + shift * kAlignment);
} while (garbage != 0);
// Make sure that there are always enough slots available for an
@@ -254,14 +254,15 @@ void SpaceBitmap::InOrderWalk(ObjectCallback* callback, void* arg) {
CHECK(callback != NULL);
uintptr_t end = Size() / kWordSize;
for (uintptr_t i = 0; i < end; ++i) {
- word w = bitmap_begin_[i];
+ // Use an unsigned word so the shift below is well-defined.
+ uword w = bitmap_begin_[i];
if (UNLIKELY(w != 0)) {
uintptr_t ptr_base = IndexToOffset(i) + heap_begin_;
while (w != 0) {
- const size_t shift = CLZ(w);
+ const size_t shift = CTZ(w);
mirror::Object* obj = reinterpret_cast<mirror::Object*>(ptr_base + shift * kAlignment);
WalkFieldsInOrder(visited.get(), callback, obj, arg);
- w ^= static_cast<size_t>(kWordHighBitMask) >> shift;
+ w ^= (static_cast<uword>(1)) << shift;
}
}
}
diff --git a/runtime/gc/accounting/space_bitmap.h b/runtime/gc/accounting/space_bitmap.h
index 5fd2bce..a88f3e4 100644
--- a/runtime/gc/accounting/space_bitmap.h
+++ b/runtime/gc/accounting/space_bitmap.h
@@ -70,9 +70,9 @@ class SpaceBitmap {
return static_cast<uintptr_t>(index * kAlignment * kBitsPerWord);
}
- // Pack the bits in backwards so they come out in address order when using CLZ.
- static word OffsetToMask(uintptr_t offset) {
- return static_cast<uintptr_t>(kWordHighBitMask) >> ((offset / kAlignment) % kBitsPerWord);
+ // Bits are packed low-bit-first, so CTZ yields set bits in address order.
+ static uword OffsetToMask(uintptr_t offset) {
+ return (static_cast<size_t>(1)) << ((offset / kAlignment) % kBitsPerWord);
}
inline bool Set(const mirror::Object* obj) {
@@ -123,6 +123,9 @@ class SpaceBitmap {
}
}
+ /**
+ * Visit the live objects in the range [visit_begin, visit_end).
+ */
template <typename Visitor>
void VisitMarkedRange(uintptr_t visit_begin, uintptr_t visit_end, const Visitor& visitor) const
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
@@ -140,7 +143,7 @@ class SpaceBitmap {
void CopyFrom(SpaceBitmap* source_bitmap);
// Starting address of our internal storage.
- word* Begin() {
+ uword* Begin() {
return bitmap_begin_;
}
@@ -181,7 +184,7 @@ class SpaceBitmap {
private:
// TODO: heap_end_ is initialized so that the heap bitmap is empty, this doesn't require the -1,
// however, we document that this is expected on heap_end_
- SpaceBitmap(const std::string& name, MemMap* mem_map, word* bitmap_begin, size_t bitmap_size,
+ SpaceBitmap(const std::string& name, MemMap* mem_map, uword* bitmap_begin, size_t bitmap_size,
const void* heap_begin)
: mem_map_(mem_map), bitmap_begin_(bitmap_begin), bitmap_size_(bitmap_size),
heap_begin_(reinterpret_cast<uintptr_t>(heap_begin)),
@@ -193,7 +196,7 @@ class SpaceBitmap {
UniquePtr<MemMap> mem_map_;
// This bitmap itself, word sized for efficiency in scanning.
- word* const bitmap_begin_;
+ uword* const bitmap_begin_;
// Size of this bitmap.
size_t bitmap_size_;
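
Note: the space_bitmap.h change above flips the packing convention. OffsetToMask used to put the first object of a word into the high bit (so CLZ walked bits in address order) and now puts it into the low bit (so CTZ does). A small self-contained check of the new mapping, assuming 8-byte alignment and 64-bit words; the helper bodies mirror the patched functions but are not the ART headers themselves.

#include <cassert>
#include <cstddef>
#include <cstdint>

using uword = uint64_t;
constexpr std::size_t kAlignment = 8;
constexpr std::size_t kBitsPerWord = sizeof(uword) * 8;  // 64

constexpr std::size_t OffsetToIndex(uintptr_t offset) {
  return offset / kAlignment / kBitsPerWord;
}
constexpr uword OffsetToMask(uintptr_t offset) {
  return static_cast<uword>(1) << ((offset / kAlignment) % kBitsPerWord);
}

int main() {
  // Offset 0 -> word 0, bit 0; offset 8 -> word 0, bit 1; offset 512 -> word 1, bit 0.
  assert(OffsetToIndex(0) == 0 && OffsetToMask(0) == (uword(1) << 0));
  assert(OffsetToIndex(8) == 0 && OffsetToMask(8) == (uword(1) << 1));
  assert(OffsetToIndex(8 * 64) == 1 && OffsetToMask(8 * 64) == (uword(1) << 0));
  // Under the old scheme these offsets produced kWordHighBitMask >> 0, >> 1, >> 0,
  // which is why every CLZ in the bitmap walkers becomes a CTZ in this patch.
  return 0;
}
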
diff --git a/runtime/gc/accounting/space_bitmap_test.cc b/runtime/gc/accounting/space_bitmap_test.cc
index ba4e2ac..68994a8 100644
--- a/runtime/gc/accounting/space_bitmap_test.cc
+++ b/runtime/gc/accounting/space_bitmap_test.cc
@@ -86,6 +86,79 @@ TEST_F(SpaceBitmapTest, ScanRange) {
}
}
+class SimpleCounter {
+ public:
+ explicit SimpleCounter(size_t* counter) : count_(counter) {}
+
+ void operator()(mirror::Object* obj) const {
+ (*count_)++;
+ }
+
+ size_t* count_;
+};
+
+class RandGen {
+ public:
+ explicit RandGen(uint32_t seed) : val_(seed) {}
+
+ uint32_t next() {
+ val_ = val_ * 48271 % 2147483647;
+ return val_;
+ }
+
+ uint32_t val_;
+};
+
+void compat_test() NO_THREAD_SAFETY_ANALYSIS {
+ byte* heap_begin = reinterpret_cast<byte*>(0x10000000);
+ size_t heap_capacity = 16 * MB;
+
+ // Seed with 0x1234 for reproducibility.
+ RandGen r(0x1234);
+
+
+ for (int i = 0; i < 5 ; ++i) {
+ UniquePtr<SpaceBitmap> space_bitmap(SpaceBitmap::Create("test bitmap",
+ heap_begin, heap_capacity));
+
+ for (int j = 0; j < 10000; ++j) {
+ size_t offset = (r.next() % heap_capacity) & ~(0x7);
+ bool set = r.next() % 2 == 1;
+
+ if (set) {
+ space_bitmap->Set(reinterpret_cast<mirror::Object*>(heap_begin + offset));
+ } else {
+ space_bitmap->Clear(reinterpret_cast<mirror::Object*>(heap_begin + offset));
+ }
+ }
+
+ for (int j = 0; j < 50; ++j) {
+ size_t count = 0;
+ SimpleCounter c(&count);
+
+ size_t offset = (r.next() % heap_capacity) & ~(0x7);
+ size_t remain = heap_capacity - offset;
+ size_t end = offset + ((r.next() % (remain + 1)) & ~(0x7));
+
+ space_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(heap_begin) + offset,
+ reinterpret_cast<uintptr_t>(heap_begin) + end, c);
+
+ size_t manual = 0;
+ for (uintptr_t k = offset; k < end; k += kObjectAlignment) {
+ if (space_bitmap->Test(reinterpret_cast<mirror::Object*>(heap_begin + k))) {
+ manual++;
+ }
+ }
+
+ EXPECT_EQ(count, manual);
+ }
+ }
+}
+
+TEST_F(SpaceBitmapTest, Visitor) {
+ compat_test();
+}
+
} // namespace accounting
} // namespace gc
} // namespace art
diff --git a/runtime/gc/allocator/rosalloc.cc b/runtime/gc/allocator/rosalloc.cc
index f5f6f16..920741f 100644
--- a/runtime/gc/allocator/rosalloc.cc
+++ b/runtime/gc/allocator/rosalloc.cc
@@ -1997,6 +1997,8 @@ void RosAlloc::Run::Verify(Thread* self, RosAlloc* rosalloc) {
CHECK_LE(obj_size, kLargeSizeThreshold)
<< "A run slot contains a large object " << Dump();
CHECK_EQ(SizeToIndex(obj_size), idx)
+ << PrettyTypeOf(obj) << " "
+ << "obj_size=" << obj_size << ", idx=" << idx << " "
<< "A run slot contains an object with wrong size " << Dump();
}
}
diff --git a/runtime/gc/heap-inl.h b/runtime/gc/heap-inl.h
index 25f20d6..a06f272 100644
--- a/runtime/gc/heap-inl.h
+++ b/runtime/gc/heap-inl.h
@@ -65,7 +65,7 @@ inline mirror::Object* Heap::AllocObjectWithAllocator(Thread* self, mirror::Clas
bool after_is_current_allocator = allocator == GetCurrentAllocator();
if (is_current_allocator && !after_is_current_allocator) {
// If the allocator changed, we need to restart the allocation.
- return AllocObject<kInstrumented>(self, klass, byte_count);
+ return AllocObject<kInstrumented>(self, klass, byte_count, pre_fence_visitor);
}
return nullptr;
}
@@ -111,7 +111,7 @@ inline mirror::Object* Heap::AllocObjectWithAllocator(Thread* self, mirror::Clas
DCHECK(!Runtime::Current()->HasStatsEnabled());
}
if (AllocatorHasAllocationStack(allocator)) {
- PushOnAllocationStack(self, obj);
+ PushOnAllocationStack(self, &obj);
}
if (kInstrumented) {
if (Dbg::IsAllocTrackingEnabled()) {
@@ -135,28 +135,34 @@ inline mirror::Object* Heap::AllocObjectWithAllocator(Thread* self, mirror::Clas
// The size of a thread-local allocation stack in the number of references.
static constexpr size_t kThreadLocalAllocationStackSize = 128;
-inline void Heap::PushOnAllocationStack(Thread* self, mirror::Object* obj) {
+inline void Heap::PushOnAllocationStack(Thread* self, mirror::Object** obj) {
if (kUseThreadLocalAllocationStack) {
- bool success = self->PushOnThreadLocalAllocationStack(obj);
+ bool success = self->PushOnThreadLocalAllocationStack(*obj);
if (UNLIKELY(!success)) {
// Slow path. Allocate a new thread-local allocation stack.
mirror::Object** start_address;
mirror::Object** end_address;
while (!allocation_stack_->AtomicBumpBack(kThreadLocalAllocationStackSize,
&start_address, &end_address)) {
+ // Disable verify object in SirtRef as obj isn't on the alloc stack yet.
+ SirtRefNoVerify<mirror::Object> ref(self, *obj);
CollectGarbageInternal(collector::kGcTypeSticky, kGcCauseForAlloc, false);
+ *obj = ref.get();
}
self->SetThreadLocalAllocationStack(start_address, end_address);
// Retry on the new thread-local allocation stack.
- success = self->PushOnThreadLocalAllocationStack(obj);
+ success = self->PushOnThreadLocalAllocationStack(*obj);
// Must succeed.
CHECK(success);
}
} else {
// This is safe to do since the GC will never free objects which are neither in the allocation
// stack or the live bitmap.
- while (!allocation_stack_->AtomicPushBack(obj)) {
+ while (!allocation_stack_->AtomicPushBack(*obj)) {
+ // Disable verify object in SirtRef as obj isn't on the alloc stack yet.
+ SirtRefNoVerify<mirror::Object> ref(self, *obj);
CollectGarbageInternal(collector::kGcTypeSticky, kGcCauseForAlloc, false);
+ *obj = ref.get();
}
}
}
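
Note: the heap-inl.h change passes the object by double pointer because CollectGarbageInternal() may run a moving collection while the allocation stack is full; the freshly allocated object therefore has to be held in a SirtRefNoVerify root and re-read afterwards rather than pushed via a stale pointer. Below is a toy sketch of that handle-and-reload pattern, with assumed names (RootSlot, CollectAndMove, PushWithRetry) standing in for the ART machinery.

#include <cassert>

struct Object { int payload; };

// A registered root: the collector knows this slot and fixes it up on relocation.
struct RootSlot { Object** slot; };

// Toy "moving collector": copies the object and updates the registered root.
void CollectAndMove(RootSlot root, Object* to_space) {
  *to_space = **root.slot;  // relocate the object
  *root.slot = to_space;    // patch the root so the caller sees the new address
}

// Mirrors the retry loop in PushOnAllocationStack: protect, collect, reload, retry.
bool PushWithRetry(Object** obj, Object* to_space) {
  RootSlot root{obj};              // analogous to wrapping *obj in SirtRefNoVerify
  CollectAndMove(root, to_space);  // analogous to CollectGarbageInternal(...)
  // *obj now refers to the relocated copy; pushing the old raw pointer here
  // would have recorded a dangling from-space address on the allocation stack.
  return true;
}

int main() {
  Object from_space{42};
  Object to_space{0};
  Object* obj = &from_space;
  assert(PushWithRetry(&obj, &to_space));
  assert(obj == &to_space && obj->payload == 42);
  return 0;
}
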
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 5879757..ffb4e59 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -158,28 +158,28 @@ class Heap {
~Heap();
// Allocates and initializes storage for an object instance.
- template <bool kInstrumented, typename PreFenceVisitor = VoidFunctor>
+ template <bool kInstrumented, typename PreFenceVisitor>
mirror::Object* AllocObject(Thread* self, mirror::Class* klass, size_t num_bytes,
- const PreFenceVisitor& pre_fence_visitor = VoidFunctor())
+ const PreFenceVisitor& pre_fence_visitor)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return AllocObjectWithAllocator<kInstrumented, true>(self, klass, num_bytes,
GetCurrentAllocator(),
pre_fence_visitor);
}
- template <bool kInstrumented, typename PreFenceVisitor = VoidFunctor>
+ template <bool kInstrumented, typename PreFenceVisitor>
mirror::Object* AllocNonMovableObject(Thread* self, mirror::Class* klass, size_t num_bytes,
- const PreFenceVisitor& pre_fence_visitor = VoidFunctor())
+ const PreFenceVisitor& pre_fence_visitor)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return AllocObjectWithAllocator<kInstrumented, true>(self, klass, num_bytes,
GetCurrentNonMovingAllocator(),
pre_fence_visitor);
}
- template <bool kInstrumented, bool kCheckLargeObject, typename PreFenceVisitor = VoidFunctor>
+ template <bool kInstrumented, bool kCheckLargeObject, typename PreFenceVisitor>
ALWAYS_INLINE mirror::Object* AllocObjectWithAllocator(
Thread* self, mirror::Class* klass, size_t byte_count, AllocatorType allocator,
- const PreFenceVisitor& pre_fence_visitor = VoidFunctor())
+ const PreFenceVisitor& pre_fence_visitor)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
AllocatorType GetCurrentAllocator() const {
@@ -691,7 +691,8 @@ class Heap {
void SignalHeapTrimDaemon(Thread* self);
// Push an object onto the allocation stack.
- void PushOnAllocationStack(Thread* self, mirror::Object* obj);
+ void PushOnAllocationStack(Thread* self, mirror::Object** obj)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// What kind of concurrency behavior is the runtime after? Currently true for concurrent mark
// sweep GC, false for other GC types.
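
Note: the heap.h hunks above remove the "= VoidFunctor()" defaults from the allocation templates. The motivation is visible in the heap-inl.h fix earlier in this patch: with a defaulted visitor, a retry path that forgets to forward the caller's pre_fence_visitor still compiles and silently substitutes the default. A small standalone illustration of that failure mode follows; the names are invented, not the ART API.

#include <cassert>

struct VoidFunctor { void operator()(int) const {} };

int g_visits = 0;
struct CountingVisitor { void operator()(int) const { ++g_visits; } };

// With a defaulted visitor, the buggy retry below compiles even though it drops
// the caller's visitor; removing the default turns this into a compile error.
template <typename Visitor = VoidFunctor>
void Alloc(int bytes, const Visitor& visitor = Visitor()) {
  if (bytes > 100) {
    Alloc(bytes - 100);  // BUG: visitor not forwarded, the default is silently used
    return;
  }
  visitor(bytes);
}

int main() {
  Alloc(150, CountingVisitor());
  assert(g_visits == 0);  // the caller's visitor never ran
  return 0;
}

This matches the one-line fix in heap-inl.h above, where the allocator-changed restart path now forwards pre_fence_visitor explicitly.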