author     Andreas Gampe <agampe@google.com>     2014-04-03 01:17:54 +0000
committer  Gerrit Code Review <noreply-gerritcodereview@google.com>     2014-04-03 01:17:54 +0000
commit     744924b701febe88bd67a9080999c08c76745807 (patch)
tree       c0bc9c8f5d0c4dbf2c45daf67975b5fa8a3b87c5 /runtime
parent     5cff3083c00dfbae7b8c5c34b0bedbc514c91df9 (diff)
parent     cb8aea4bd5e77857c713edeb62bffcb1f7f06a39 (diff)
Merge "Make SpaceBitmap cross-compiling tolerant"
Diffstat (limited to 'runtime')
-rw-r--r--  runtime/gc/accounting/space_bitmap-inl.h  125
-rw-r--r--  runtime/gc/accounting/space_bitmap.cc       27
-rw-r--r--  runtime/gc/accounting/space_bitmap.h        12
3 files changed, 85 insertions(+), 79 deletions(-)
diff --git a/runtime/gc/accounting/space_bitmap-inl.h b/runtime/gc/accounting/space_bitmap-inl.h
index d6d1b3e..0fbd27c 100644
--- a/runtime/gc/accounting/space_bitmap-inl.h
+++ b/runtime/gc/accounting/space_bitmap-inl.h
@@ -29,10 +29,10 @@ inline bool SpaceBitmap::AtomicTestAndSet(const mirror::Object* obj) {
DCHECK_GE(addr, heap_begin_);
const uintptr_t offset = addr - heap_begin_;
const size_t index = OffsetToIndex(offset);
- const word mask = OffsetToMask(offset);
- word* const address = &bitmap_begin_[index];
+ const uword mask = OffsetToMask(offset);
+ uword* const address = &bitmap_begin_[index];
DCHECK_LT(index, bitmap_size_ / kWordSize) << " bitmap_size_ = " << bitmap_size_;
- word old_word;
+ uword old_word;
do {
old_word = *address;
// Fast path: The bit is already set.
@@ -58,74 +58,79 @@ template <typename Visitor>
void SpaceBitmap::VisitMarkedRange(uintptr_t visit_begin, uintptr_t visit_end,
const Visitor& visitor) const {
DCHECK_LT(visit_begin, visit_end);
-#ifdef __LP64__
- // TODO: make the optimized code below work in the 64bit case.
- for (uintptr_t i = visit_begin; i < visit_end; i += kAlignment) {
- mirror::Object* obj = reinterpret_cast<mirror::Object*>(i);
- if (Test(obj)) {
- visitor(obj);
- }
- }
-#else
- const size_t bit_index_start = (visit_begin - heap_begin_) / kAlignment;
- const size_t bit_index_end = (visit_end - heap_begin_ - 1) / kAlignment;
+ DCHECK_LE(heap_begin_, visit_begin);
+ DCHECK_LE(visit_end, HeapLimit());
- size_t word_start = bit_index_start / kBitsPerWord;
- size_t word_end = bit_index_end / kBitsPerWord;
- DCHECK_LT(word_end * kWordSize, Size());
+ const uintptr_t offset_start = visit_begin - heap_begin_;
+ const uintptr_t offset_end = visit_end - heap_begin_;
- // Trim off left_bits of left bits.
- size_t edge_word = bitmap_begin_[word_start];
+ const uintptr_t index_start = OffsetToIndex(offset_start);
+ const uintptr_t index_end = OffsetToIndex(offset_end);
- // Handle bits on the left first as a special case
- size_t left_bits = bit_index_start & (kBitsPerWord - 1);
- if (left_bits != 0) {
- edge_word &= (1 << (kBitsPerWord - left_bits)) - 1;
- }
+ const size_t bit_start = (offset_start / kAlignment) % kBitsPerWord;
+ const size_t bit_end = (offset_end / kAlignment) % kBitsPerWord;
- // If word_start == word_end then handle this case at the same place we handle the right edge.
- if (edge_word != 0 && word_start < word_end) {
- uintptr_t ptr_base = IndexToOffset(word_start) + heap_begin_;
- do {
- const size_t shift = CLZ(edge_word);
- mirror::Object* obj = reinterpret_cast<mirror::Object*>(ptr_base + shift * kAlignment);
- visitor(obj);
- edge_word ^= static_cast<size_t>(kWordHighBitMask) >> shift;
- } while (edge_word != 0);
- }
- word_start++;
+  // Index(begin)  ...    Index(end)
+  // [xxxxx???][........][????yyyy]
+  //       ^                   ^
+  //       |                   #---- Bit of visit_end
+  //       #---- Bit of visit_begin
+  //
+
+ // Left edge.
+ uword left_edge = bitmap_begin_[index_start];
+  // Mask off lower bits that are not in range.
+ left_edge &= ~((static_cast<uword>(1) << bit_start) - 1);
- for (size_t i = word_start; i < word_end; i++) {
- size_t w = bitmap_begin_[i];
- if (w != 0) {
- uintptr_t ptr_base = IndexToOffset(i) + heap_begin_;
+ // Right edge. Either unique, or left_edge.
+ uword right_edge;
+
+ if (index_start < index_end) {
+ // Left edge != right edge.
+
+ // Traverse left edge.
+ if (left_edge != 0) {
+ const uintptr_t ptr_base = IndexToOffset(index_start) + heap_begin_;
do {
- const size_t shift = CLZ(w);
+ const size_t shift = CTZ(left_edge);
mirror::Object* obj = reinterpret_cast<mirror::Object*>(ptr_base + shift * kAlignment);
visitor(obj);
- w ^= static_cast<size_t>(kWordHighBitMask) >> shift;
- } while (w != 0);
+ left_edge ^= (static_cast<uword>(1)) << shift;
+ } while (left_edge != 0);
}
- }
- // Handle the right edge, and also the left edge if both edges are on the same word.
- size_t right_bits = bit_index_end & (kBitsPerWord - 1);
+ // Traverse the middle, full part.
+ for (size_t i = index_start + 1; i < index_end; ++i) {
+ uword w = bitmap_begin_[i];
+ if (w != 0) {
+ const uintptr_t ptr_base = IndexToOffset(i) + heap_begin_;
+ do {
+ const size_t shift = CTZ(w);
+ mirror::Object* obj = reinterpret_cast<mirror::Object*>(ptr_base + shift * kAlignment);
+ visitor(obj);
+ w ^= (static_cast<uword>(1)) << shift;
+ } while (w != 0);
+ }
+ }
- // If word_start == word_end then we need to use the word which we removed the left bits.
- if (word_start <= word_end) {
- edge_word = bitmap_begin_[word_end];
+ // Right edge is unique.
+ right_edge = bitmap_begin_[index_end];
+ } else {
+ // Right edge = left edge.
+ right_edge = left_edge;
}
- // Bits that we trim off the right.
- edge_word &= ~((static_cast<size_t>(kWordHighBitMask) >> right_bits) - 1);
- uintptr_t ptr_base = IndexToOffset(word_end) + heap_begin_;
- while (edge_word != 0) {
- const size_t shift = CLZ(edge_word);
- mirror::Object* obj = reinterpret_cast<mirror::Object*>(ptr_base + shift * kAlignment);
- visitor(obj);
- edge_word ^= static_cast<size_t>(kWordHighBitMask) >> shift;
+ // Right edge handling.
+ right_edge &= ((static_cast<uword>(1) << bit_end) - 1) | (static_cast<uword>(1) << bit_end);
+ if (right_edge != 0) {
+ const uintptr_t ptr_base = IndexToOffset(index_end) + heap_begin_;
+ do {
+ const size_t shift = CTZ(right_edge);
+ mirror::Object* obj = reinterpret_cast<mirror::Object*>(ptr_base + shift * kAlignment);
+ visitor(obj);
+ right_edge ^= (static_cast<uword>(1)) << shift;
+ } while (right_edge != 0);
}
-#endif
}
inline bool SpaceBitmap::Modify(const mirror::Object* obj, bool do_set) {
@@ -133,10 +138,10 @@ inline bool SpaceBitmap::Modify(const mirror::Object* obj, bool do_set) {
DCHECK_GE(addr, heap_begin_);
const uintptr_t offset = addr - heap_begin_;
const size_t index = OffsetToIndex(offset);
- const word mask = OffsetToMask(offset);
+ const uword mask = OffsetToMask(offset);
DCHECK_LT(index, bitmap_size_ / kWordSize) << " bitmap_size_ = " << bitmap_size_;
- word* address = &bitmap_begin_[index];
- word old_word = *address;
+ uword* address = &bitmap_begin_[index];
+ uword old_word = *address;
if (do_set) {
*address = old_word | mask;
} else {
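
Note on the pattern above: the rewritten VisitMarkedRange (and the walkers in space_bitmap.cc below) visit set bits lowest-bit-first with CTZ, clearing each bit as they go, instead of the old highest-bit-first CLZ walk. A minimal standalone sketch of that inner loop, assuming a 64-bit uword, an illustrative kAlignment of 8 bytes, and __builtin_ctzll standing in for the CTZ macro (these stand-ins are not the real ART definitions):

    #include <cstdint>
    #include <cstdio>

    // Simplified stand-ins for the real ART types and constants.
    using uword = uint64_t;
    constexpr size_t kAlignment = 8;   // assumed object alignment

    // Visit every set bit of one bitmap word in address order (low bit first).
    template <typename Visitor>
    void VisitWord(uword w, uintptr_t ptr_base, Visitor&& visitor) {
      while (w != 0) {
        const size_t shift = __builtin_ctzll(w);   // index of the lowest set bit
        visitor(ptr_base + shift * kAlignment);    // bit i maps to ptr_base + i * kAlignment
        w ^= static_cast<uword>(1) << shift;       // clear that bit (same as w &= w - 1)
      }
    }

    int main() {
      // Bits 0, 3 and 63 set: stands in for objects at +0, +24 and +504 bytes.
      uword w = (static_cast<uword>(1) << 0) | (static_cast<uword>(1) << 3) |
                (static_cast<uword>(1) << 63);
      VisitWord(w, 0x1000, [](uintptr_t addr) { printf("%#zx\n", static_cast<size_t>(addr)); });
      return 0;
    }

Because the bit for the lowest address is now the lowest bit, the edge handling in VisitMarkedRange reduces to two plain masks: clear the bits below bit_start on the left edge, and keep the bits up to and including bit_end on the right edge.
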
diff --git a/runtime/gc/accounting/space_bitmap.cc b/runtime/gc/accounting/space_bitmap.cc
index ad4ff1b..1957c21 100644
--- a/runtime/gc/accounting/space_bitmap.cc
+++ b/runtime/gc/accounting/space_bitmap.cc
@@ -53,7 +53,7 @@ void ObjectSet::Walk(ObjectCallback* callback, void* arg) {
SpaceBitmap* SpaceBitmap::CreateFromMemMap(const std::string& name, MemMap* mem_map,
byte* heap_begin, size_t heap_capacity) {
CHECK(mem_map != nullptr);
- word* bitmap_begin = reinterpret_cast<word*>(mem_map->Begin());
+ uword* bitmap_begin = reinterpret_cast<uword*>(mem_map->Begin());
size_t bitmap_size = OffsetToIndex(RoundUp(heap_capacity, kAlignment * kBitsPerWord)) * kWordSize;
return new SpaceBitmap(name, mem_map, bitmap_begin, bitmap_size, heap_begin);
}
@@ -107,16 +107,16 @@ void SpaceBitmap::Walk(ObjectCallback* callback, void* arg) {
CHECK(callback != NULL);
uintptr_t end = OffsetToIndex(HeapLimit() - heap_begin_ - 1);
- word* bitmap_begin = bitmap_begin_;
+ uword* bitmap_begin = bitmap_begin_;
for (uintptr_t i = 0; i <= end; ++i) {
- word w = bitmap_begin[i];
+ uword w = bitmap_begin[i];
if (w != 0) {
uintptr_t ptr_base = IndexToOffset(i) + heap_begin_;
do {
- const size_t shift = CLZ(w);
+ const size_t shift = CTZ(w);
mirror::Object* obj = reinterpret_cast<mirror::Object*>(ptr_base + shift * kAlignment);
(*callback)(obj, arg);
- w ^= static_cast<size_t>(kWordHighBitMask) >> shift;
+ w ^= (static_cast<uword>(1)) << shift;
} while (w != 0);
}
}
@@ -150,15 +150,15 @@ void SpaceBitmap::SweepWalk(const SpaceBitmap& live_bitmap,
size_t start = OffsetToIndex(sweep_begin - live_bitmap.heap_begin_);
size_t end = OffsetToIndex(sweep_end - live_bitmap.heap_begin_ - 1);
CHECK_LT(end, live_bitmap.Size() / kWordSize);
- word* live = live_bitmap.bitmap_begin_;
- word* mark = mark_bitmap.bitmap_begin_;
+ uword* live = live_bitmap.bitmap_begin_;
+ uword* mark = mark_bitmap.bitmap_begin_;
for (size_t i = start; i <= end; i++) {
- word garbage = live[i] & ~mark[i];
+ uword garbage = live[i] & ~mark[i];
if (UNLIKELY(garbage != 0)) {
uintptr_t ptr_base = IndexToOffset(i) + live_bitmap.heap_begin_;
do {
- const size_t shift = CLZ(garbage);
- garbage ^= static_cast<size_t>(kWordHighBitMask) >> shift;
+ const size_t shift = CTZ(garbage);
+ garbage ^= (static_cast<uword>(1)) << shift;
*pb++ = reinterpret_cast<mirror::Object*>(ptr_base + shift * kAlignment);
} while (garbage != 0);
// Make sure that there are always enough slots available for an
@@ -254,14 +254,15 @@ void SpaceBitmap::InOrderWalk(ObjectCallback* callback, void* arg) {
CHECK(callback != NULL);
uintptr_t end = Size() / kWordSize;
for (uintptr_t i = 0; i < end; ++i) {
- word w = bitmap_begin_[i];
+      // Need an unsigned type for the shift below.
+ uword w = bitmap_begin_[i];
if (UNLIKELY(w != 0)) {
uintptr_t ptr_base = IndexToOffset(i) + heap_begin_;
while (w != 0) {
- const size_t shift = CLZ(w);
+ const size_t shift = CTZ(w);
mirror::Object* obj = reinterpret_cast<mirror::Object*>(ptr_base + shift * kAlignment);
WalkFieldsInOrder(visited.get(), callback, obj, arg);
- w ^= static_cast<size_t>(kWordHighBitMask) >> shift;
+ w ^= (static_cast<uword>(1)) << shift;
}
}
}
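
The header change below replaces the CLZ-oriented "high bit first" OffsetToMask with the natural low-bit packing, so index and mask computation are a division and a shift. A small sketch of the new offset/index/mask mapping, again using simplified stand-ins for uword, kAlignment and kBitsPerWord rather than the real definitions from ART:

    #include <cstdint>

    using uword = uint64_t;                    // assumed native word
    constexpr size_t kAlignment = 8;           // assumed object alignment
    constexpr size_t kBitsPerWord = sizeof(uword) * 8;

    // One bitmap bit per kAlignment bytes of heap, packed low bit first.
    constexpr size_t OffsetToIndex(uintptr_t offset) {
      return offset / (kAlignment * kBitsPerWord);
    }
    constexpr uintptr_t IndexToOffset(size_t index) {
      return static_cast<uintptr_t>(index * kAlignment * kBitsPerWord);
    }
    constexpr uword OffsetToMask(uintptr_t offset) {
      return static_cast<uword>(1) << ((offset / kAlignment) % kBitsPerWord);
    }

    // An object 65 * kAlignment = 520 bytes past heap_begin_ lands in word 1, bit 1.
    static_assert(OffsetToIndex(65 * kAlignment) == 1, "second bitmap word");
    static_assert(OffsetToMask(65 * kAlignment) == (static_cast<uword>(1) << 1), "bit 1 of that word");
    static_assert(IndexToOffset(1) == kAlignment * kBitsPerWord, "index round-trips to offset");

    int main() { return 0; }

Packing in address order pairs naturally with the CTZ walkers above, which is why the old "pack the bits in backwards so they come out in address order when using CLZ" layout could be dropped.
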
diff --git a/runtime/gc/accounting/space_bitmap.h b/runtime/gc/accounting/space_bitmap.h
index 5fd2bce..aa24b03 100644
--- a/runtime/gc/accounting/space_bitmap.h
+++ b/runtime/gc/accounting/space_bitmap.h
@@ -70,9 +70,9 @@ class SpaceBitmap {
return static_cast<uintptr_t>(index * kAlignment * kBitsPerWord);
}
- // Pack the bits in backwards so they come out in address order when using CLZ.
- static word OffsetToMask(uintptr_t offset) {
- return static_cast<uintptr_t>(kWordHighBitMask) >> ((offset / kAlignment) % kBitsPerWord);
+  // Bits are packed in address order, lowest bit first.
+ static uword OffsetToMask(uintptr_t offset) {
+ return (static_cast<size_t>(1)) << ((offset / kAlignment) % kBitsPerWord);
}
inline bool Set(const mirror::Object* obj) {
@@ -140,7 +140,7 @@ class SpaceBitmap {
void CopyFrom(SpaceBitmap* source_bitmap);
// Starting address of our internal storage.
- word* Begin() {
+ uword* Begin() {
return bitmap_begin_;
}
@@ -181,7 +181,7 @@ class SpaceBitmap {
private:
// TODO: heap_end_ is initialized so that the heap bitmap is empty, this doesn't require the -1,
// however, we document that this is expected on heap_end_
- SpaceBitmap(const std::string& name, MemMap* mem_map, word* bitmap_begin, size_t bitmap_size,
+ SpaceBitmap(const std::string& name, MemMap* mem_map, uword* bitmap_begin, size_t bitmap_size,
const void* heap_begin)
: mem_map_(mem_map), bitmap_begin_(bitmap_begin), bitmap_size_(bitmap_size),
heap_begin_(reinterpret_cast<uintptr_t>(heap_begin)),
@@ -193,7 +193,7 @@ class SpaceBitmap {
UniquePtr<MemMap> mem_map_;
// This bitmap itself, word sized for efficiency in scanning.
- word* const bitmap_begin_;
+ uword* const bitmap_begin_;
// Size of this bitmap.
size_t bitmap_size_;