Diffstat (limited to 'runtime/gc')
-rw-r--r--  runtime/gc/accounting/card_table-inl.h | 2
-rw-r--r--  runtime/gc/accounting/card_table.h | 5
-rw-r--r--  runtime/gc/accounting/heap_bitmap-inl.h | 76
-rw-r--r--  runtime/gc/accounting/heap_bitmap.cc | 67
-rw-r--r--  runtime/gc/accounting/heap_bitmap.h | 27
-rw-r--r--  runtime/gc/accounting/mod_union_table.cc | 7
-rw-r--r--  runtime/gc/accounting/mod_union_table.h | 1
-rw-r--r--  runtime/gc/accounting/remembered_set.cc | 2
-rw-r--r--  runtime/gc/accounting/space_bitmap-inl.h | 39
-rw-r--r--  runtime/gc/accounting/space_bitmap.cc | 140
-rw-r--r--  runtime/gc/accounting/space_bitmap.h | 137
-rw-r--r--  runtime/gc/accounting/space_bitmap_test.cc | 42
-rw-r--r--  runtime/gc/collector/garbage_collector.cc | 16
-rw-r--r--  runtime/gc/collector/mark_sweep.cc | 163
-rw-r--r--  runtime/gc/collector/mark_sweep.h | 15
-rw-r--r--  runtime/gc/collector/semi_space-inl.h | 53
-rw-r--r--  runtime/gc/collector/semi_space.cc | 72
-rw-r--r--  runtime/gc/collector/semi_space.h | 13
-rw-r--r--  runtime/gc/heap.cc | 115
-rw-r--r--  runtime/gc/heap.h | 21
-rw-r--r--  runtime/gc/heap_test.cc | 10
-rw-r--r--  runtime/gc/space/bump_pointer_space.cc | 2
-rw-r--r--  runtime/gc/space/bump_pointer_space.h | 6
-rw-r--r--  runtime/gc/space/dlmalloc_space.cc | 4
-rw-r--r--  runtime/gc/space/image_space.cc | 10
-rw-r--r--  runtime/gc/space/image_space.h | 10
-rw-r--r--  runtime/gc/space/large_object_space.cc | 79
-rw-r--r--  runtime/gc/space/large_object_space.h | 35
-rw-r--r--  runtime/gc/space/malloc_space.cc | 24
-rw-r--r--  runtime/gc/space/malloc_space.h | 2
-rw-r--r--  runtime/gc/space/rosalloc_space.cc | 4
-rw-r--r--  runtime/gc/space/space.cc | 44
-rw-r--r--  runtime/gc/space/space.h | 45
-rw-r--r--  runtime/gc/space/zygote_space.cc | 8
-rw-r--r--  runtime/gc/space/zygote_space.h | 11
35 files changed, 661 insertions, 646 deletions
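
The thrust of the patch, visible in the diffstat and in space_bitmap.h below: the fixed-alignment SpaceBitmap class and the set-based ObjectSet are folded into a single SpaceBitmap<kAlignment> template, aliased as ContinuousSpaceBitmap for ordinary object-aligned spaces and LargeObjectBitmap for page-aligned large objects. A minimal standalone sketch of that shape follows; it is illustrative only (not the ART sources), and the alignment constants are assumptions.

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // One bitmap word covers kBitsPerWord slots of kAlignment bytes each.
    template <size_t kAlignment>
    class SpaceBitmap {
     public:
      SpaceBitmap(uintptr_t heap_begin, size_t heap_capacity)
          : heap_begin_(heap_begin),
            words_((heap_capacity / kAlignment + kBitsPerWord - 1) / kBitsPerWord, 0u) {}

      // Sets the bit for addr; returns whether it was already set (cf. Modify<true>() in the patch).
      bool Set(uintptr_t addr) {
        const size_t bit = (addr - heap_begin_) / kAlignment;
        const uintptr_t mask = static_cast<uintptr_t>(1) << (bit % kBitsPerWord);
        const bool old_value = (words_[bit / kBitsPerWord] & mask) != 0;
        words_[bit / kBitsPerWord] |= mask;
        return old_value;
      }

      bool Test(uintptr_t addr) const {
        const size_t bit = (addr - heap_begin_) / kAlignment;
        return ((words_[bit / kBitsPerWord] >> (bit % kBitsPerWord)) & 1) != 0;
      }

     private:
      static const size_t kBitsPerWord = sizeof(uintptr_t) * 8;
      uintptr_t heap_begin_;
      std::vector<uintptr_t> words_;
    };

    // Illustrative constants; ART's real kObjectAlignment / kPageSize are defined elsewhere.
    static const size_t kObjectAlignment = 8;
    static const size_t kLargeObjectAlignment = 4096;
    typedef SpaceBitmap<kObjectAlignment> ContinuousSpaceBitmap;
    typedef SpaceBitmap<kLargeObjectAlignment> LargeObjectBitmap;
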
diff --git a/runtime/gc/accounting/card_table-inl.h b/runtime/gc/accounting/card_table-inl.h
index 564168e..a1d001e 100644
--- a/runtime/gc/accounting/card_table-inl.h
+++ b/runtime/gc/accounting/card_table-inl.h
@@ -43,7 +43,7 @@ static inline bool byte_cas(byte old_value, byte new_value, byte* address) {
}
template <typename Visitor>
-inline size_t CardTable::Scan(SpaceBitmap* bitmap, byte* scan_begin, byte* scan_end,
+inline size_t CardTable::Scan(ContinuousSpaceBitmap* bitmap, byte* scan_begin, byte* scan_end,
const Visitor& visitor, const byte minimum_age) const {
DCHECK(bitmap->HasAddress(scan_begin));
DCHECK(bitmap->HasAddress(scan_end - 1)); // scan_end is the byte after the last byte we scan.
diff --git a/runtime/gc/accounting/card_table.h b/runtime/gc/accounting/card_table.h
index 8b7bfd3..8d5dc07 100644
--- a/runtime/gc/accounting/card_table.h
+++ b/runtime/gc/accounting/card_table.h
@@ -38,7 +38,7 @@ class Heap;
namespace accounting {
-class SpaceBitmap;
+template<size_t kAlignment> class SpaceBitmap;
// Maintain a card table from the the write barrier. All writes of
// non-NULL values to heap addresses should go through an entry in
@@ -102,7 +102,8 @@ class CardTable {
// For every dirty at least minumum age between begin and end invoke the visitor with the
// specified argument. Returns how many cards the visitor was run on.
template <typename Visitor>
- size_t Scan(SpaceBitmap* bitmap, byte* scan_begin, byte* scan_end, const Visitor& visitor,
+ size_t Scan(SpaceBitmap<kObjectAlignment>* bitmap, byte* scan_begin, byte* scan_end,
+ const Visitor& visitor,
const byte minimum_age = kCardDirty) const
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/gc/accounting/heap_bitmap-inl.h b/runtime/gc/accounting/heap_bitmap-inl.h
index 04e85d2..c67542f 100644
--- a/runtime/gc/accounting/heap_bitmap-inl.h
+++ b/runtime/gc/accounting/heap_bitmap-inl.h
@@ -30,56 +30,76 @@ inline void HeapBitmap::Visit(const Visitor& visitor) {
for (const auto& bitmap : continuous_space_bitmaps_) {
bitmap->VisitMarkedRange(bitmap->HeapBegin(), bitmap->HeapLimit(), visitor);
}
- DCHECK(!discontinuous_space_sets_.empty());
- for (const auto& space_set : discontinuous_space_sets_) {
- space_set->Visit(visitor);
+ for (const auto& bitmap : large_object_bitmaps_) {
+ bitmap->VisitMarkedRange(bitmap->HeapBegin(), bitmap->HeapLimit(), visitor);
}
}
inline bool HeapBitmap::Test(const mirror::Object* obj) {
- SpaceBitmap* bitmap = GetContinuousSpaceBitmap(obj);
+ ContinuousSpaceBitmap* bitmap = GetContinuousSpaceBitmap(obj);
if (LIKELY(bitmap != nullptr)) {
return bitmap->Test(obj);
- } else {
- return GetDiscontinuousSpaceObjectSet(obj) != NULL;
}
+ for (const auto& bitmap : large_object_bitmaps_) {
+ if (LIKELY(bitmap->HasAddress(obj))) {
+ return bitmap->Test(obj);
+ }
+ }
+ LOG(FATAL) << "Invalid object " << obj;
+ return false;
}
inline void HeapBitmap::Clear(const mirror::Object* obj) {
- SpaceBitmap* bitmap = GetContinuousSpaceBitmap(obj);
+ ContinuousSpaceBitmap* bitmap = GetContinuousSpaceBitmap(obj);
if (LIKELY(bitmap != nullptr)) {
bitmap->Clear(obj);
- } else {
- ObjectSet* set = GetDiscontinuousSpaceObjectSet(obj);
- DCHECK(set != NULL);
- set->Clear(obj);
+ return;
+ }
+ for (const auto& bitmap : large_object_bitmaps_) {
+ if (LIKELY(bitmap->HasAddress(obj))) {
+ bitmap->Clear(obj);
+ }
}
+ LOG(FATAL) << "Invalid object " << obj;
}
-inline void HeapBitmap::Set(const mirror::Object* obj) {
- SpaceBitmap* bitmap = GetContinuousSpaceBitmap(obj);
- if (LIKELY(bitmap != NULL)) {
- bitmap->Set(obj);
- } else {
- ObjectSet* set = GetDiscontinuousSpaceObjectSet(obj);
- DCHECK(set != NULL);
- set->Set(obj);
+template<typename LargeObjectSetVisitor>
+inline bool HeapBitmap::Set(const mirror::Object* obj, const LargeObjectSetVisitor& visitor) {
+ ContinuousSpaceBitmap* bitmap = GetContinuousSpaceBitmap(obj);
+ if (LIKELY(bitmap != nullptr)) {
+ return bitmap->Set(obj);
+ }
+ visitor(obj);
+ for (const auto& bitmap : large_object_bitmaps_) {
+ if (LIKELY(bitmap->HasAddress(obj))) {
+ return bitmap->Set(obj);
+ }
}
+ LOG(FATAL) << "Invalid object " << obj;
+ return false;
}
-inline SpaceBitmap* HeapBitmap::GetContinuousSpaceBitmap(const mirror::Object* obj) const {
- for (const auto& bitmap : continuous_space_bitmaps_) {
- if (bitmap->HasAddress(obj)) {
- return bitmap;
+template<typename LargeObjectSetVisitor>
+inline bool HeapBitmap::AtomicTestAndSet(const mirror::Object* obj,
+ const LargeObjectSetVisitor& visitor) {
+ ContinuousSpaceBitmap* bitmap = GetContinuousSpaceBitmap(obj);
+ if (LIKELY(bitmap != nullptr)) {
+ return bitmap->AtomicTestAndSet(obj);
+ }
+ visitor(obj);
+ for (const auto& bitmap : large_object_bitmaps_) {
+ if (LIKELY(bitmap->HasAddress(obj))) {
+ return bitmap->AtomicTestAndSet(obj);
}
}
- return nullptr;
+ LOG(FATAL) << "Invalid object " << obj;
+ return false;
}
-inline ObjectSet* HeapBitmap::GetDiscontinuousSpaceObjectSet(const mirror::Object* obj) const {
- for (const auto& space_set : discontinuous_space_sets_) {
- if (space_set->Test(obj)) {
- return space_set;
+inline ContinuousSpaceBitmap* HeapBitmap::GetContinuousSpaceBitmap(const mirror::Object* obj) const {
+ for (const auto& bitmap : continuous_space_bitmaps_) {
+ if (bitmap->HasAddress(obj)) {
+ return bitmap;
}
}
return nullptr;
diff --git a/runtime/gc/accounting/heap_bitmap.cc b/runtime/gc/accounting/heap_bitmap.cc
index f94cf24..a5d59bf 100644
--- a/runtime/gc/accounting/heap_bitmap.cc
+++ b/runtime/gc/accounting/heap_bitmap.cc
@@ -16,68 +16,67 @@
#include "heap_bitmap.h"
+#include "gc/accounting/space_bitmap-inl.h"
#include "gc/space/space.h"
namespace art {
namespace gc {
namespace accounting {
-void HeapBitmap::ReplaceBitmap(SpaceBitmap* old_bitmap, SpaceBitmap* new_bitmap) {
- for (auto& bitmap : continuous_space_bitmaps_) {
- if (bitmap == old_bitmap) {
- bitmap = new_bitmap;
- return;
- }
- }
- LOG(FATAL) << "bitmap " << static_cast<const void*>(old_bitmap) << " not found";
+void HeapBitmap::ReplaceBitmap(ContinuousSpaceBitmap* old_bitmap,
+ ContinuousSpaceBitmap* new_bitmap) {
+ auto it = std::find(continuous_space_bitmaps_.begin(), continuous_space_bitmaps_.end(),
+ old_bitmap);
+ CHECK(it != continuous_space_bitmaps_.end()) << " continuous space bitmap " << old_bitmap
+ << " not found";
+ *it = new_bitmap;
}
-void HeapBitmap::ReplaceObjectSet(ObjectSet* old_set, ObjectSet* new_set) {
- for (auto& space_set : discontinuous_space_sets_) {
- if (space_set == old_set) {
- space_set = new_set;
- return;
- }
- }
- LOG(FATAL) << "object set " << static_cast<const void*>(old_set) << " not found";
+void HeapBitmap::ReplaceLargeObjectBitmap(LargeObjectBitmap* old_bitmap,
+ LargeObjectBitmap* new_bitmap) {
+ auto it = std::find(large_object_bitmaps_.begin(), large_object_bitmaps_.end(), old_bitmap);
+ CHECK(it != large_object_bitmaps_.end()) << " large object bitmap " << old_bitmap
+ << " not found";
+ *it = new_bitmap;
}
-void HeapBitmap::AddContinuousSpaceBitmap(accounting::SpaceBitmap* bitmap) {
- DCHECK(bitmap != NULL);
-
- // Check for interval overlap.
+void HeapBitmap::AddContinuousSpaceBitmap(accounting::ContinuousSpaceBitmap* bitmap) {
+ DCHECK(bitmap != nullptr);
+ // Check that there is no bitmap overlap.
for (const auto& cur_bitmap : continuous_space_bitmaps_) {
- CHECK(!(
- bitmap->HeapBegin() < cur_bitmap->HeapLimit() &&
- bitmap->HeapLimit() > cur_bitmap->HeapBegin()))
- << "Bitmap " << bitmap->Dump() << " overlaps with existing bitmap " << cur_bitmap->Dump();
+ CHECK(bitmap->HeapBegin() >= cur_bitmap->HeapLimit() ||
+ bitmap->HeapLimit() <= cur_bitmap->HeapBegin())
+ << "Bitmap " << bitmap->Dump() << " overlaps with existing bitmap "
+ << cur_bitmap->Dump();
}
continuous_space_bitmaps_.push_back(bitmap);
}
-void HeapBitmap::RemoveContinuousSpaceBitmap(accounting::SpaceBitmap* bitmap) {
+void HeapBitmap::RemoveContinuousSpaceBitmap(accounting::ContinuousSpaceBitmap* bitmap) {
+ DCHECK(bitmap != nullptr);
auto it = std::find(continuous_space_bitmaps_.begin(), continuous_space_bitmaps_.end(), bitmap);
DCHECK(it != continuous_space_bitmaps_.end());
continuous_space_bitmaps_.erase(it);
}
-void HeapBitmap::AddDiscontinuousObjectSet(ObjectSet* set) {
- DCHECK(set != NULL);
- discontinuous_space_sets_.push_back(set);
+void HeapBitmap::AddLargeObjectBitmap(LargeObjectBitmap* bitmap) {
+ DCHECK(bitmap != nullptr);
+ large_object_bitmaps_.push_back(bitmap);
}
-void HeapBitmap::RemoveDiscontinuousObjectSet(ObjectSet* set) {
- auto it = std::find(discontinuous_space_sets_.begin(), discontinuous_space_sets_.end(), set);
- DCHECK(it != discontinuous_space_sets_.end());
- discontinuous_space_sets_.erase(it);
+void HeapBitmap::RemoveLargeObjectBitmap(LargeObjectBitmap* bitmap) {
+ DCHECK(bitmap != nullptr);
+ auto it = std::find(large_object_bitmaps_.begin(), large_object_bitmaps_.end(), bitmap);
+ DCHECK(it != large_object_bitmaps_.end());
+ large_object_bitmaps_.erase(it);
}
void HeapBitmap::Walk(ObjectCallback* callback, void* arg) {
for (const auto& bitmap : continuous_space_bitmaps_) {
bitmap->Walk(callback, arg);
}
- for (const auto& space_set : discontinuous_space_sets_) {
- space_set->Walk(callback, arg);
+ for (const auto& bitmap : large_object_bitmaps_) {
+ bitmap->Walk(callback, arg);
}
}
diff --git a/runtime/gc/accounting/heap_bitmap.h b/runtime/gc/accounting/heap_bitmap.h
index f729c0e..814dc06 100644
--- a/runtime/gc/accounting/heap_bitmap.h
+++ b/runtime/gc/accounting/heap_bitmap.h
@@ -33,9 +33,13 @@ class HeapBitmap {
public:
bool Test(const mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
void Clear(const mirror::Object* obj) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
- void Set(const mirror::Object* obj) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
- SpaceBitmap* GetContinuousSpaceBitmap(const mirror::Object* obj) const;
- ObjectSet* GetDiscontinuousSpaceObjectSet(const mirror::Object* obj) const;
+ template<typename LargeObjectSetVisitor>
+ bool Set(const mirror::Object* obj, const LargeObjectSetVisitor& visitor)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) ALWAYS_INLINE;
+ template<typename LargeObjectSetVisitor>
+ bool AtomicTestAndSet(const mirror::Object* obj, const LargeObjectSetVisitor& visitor)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) ALWAYS_INLINE;
+ ContinuousSpaceBitmap* GetContinuousSpaceBitmap(const mirror::Object* obj) const;
void Walk(ObjectCallback* callback, void* arg)
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
@@ -46,11 +50,11 @@ class HeapBitmap {
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Find and replace a bitmap pointer, this is used by for the bitmap swapping in the GC.
- void ReplaceBitmap(SpaceBitmap* old_bitmap, SpaceBitmap* new_bitmap)
+ void ReplaceBitmap(ContinuousSpaceBitmap* old_bitmap, ContinuousSpaceBitmap* new_bitmap)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
// Find and replace a object set pointer, this is used by for the bitmap swapping in the GC.
- void ReplaceObjectSet(ObjectSet* old_set, ObjectSet* new_set)
+ void ReplaceLargeObjectBitmap(LargeObjectBitmap* old_bitmap, LargeObjectBitmap* new_bitmap)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
explicit HeapBitmap(Heap* heap) : heap_(heap) {}
@@ -58,16 +62,17 @@ class HeapBitmap {
private:
const Heap* const heap_;
- void AddContinuousSpaceBitmap(SpaceBitmap* bitmap);
- void RemoveContinuousSpaceBitmap(SpaceBitmap* bitmap);
- void AddDiscontinuousObjectSet(ObjectSet* set);
- void RemoveDiscontinuousObjectSet(ObjectSet* set);
+ void AddContinuousSpaceBitmap(ContinuousSpaceBitmap* bitmap);
+ void RemoveContinuousSpaceBitmap(ContinuousSpaceBitmap* bitmap);
+ void AddLargeObjectBitmap(LargeObjectBitmap* bitmap);
+ void RemoveLargeObjectBitmap(LargeObjectBitmap* bitmap);
// Bitmaps covering continuous spaces.
- std::vector<SpaceBitmap*, GcAllocator<SpaceBitmap*>> continuous_space_bitmaps_;
+ std::vector<ContinuousSpaceBitmap*, GcAllocator<ContinuousSpaceBitmap*>>
+ continuous_space_bitmaps_;
// Sets covering discontinuous spaces.
- std::vector<ObjectSet*, GcAllocator<ObjectSet*>> discontinuous_space_sets_;
+ std::vector<LargeObjectBitmap*, GcAllocator<LargeObjectBitmap*>> large_object_bitmaps_;
friend class art::gc::Heap;
};
diff --git a/runtime/gc/accounting/mod_union_table.cc b/runtime/gc/accounting/mod_union_table.cc
index 34ca654..d744dee 100644
--- a/runtime/gc/accounting/mod_union_table.cc
+++ b/runtime/gc/accounting/mod_union_table.cc
@@ -19,6 +19,7 @@
#include "base/stl_util.h"
#include "card_table-inl.h"
#include "heap_bitmap.h"
+#include "gc/accounting/space_bitmap-inl.h"
#include "gc/collector/mark_sweep.h"
#include "gc/collector/mark_sweep-inl.h"
#include "gc/heap.h"
@@ -222,7 +223,7 @@ void ModUnionTableReferenceCache::Verify() {
// Check the references of each clean card which is also in the mod union table.
CardTable* card_table = heap_->GetCardTable();
- SpaceBitmap* live_bitmap = space_->GetLiveBitmap();
+ ContinuousSpaceBitmap* live_bitmap = space_->GetLiveBitmap();
for (const auto& ref_pair : references_) {
const byte* card = ref_pair.first;
if (*card == CardTable::kCardClean) {
@@ -272,7 +273,7 @@ void ModUnionTableReferenceCache::UpdateAndMarkReferences(MarkHeapReferenceCallb
uintptr_t end = start + CardTable::kCardSize;
auto* space = heap_->FindContinuousSpaceFromObject(reinterpret_cast<Object*>(start), false);
DCHECK(space != nullptr);
- SpaceBitmap* live_bitmap = space->GetLiveBitmap();
+ ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
live_bitmap->VisitMarkedRange(start, end, add_visitor);
// Update the corresponding references for the card.
@@ -312,7 +313,7 @@ void ModUnionTableCardCache::UpdateAndMarkReferences(MarkHeapReferenceCallback*
void* arg) {
CardTable* card_table = heap_->GetCardTable();
ModUnionScanImageRootVisitor scan_visitor(callback, arg);
- SpaceBitmap* bitmap = space_->GetLiveBitmap();
+ ContinuousSpaceBitmap* bitmap = space_->GetLiveBitmap();
for (const byte* card_addr : cleared_cards_) {
uintptr_t start = reinterpret_cast<uintptr_t>(card_table->AddrFromCard(card_addr));
DCHECK(space_->HasAddress(reinterpret_cast<Object*>(start)));
diff --git a/runtime/gc/accounting/mod_union_table.h b/runtime/gc/accounting/mod_union_table.h
index c3a90e2..5ae7c77 100644
--- a/runtime/gc/accounting/mod_union_table.h
+++ b/runtime/gc/accounting/mod_union_table.h
@@ -44,7 +44,6 @@ class Heap;
namespace accounting {
-class SpaceBitmap;
class HeapBitmap;
// The mod-union table is the union of modified cards. It is used to allow the card table to be
diff --git a/runtime/gc/accounting/remembered_set.cc b/runtime/gc/accounting/remembered_set.cc
index 56f7caa..044216e 100644
--- a/runtime/gc/accounting/remembered_set.cc
+++ b/runtime/gc/accounting/remembered_set.cc
@@ -112,7 +112,7 @@ void RememberedSet::UpdateAndMarkReferences(MarkHeapReferenceCallback* callback,
bool contains_reference_to_target_space = false;
RememberedSetObjectVisitor obj_visitor(callback, target_space,
&contains_reference_to_target_space, arg);
- SpaceBitmap* bitmap = space_->GetLiveBitmap();
+ ContinuousSpaceBitmap* bitmap = space_->GetLiveBitmap();
CardSet remove_card_set;
for (byte* const card_addr : dirty_cards_) {
contains_reference_to_target_space = false;
diff --git a/runtime/gc/accounting/space_bitmap-inl.h b/runtime/gc/accounting/space_bitmap-inl.h
index 880ff1f..ed140e0 100644
--- a/runtime/gc/accounting/space_bitmap-inl.h
+++ b/runtime/gc/accounting/space_bitmap-inl.h
@@ -17,14 +17,26 @@
#ifndef ART_RUNTIME_GC_ACCOUNTING_SPACE_BITMAP_INL_H_
#define ART_RUNTIME_GC_ACCOUNTING_SPACE_BITMAP_INL_H_
+#include "space_bitmap.h"
+
#include "base/logging.h"
+#include "dex_file-inl.h"
+#include "heap_bitmap.h"
+#include "mirror/art_field-inl.h"
+#include "mirror/class-inl.h"
+#include "mirror/object-inl.h"
+#include "mirror/object_array-inl.h"
+#include "object_utils.h"
+#include "space_bitmap-inl.h"
+#include "UniquePtr.h"
#include "utils.h"
namespace art {
namespace gc {
namespace accounting {
-inline bool SpaceBitmap::AtomicTestAndSet(const mirror::Object* obj) {
+template<size_t kAlignment>
+inline bool SpaceBitmap<kAlignment>::AtomicTestAndSet(const mirror::Object* obj) {
uintptr_t addr = reinterpret_cast<uintptr_t>(obj);
DCHECK_GE(addr, heap_begin_);
const uintptr_t offset = addr - heap_begin_;
@@ -45,7 +57,8 @@ inline bool SpaceBitmap::AtomicTestAndSet(const mirror::Object* obj) {
return false;
}
-inline bool SpaceBitmap::Test(const mirror::Object* obj) const {
+template<size_t kAlignment>
+inline bool SpaceBitmap<kAlignment>::Test(const mirror::Object* obj) const {
uintptr_t addr = reinterpret_cast<uintptr_t>(obj);
DCHECK(HasAddress(obj)) << obj;
DCHECK(bitmap_begin_ != NULL);
@@ -54,9 +67,9 @@ inline bool SpaceBitmap::Test(const mirror::Object* obj) const {
return (bitmap_begin_[OffsetToIndex(offset)] & OffsetToMask(offset)) != 0;
}
-template <typename Visitor>
-void SpaceBitmap::VisitMarkedRange(uintptr_t visit_begin, uintptr_t visit_end,
- const Visitor& visitor) const {
+template<size_t kAlignment> template<typename Visitor>
+inline void SpaceBitmap<kAlignment>::VisitMarkedRange(uintptr_t visit_begin, uintptr_t visit_end,
+ const Visitor& visitor) const {
DCHECK_LT(visit_begin, visit_end);
#if 0
for (uintptr_t i = visit_begin; i < visit_end; i += kAlignment) {
@@ -148,7 +161,8 @@ void SpaceBitmap::VisitMarkedRange(uintptr_t visit_begin, uintptr_t visit_end,
#endif
}
-inline bool SpaceBitmap::Modify(const mirror::Object* obj, bool do_set) {
+template<size_t kAlignment> template<bool kSetBit>
+inline bool SpaceBitmap<kAlignment>::Modify(const mirror::Object* obj) {
uintptr_t addr = reinterpret_cast<uintptr_t>(obj);
DCHECK_GE(addr, heap_begin_);
const uintptr_t offset = addr - heap_begin_;
@@ -157,15 +171,24 @@ inline bool SpaceBitmap::Modify(const mirror::Object* obj, bool do_set) {
DCHECK_LT(index, bitmap_size_ / kWordSize) << " bitmap_size_ = " << bitmap_size_;
uword* address = &bitmap_begin_[index];
uword old_word = *address;
- if (do_set) {
+ if (kSetBit) {
*address = old_word | mask;
} else {
*address = old_word & ~mask;
}
- DCHECK_EQ(Test(obj), do_set);
+ DCHECK_EQ(Test(obj), kSetBit);
return (old_word & mask) != 0;
}
+template<size_t kAlignment>
+inline std::ostream& operator << (std::ostream& stream, const SpaceBitmap<kAlignment>& bitmap) {
+ return stream
+ << bitmap.GetName() << "["
+ << "begin=" << reinterpret_cast<const void*>(bitmap.HeapBegin())
+ << ",end=" << reinterpret_cast<const void*>(bitmap.HeapLimit())
+ << "]";
+}
+
} // namespace accounting
} // namespace gc
} // namespace art
diff --git a/runtime/gc/accounting/space_bitmap.cc b/runtime/gc/accounting/space_bitmap.cc
index 1957c21..31a1537 100644
--- a/runtime/gc/accounting/space_bitmap.cc
+++ b/runtime/gc/accounting/space_bitmap.cc
@@ -14,68 +14,52 @@
* limitations under the License.
*/
-#include "base/logging.h"
-#include "dex_file-inl.h"
-#include "heap_bitmap.h"
-#include "mirror/art_field-inl.h"
-#include "mirror/class-inl.h"
-#include "mirror/object-inl.h"
-#include "mirror/object_array-inl.h"
-#include "object_utils.h"
#include "space_bitmap-inl.h"
-#include "UniquePtr.h"
-#include "utils.h"
namespace art {
namespace gc {
namespace accounting {
-std::string SpaceBitmap::GetName() const {
- return name_;
-}
-
-void SpaceBitmap::SetName(const std::string& name) {
- name_ = name;
-}
-
-std::string SpaceBitmap::Dump() const {
- return StringPrintf("%s: %p-%p", name_.c_str(),
- reinterpret_cast<void*>(HeapBegin()),
- reinterpret_cast<void*>(HeapLimit()));
-}
-
-void ObjectSet::Walk(ObjectCallback* callback, void* arg) {
- for (const mirror::Object* obj : contained_) {
- callback(const_cast<mirror::Object*>(obj), arg);
- }
-}
-
-SpaceBitmap* SpaceBitmap::CreateFromMemMap(const std::string& name, MemMap* mem_map,
- byte* heap_begin, size_t heap_capacity) {
+template<size_t kAlignment>
+SpaceBitmap<kAlignment>* SpaceBitmap<kAlignment>::CreateFromMemMap(
+ const std::string& name, MemMap* mem_map, byte* heap_begin, size_t heap_capacity) {
CHECK(mem_map != nullptr);
uword* bitmap_begin = reinterpret_cast<uword*>(mem_map->Begin());
- size_t bitmap_size = OffsetToIndex(RoundUp(heap_capacity, kAlignment * kBitsPerWord)) * kWordSize;
+ const uint64_t kBytesCoveredPerWord = kAlignment * kBitsPerWord;
+ size_t bitmap_size = (RoundUp(static_cast<uint64_t>(heap_capacity), kBytesCoveredPerWord) /
+ kBytesCoveredPerWord) * kWordSize;
return new SpaceBitmap(name, mem_map, bitmap_begin, bitmap_size, heap_begin);
}
-SpaceBitmap* SpaceBitmap::Create(const std::string& name, byte* heap_begin, size_t heap_capacity) {
- CHECK(heap_begin != NULL);
+template<size_t kAlignment>
+SpaceBitmap<kAlignment>::SpaceBitmap(const std::string& name, MemMap* mem_map, uword* bitmap_begin,
+ size_t bitmap_size, const void* heap_begin)
+ : mem_map_(mem_map), bitmap_begin_(bitmap_begin), bitmap_size_(bitmap_size),
+ heap_begin_(reinterpret_cast<uintptr_t>(heap_begin)),
+ name_(name) {
+ CHECK(bitmap_begin_ != nullptr);
+ CHECK_NE(bitmap_size, 0U);
+}
+
+template<size_t kAlignment>
+SpaceBitmap<kAlignment>* SpaceBitmap<kAlignment>::Create(
+ const std::string& name, byte* heap_begin, size_t heap_capacity) {
// Round up since heap_capacity is not necessarily a multiple of kAlignment * kBitsPerWord.
- size_t bitmap_size = OffsetToIndex(RoundUp(heap_capacity, kAlignment * kBitsPerWord)) * kWordSize;
+ const uint64_t kBytesCoveredPerWord = kAlignment * kBitsPerWord;
+ size_t bitmap_size = (RoundUp(static_cast<uint64_t>(heap_capacity), kBytesCoveredPerWord) /
+ kBytesCoveredPerWord) * kWordSize;
std::string error_msg;
- UniquePtr<MemMap> mem_map(MemMap::MapAnonymous(name.c_str(), NULL, bitmap_size,
+ UniquePtr<MemMap> mem_map(MemMap::MapAnonymous(name.c_str(), nullptr, bitmap_size,
PROT_READ | PROT_WRITE, false, &error_msg));
if (UNLIKELY(mem_map.get() == nullptr)) {
LOG(ERROR) << "Failed to allocate bitmap " << name << ": " << error_msg;
- return NULL;
+ return nullptr;
}
return CreateFromMemMap(name, mem_map.release(), heap_begin, heap_capacity);
}
-// Clean up any resources associated with the bitmap.
-SpaceBitmap::~SpaceBitmap() {}
-
-void SpaceBitmap::SetHeapLimit(uintptr_t new_end) {
+template<size_t kAlignment>
+void SpaceBitmap<kAlignment>::SetHeapLimit(uintptr_t new_end) {
DCHECK(IsAligned<kBitsPerWord * kAlignment>(new_end));
size_t new_size = OffsetToIndex(new_end - heap_begin_) * kWordSize;
if (new_size < bitmap_size_) {
@@ -85,7 +69,8 @@ void SpaceBitmap::SetHeapLimit(uintptr_t new_end) {
// should be marked.
}
-void SpaceBitmap::Clear() {
+template<size_t kAlignment>
+void SpaceBitmap<kAlignment>::Clear() {
if (bitmap_begin_ != NULL) {
// This returns the memory to the system. Successive page faults will return zeroed memory.
int result = madvise(bitmap_begin_, bitmap_size_, MADV_DONTNEED);
@@ -95,14 +80,14 @@ void SpaceBitmap::Clear() {
}
}
-void SpaceBitmap::CopyFrom(SpaceBitmap* source_bitmap) {
+template<size_t kAlignment>
+void SpaceBitmap<kAlignment>::CopyFrom(SpaceBitmap* source_bitmap) {
DCHECK_EQ(Size(), source_bitmap->Size());
std::copy(source_bitmap->Begin(), source_bitmap->Begin() + source_bitmap->Size() / kWordSize, Begin());
}
-// Visits set bits in address order. The callback is not permitted to
-// change the bitmap bits or max during the traversal.
-void SpaceBitmap::Walk(ObjectCallback* callback, void* arg) {
+template<size_t kAlignment>
+void SpaceBitmap<kAlignment>::Walk(ObjectCallback* callback, void* arg) {
CHECK(bitmap_begin_ != NULL);
CHECK(callback != NULL);
@@ -122,17 +107,13 @@ void SpaceBitmap::Walk(ObjectCallback* callback, void* arg) {
}
}
-// Walk through the bitmaps in increasing address order, and find the
-// object pointers that correspond to garbage objects. Call
-// <callback> zero or more times with lists of these object pointers.
-//
-// The callback is not permitted to increase the max of either bitmap.
-void SpaceBitmap::SweepWalk(const SpaceBitmap& live_bitmap,
- const SpaceBitmap& mark_bitmap,
- uintptr_t sweep_begin, uintptr_t sweep_end,
- SpaceBitmap::SweepCallback* callback, void* arg) {
- CHECK(live_bitmap.bitmap_begin_ != NULL);
- CHECK(mark_bitmap.bitmap_begin_ != NULL);
+template<size_t kAlignment>
+void SpaceBitmap<kAlignment>::SweepWalk(const SpaceBitmap<kAlignment>& live_bitmap,
+ const SpaceBitmap<kAlignment>& mark_bitmap,
+ uintptr_t sweep_begin, uintptr_t sweep_end,
+ SpaceBitmap::SweepCallback* callback, void* arg) {
+ CHECK(live_bitmap.bitmap_begin_ != nullptr);
+ CHECK(mark_bitmap.bitmap_begin_ != nullptr);
CHECK_EQ(live_bitmap.heap_begin_, mark_bitmap.heap_begin_);
CHECK_EQ(live_bitmap.bitmap_size_, mark_bitmap.bitmap_size_);
CHECK(callback != NULL);
@@ -174,13 +155,10 @@ void SpaceBitmap::SweepWalk(const SpaceBitmap& live_bitmap,
}
}
-static void WalkFieldsInOrder(SpaceBitmap* visited, ObjectCallback* callback, mirror::Object* obj,
- void* arg);
-
-// Walk instance fields of the given Class. Separate function to allow recursion on the super
-// class.
-static void WalkInstanceFields(SpaceBitmap* visited, ObjectCallback* callback, mirror::Object* obj,
- mirror::Class* klass, void* arg)
+template<size_t kAlignment>
+void SpaceBitmap<kAlignment>::WalkInstanceFields(SpaceBitmap<kAlignment>* visited,
+ ObjectCallback* callback, mirror::Object* obj,
+ mirror::Class* klass, void* arg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// Visit fields of parent classes first.
mirror::Class* super = klass->GetSuperClass();
@@ -203,10 +181,10 @@ static void WalkInstanceFields(SpaceBitmap* visited, ObjectCallback* callback, m
}
}
-// For an unvisited object, visit it then all its children found via fields.
-static void WalkFieldsInOrder(SpaceBitmap* visited, ObjectCallback* callback, mirror::Object* obj,
- void* arg)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+template<size_t kAlignment>
+void SpaceBitmap<kAlignment>::WalkFieldsInOrder(SpaceBitmap<kAlignment>* visited,
+ ObjectCallback* callback, mirror::Object* obj,
+ void* arg) {
if (visited->Test(obj)) {
return;
}
@@ -244,14 +222,13 @@ static void WalkFieldsInOrder(SpaceBitmap* visited, ObjectCallback* callback, mi
}
}
-// Visits set bits with an in order traversal. The callback is not permitted to change the bitmap
-// bits or max during the traversal.
-void SpaceBitmap::InOrderWalk(ObjectCallback* callback, void* arg) {
- UniquePtr<SpaceBitmap> visited(Create("bitmap for in-order walk",
- reinterpret_cast<byte*>(heap_begin_),
- IndexToOffset(bitmap_size_ / kWordSize)));
- CHECK(bitmap_begin_ != NULL);
- CHECK(callback != NULL);
+template<size_t kAlignment>
+void SpaceBitmap<kAlignment>::InOrderWalk(ObjectCallback* callback, void* arg) {
+ UniquePtr<SpaceBitmap<kAlignment>> visited(
+ Create("bitmap for in-order walk", reinterpret_cast<byte*>(heap_begin_),
+ IndexToOffset(bitmap_size_ / kWordSize)));
+ CHECK(bitmap_begin_ != nullptr);
+ CHECK(callback != nullptr);
uintptr_t end = Size() / kWordSize;
for (uintptr_t i = 0; i < end; ++i) {
// Need uint for unsigned shift.
@@ -268,13 +245,8 @@ void SpaceBitmap::InOrderWalk(ObjectCallback* callback, void* arg) {
}
}
-std::ostream& operator << (std::ostream& stream, const SpaceBitmap& bitmap) {
- return stream
- << bitmap.GetName() << "["
- << "begin=" << reinterpret_cast<const void*>(bitmap.HeapBegin())
- << ",end=" << reinterpret_cast<const void*>(bitmap.HeapLimit())
- << "]";
-}
+template class SpaceBitmap<kObjectAlignment>;
+template class SpaceBitmap<kPageSize>;
} // namespace accounting
} // namespace gc
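
The subtle part of Create()/CreateFromMemMap() above is the size arithmetic: each bitmap word covers kAlignment * kBitsPerWord bytes of heap, and the round-up is now carried out in 64-bit arithmetic so page-sized alignments cannot overflow the intermediate value. A small self-contained check of that arithmetic, with illustrative numbers (not ART code):

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint64_t kWordSize = sizeof(uintptr_t);          // bytes per bitmap word (8 on a 64-bit build)
      const uint64_t kBitsPerWord = kWordSize * 8;
      const uint64_t kAlignment = 4096;                      // page alignment, as used by LargeObjectBitmap
      const uint64_t heap_capacity = 64ull * 1024 * 1024;    // 64 MB heap, chosen for illustration

      // Each bitmap word covers kAlignment * kBitsPerWord bytes of heap.
      const uint64_t bytes_covered_per_word = kAlignment * kBitsPerWord;
      const uint64_t rounded_capacity = (heap_capacity + bytes_covered_per_word - 1) /
                                        bytes_covered_per_word * bytes_covered_per_word;
      const uint64_t bitmap_size = rounded_capacity / bytes_covered_per_word * kWordSize;

      // 64 MB / (4096 * 64) = 256 words, i.e. 2048 bytes of bitmap on a 64-bit build.
      std::printf("bitmap_size = %llu bytes\n", static_cast<unsigned long long>(bitmap_size));
      return 0;
    }
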
diff --git a/runtime/gc/accounting/space_bitmap.h b/runtime/gc/accounting/space_bitmap.h
index a88f3e4..df3fd37 100644
--- a/runtime/gc/accounting/space_bitmap.h
+++ b/runtime/gc/accounting/space_bitmap.h
@@ -38,11 +38,9 @@ namespace mirror {
namespace gc {
namespace accounting {
+template<size_t kAlignment>
class SpaceBitmap {
public:
- // Alignment of objects within spaces.
- static const size_t kAlignment = 8;
-
typedef void ScanCallback(mirror::Object* obj, void* finger, void* arg);
typedef void SweepCallback(size_t ptr_count, mirror::Object** ptrs, void* arg);
@@ -57,30 +55,31 @@ class SpaceBitmap {
static SpaceBitmap* CreateFromMemMap(const std::string& name, MemMap* mem_map,
byte* heap_begin, size_t heap_capacity);
- ~SpaceBitmap();
+ ~SpaceBitmap() {
+ }
// <offset> is the difference from .base to a pointer address.
// <index> is the index of .bits that contains the bit representing
// <offset>.
- static size_t OffsetToIndex(size_t offset) {
+ static size_t OffsetToIndex(size_t offset) ALWAYS_INLINE {
return offset / kAlignment / kBitsPerWord;
}
- static uintptr_t IndexToOffset(size_t index) {
+ static uintptr_t IndexToOffset(size_t index) ALWAYS_INLINE {
return static_cast<uintptr_t>(index * kAlignment * kBitsPerWord);
}
// Bits are packed in the obvious way.
- static uword OffsetToMask(uintptr_t offset) {
+ static uword OffsetToMask(uintptr_t offset) ALWAYS_INLINE {
return (static_cast<size_t>(1)) << ((offset / kAlignment) % kBitsPerWord);
}
- inline bool Set(const mirror::Object* obj) {
- return Modify(obj, true);
+ bool Set(const mirror::Object* obj) ALWAYS_INLINE {
+ return Modify<true>(obj);
}
- inline bool Clear(const mirror::Object* obj) {
- return Modify(obj, false);
+ bool Clear(const mirror::Object* obj) ALWAYS_INLINE {
+ return Modify<false>(obj);
}
// Returns true if the object was previously marked.
@@ -123,20 +122,26 @@ class SpaceBitmap {
}
}
- /**
- * Visit the live objects in the range [visit_begin, visit_end).
- */
+ // Visit the live objects in the range [visit_begin, visit_end).
+ // TODO: Use lock annotations when clang is fixed.
+ // EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
template <typename Visitor>
void VisitMarkedRange(uintptr_t visit_begin, uintptr_t visit_end, const Visitor& visitor) const
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ NO_THREAD_SAFETY_ANALYSIS;
+ // Visits set bits in address order. The callback is not permitted to change the bitmap bits or
+ // max during the traversal.
void Walk(ObjectCallback* callback, void* arg)
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+ // Visits set bits with an in order traversal. The callback is not permitted to change the bitmap
+ // bits or max during the traversal.
void InOrderWalk(ObjectCallback* callback, void* arg)
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+ // Walk through the bitmaps in increasing address order, and find the object pointers that
+ // correspond to garbage objects. Call <callback> zero or more times with lists of these object
+ // pointers. The callback is not permitted to increase the max of either bitmap.
static void SweepWalk(const SpaceBitmap& live, const SpaceBitmap& mark, uintptr_t base,
uintptr_t max, SweepCallback* thunk, void* arg);
@@ -169,10 +174,18 @@ class SpaceBitmap {
// Set the max address which can covered by the bitmap.
void SetHeapLimit(uintptr_t new_end);
- std::string GetName() const;
- void SetName(const std::string& name);
+ std::string GetName() const {
+ return name_;
+ }
+
+ void SetName(const std::string& name) {
+ name_ = name;
+ }
- std::string Dump() const;
+ std::string Dump() const {
+ return StringPrintf("%s: %p-%p", name_.c_str(), reinterpret_cast<void*>(HeapBegin()),
+ reinterpret_cast<void*>(HeapLimit()));
+ }
const void* GetObjectWordAddress(const mirror::Object* obj) const {
uintptr_t addr = reinterpret_cast<uintptr_t>(obj);
@@ -185,12 +198,19 @@ class SpaceBitmap {
// TODO: heap_end_ is initialized so that the heap bitmap is empty, this doesn't require the -1,
// however, we document that this is expected on heap_end_
SpaceBitmap(const std::string& name, MemMap* mem_map, uword* bitmap_begin, size_t bitmap_size,
- const void* heap_begin)
- : mem_map_(mem_map), bitmap_begin_(bitmap_begin), bitmap_size_(bitmap_size),
- heap_begin_(reinterpret_cast<uintptr_t>(heap_begin)),
- name_(name) {}
-
- bool Modify(const mirror::Object* obj, bool do_set);
+ const void* heap_begin);
+
+ template<bool kSetBit>
+ bool Modify(const mirror::Object* obj);
+
+ // For an unvisited object, visit it then all its children found via fields.
+ static void WalkFieldsInOrder(SpaceBitmap* visited, ObjectCallback* callback, mirror::Object* obj,
+ void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ // Walk instance fields of the given Class. Separate function to allow recursion on the super
+ // class.
+ static void WalkInstanceFields(SpaceBitmap<kAlignment>* visited, ObjectCallback* callback,
+ mirror::Object* obj, mirror::Class* klass, void* arg)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Backing storage for bitmap.
UniquePtr<MemMap> mem_map_;
@@ -209,70 +229,11 @@ class SpaceBitmap {
std::string name_;
};
-// Like a bitmap except it keeps track of objects using sets.
-class ObjectSet {
- public:
- typedef std::set<
- const mirror::Object*, std::less<const mirror::Object*>,
- GcAllocator<const mirror::Object*> > Objects;
-
- bool IsEmpty() const {
- return contained_.empty();
- }
-
- inline void Set(const mirror::Object* obj) {
- contained_.insert(obj);
- }
-
- inline void Clear(const mirror::Object* obj) {
- Objects::iterator found = contained_.find(obj);
- if (found != contained_.end()) {
- contained_.erase(found);
- }
- }
-
- void Clear() {
- contained_.clear();
- }
-
- inline bool Test(const mirror::Object* obj) const {
- return contained_.find(obj) != contained_.end();
- }
-
- const std::string& GetName() const {
- return name_;
- }
-
- void SetName(const std::string& name) {
- name_ = name;
- }
-
- void CopyFrom(const ObjectSet& space_set) {
- contained_ = space_set.contained_;
- }
-
- void Walk(ObjectCallback* callback, void* arg) SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
-
- template <typename Visitor>
- void Visit(const Visitor& visitor) NO_THREAD_SAFETY_ANALYSIS {
- for (const mirror::Object* obj : contained_) {
- visitor(const_cast<mirror::Object*>(obj));
- }
- }
-
- explicit ObjectSet(const std::string& name) : name_(name) {}
- ~ObjectSet() {}
-
- Objects& GetObjects() {
- return contained_;
- }
-
- private:
- std::string name_;
- Objects contained_;
-};
+typedef SpaceBitmap<kObjectAlignment> ContinuousSpaceBitmap;
+typedef SpaceBitmap<kLargeObjectAlignment> LargeObjectBitmap;
-std::ostream& operator << (std::ostream& stream, const SpaceBitmap& bitmap);
+template<size_t kAlignment>
+std::ostream& operator << (std::ostream& stream, const SpaceBitmap<kAlignment>& bitmap);
} // namespace accounting
} // namespace gc
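
In the header above, Set() and Clear() now forward to Modify<true>() and Modify<false>(): the set/clear choice moved from a runtime bool to a template parameter, so each caller gets a branch resolved at compile time. A tiny self-contained illustration of that pattern (hypothetical names, not the ART code):

    #include <cstdint>
    #include <cstdio>

    // kSetBit is fixed at compile time, so each instantiation keeps only one branch.
    // Returns whether the bit was previously set, matching the patch's contract.
    template <bool kSetBit>
    bool Modify(uint64_t* word, unsigned bit) {
      const uint64_t mask = static_cast<uint64_t>(1) << bit;
      const uint64_t old_word = *word;
      if (kSetBit) {
        *word = old_word | mask;
      } else {
        *word = old_word & ~mask;
      }
      return (old_word & mask) != 0;
    }

    int main() {
      uint64_t word = 0;
      const bool was_set = Modify<true>(&word, 3);     // Set: returns false, bit was clear
      const bool still_set = Modify<false>(&word, 3);  // Clear: returns true, bit was set
      std::printf("%d %d %llu\n", was_set, still_set, static_cast<unsigned long long>(word));  // 0 1 0
      return 0;
    }
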
diff --git a/runtime/gc/accounting/space_bitmap_test.cc b/runtime/gc/accounting/space_bitmap_test.cc
index 68994a8..972f94d 100644
--- a/runtime/gc/accounting/space_bitmap_test.cc
+++ b/runtime/gc/accounting/space_bitmap_test.cc
@@ -32,14 +32,15 @@ class SpaceBitmapTest : public CommonRuntimeTest {};
TEST_F(SpaceBitmapTest, Init) {
byte* heap_begin = reinterpret_cast<byte*>(0x10000000);
size_t heap_capacity = 16 * MB;
- UniquePtr<SpaceBitmap> space_bitmap(SpaceBitmap::Create("test bitmap",
- heap_begin, heap_capacity));
+ UniquePtr<ContinuousSpaceBitmap> space_bitmap(
+ ContinuousSpaceBitmap::Create("test bitmap", heap_begin, heap_capacity));
EXPECT_TRUE(space_bitmap.get() != NULL);
}
class BitmapVerify {
public:
- BitmapVerify(SpaceBitmap* bitmap, const mirror::Object* begin, const mirror::Object* end)
+ BitmapVerify(ContinuousSpaceBitmap* bitmap, const mirror::Object* begin,
+ const mirror::Object* end)
: bitmap_(bitmap),
begin_(begin),
end_(end) {}
@@ -50,7 +51,7 @@ class BitmapVerify {
EXPECT_EQ(bitmap_->Test(obj), ((reinterpret_cast<uintptr_t>(obj) & 0xF) != 0));
}
- SpaceBitmap* bitmap_;
+ ContinuousSpaceBitmap* bitmap_;
const mirror::Object* begin_;
const mirror::Object* end_;
};
@@ -59,14 +60,14 @@ TEST_F(SpaceBitmapTest, ScanRange) {
byte* heap_begin = reinterpret_cast<byte*>(0x10000000);
size_t heap_capacity = 16 * MB;
- UniquePtr<SpaceBitmap> space_bitmap(SpaceBitmap::Create("test bitmap",
- heap_begin, heap_capacity));
+ UniquePtr<ContinuousSpaceBitmap> space_bitmap(
+ ContinuousSpaceBitmap::Create("test bitmap", heap_begin, heap_capacity));
EXPECT_TRUE(space_bitmap.get() != NULL);
// Set all the odd bits in the first BitsPerWord * 3 to one.
for (size_t j = 0; j < kBitsPerWord * 3; ++j) {
const mirror::Object* obj =
- reinterpret_cast<mirror::Object*>(heap_begin + j * SpaceBitmap::kAlignment);
+ reinterpret_cast<mirror::Object*>(heap_begin + j * kObjectAlignment);
if (reinterpret_cast<uintptr_t>(obj) & 0xF) {
space_bitmap->Set(obj);
}
@@ -77,10 +78,10 @@ TEST_F(SpaceBitmapTest, ScanRange) {
// words.
for (size_t i = 0; i < static_cast<size_t>(kBitsPerWord); ++i) {
mirror::Object* start =
- reinterpret_cast<mirror::Object*>(heap_begin + i * SpaceBitmap::kAlignment);
+ reinterpret_cast<mirror::Object*>(heap_begin + i * kObjectAlignment);
for (size_t j = 0; j < static_cast<size_t>(kBitsPerWord * 2); ++j) {
mirror::Object* end =
- reinterpret_cast<mirror::Object*>(heap_begin + (i + j) * SpaceBitmap::kAlignment);
+ reinterpret_cast<mirror::Object*>(heap_begin + (i + j) * kObjectAlignment);
BitmapVerify(space_bitmap.get(), start, end);
}
}
@@ -109,7 +110,8 @@ class RandGen {
uint32_t val_;
};
-void compat_test() NO_THREAD_SAFETY_ANALYSIS {
+template <size_t kAlignment>
+void RunTest() NO_THREAD_SAFETY_ANALYSIS {
byte* heap_begin = reinterpret_cast<byte*>(0x10000000);
size_t heap_capacity = 16 * MB;
@@ -118,11 +120,11 @@ void compat_test() NO_THREAD_SAFETY_ANALYSIS {
for (int i = 0; i < 5 ; ++i) {
- UniquePtr<SpaceBitmap> space_bitmap(SpaceBitmap::Create("test bitmap",
- heap_begin, heap_capacity));
+ UniquePtr<ContinuousSpaceBitmap> space_bitmap(
+ ContinuousSpaceBitmap::Create("test bitmap", heap_begin, heap_capacity));
for (int j = 0; j < 10000; ++j) {
- size_t offset = (r.next() % heap_capacity) & ~(0x7);
+ size_t offset = RoundDown(r.next() % heap_capacity, kAlignment);
bool set = r.next() % 2 == 1;
if (set) {
@@ -136,15 +138,15 @@ void compat_test() NO_THREAD_SAFETY_ANALYSIS {
size_t count = 0;
SimpleCounter c(&count);
- size_t offset = (r.next() % heap_capacity) & ~(0x7);
+ size_t offset = RoundDown(r.next() % heap_capacity, kAlignment);
size_t remain = heap_capacity - offset;
- size_t end = offset + ((r.next() % (remain + 1)) & ~(0x7));
+ size_t end = offset + RoundDown(r.next() % (remain + 1), kAlignment);
space_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(heap_begin) + offset,
reinterpret_cast<uintptr_t>(heap_begin) + end, c);
size_t manual = 0;
- for (uintptr_t k = offset; k < end; k += kObjectAlignment) {
+ for (uintptr_t k = offset; k < end; k += kAlignment) {
if (space_bitmap->Test(reinterpret_cast<mirror::Object*>(heap_begin + k))) {
manual++;
}
@@ -155,8 +157,12 @@ void compat_test() NO_THREAD_SAFETY_ANALYSIS {
}
}
-TEST_F(SpaceBitmapTest, Visitor) {
- compat_test();
+TEST_F(SpaceBitmapTest, VisitorObjectAlignment) {
+ RunTest<kObjectAlignment>();
+}
+
+TEST_F(SpaceBitmapTest, VisitorPageAlignment) {
+ RunTest<kPageSize>();
}
} // namespace accounting
diff --git a/runtime/gc/collector/garbage_collector.cc b/runtime/gc/collector/garbage_collector.cc
index a700c73..6380cba 100644
--- a/runtime/gc/collector/garbage_collector.cc
+++ b/runtime/gc/collector/garbage_collector.cc
@@ -174,8 +174,8 @@ void GarbageCollector::SwapBitmaps() {
if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect ||
(gc_type == kGcTypeFull &&
space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect)) {
- accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
- accounting::SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
+ accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
+ accounting::ContinuousSpaceBitmap* mark_bitmap = space->GetMarkBitmap();
if (live_bitmap != nullptr && live_bitmap != mark_bitmap) {
heap_->GetLiveBitmap()->ReplaceBitmap(live_bitmap, mark_bitmap);
heap_->GetMarkBitmap()->ReplaceBitmap(mark_bitmap, live_bitmap);
@@ -185,12 +185,12 @@ void GarbageCollector::SwapBitmaps() {
}
}
for (const auto& disc_space : GetHeap()->GetDiscontinuousSpaces()) {
- space::LargeObjectSpace* space = down_cast<space::LargeObjectSpace*>(disc_space);
- accounting::ObjectSet* live_set = space->GetLiveObjects();
- accounting::ObjectSet* mark_set = space->GetMarkObjects();
- heap_->GetLiveBitmap()->ReplaceObjectSet(live_set, mark_set);
- heap_->GetMarkBitmap()->ReplaceObjectSet(mark_set, live_set);
- down_cast<space::LargeObjectSpace*>(space)->SwapBitmaps();
+ space::LargeObjectSpace* space = disc_space->AsLargeObjectSpace();
+ accounting::LargeObjectBitmap* live_set = space->GetLiveBitmap();
+ accounting::LargeObjectBitmap* mark_set = space->GetMarkBitmap();
+ heap_->GetLiveBitmap()->ReplaceLargeObjectBitmap(live_set, mark_set);
+ heap_->GetMarkBitmap()->ReplaceLargeObjectBitmap(mark_set, live_set);
+ space->SwapBitmaps();
}
}
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index bb41b57..8af4fd8 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -99,7 +99,6 @@ MarkSweep::MarkSweep(Heap* heap, bool is_concurrent, const std::string& name_pre
name_prefix +
(is_concurrent ? "concurrent mark sweep": "mark sweep")),
gc_barrier_(new Barrier(0)),
- large_object_lock_("mark sweep large object lock", kMarkSweepLargeObjectLock),
mark_stack_lock_("mark sweep mark stack lock", kMarkSweepMarkStackLock),
is_concurrent_(is_concurrent) {
}
@@ -123,7 +122,6 @@ void MarkSweep::InitializePhase() {
mark_immune_count_ = 0;
mark_fastpath_count_ = 0;
mark_slowpath_count_ = 0;
- FindDefaultSpaceBitmap();
{
// TODO: I don't think we should need heap bitmap lock to get the mark bitmap.
ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
@@ -293,15 +291,21 @@ void MarkSweep::ReclaimPhase() {
void MarkSweep::FindDefaultSpaceBitmap() {
TimingLogger::ScopedSplit split("FindDefaultMarkBitmap", &timings_);
for (const auto& space : GetHeap()->GetContinuousSpaces()) {
- accounting::SpaceBitmap* bitmap = space->GetMarkBitmap();
+ accounting::ContinuousSpaceBitmap* bitmap = space->GetMarkBitmap();
+ // We want to have the main space instead of non moving if possible.
if (bitmap != nullptr &&
space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) {
current_space_bitmap_ = bitmap;
- return;
+ // If we are not the non moving space exit the loop early since this will be good enough.
+ if (space != heap_->GetNonMovingSpace()) {
+ break;
+ }
}
}
- GetHeap()->DumpSpaces();
- LOG(FATAL) << "Could not find a default mark bitmap";
+ if (current_space_bitmap_ == nullptr) {
+ heap_->DumpSpaces();
+ LOG(FATAL) << "Could not find a default mark bitmap";
+ }
}
void MarkSweep::ExpandMarkStack() {
@@ -323,7 +327,7 @@ void MarkSweep::ResizeMarkStack(size_t new_size) {
}
inline void MarkSweep::MarkObjectNonNullParallel(Object* obj) {
- DCHECK(obj != NULL);
+ DCHECK(obj != nullptr);
if (MarkObjectParallel(obj)) {
MutexLock mu(Thread::Current(), mark_stack_lock_);
if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
@@ -344,6 +348,31 @@ void MarkSweep::MarkHeapReferenceCallback(mirror::HeapReference<mirror::Object>*
reinterpret_cast<MarkSweep*>(arg)->MarkObject(ref->AsMirrorPtr());
}
+class MarkSweepMarkObjectSlowPath {
+ public:
+ explicit MarkSweepMarkObjectSlowPath(MarkSweep* mark_sweep) : mark_sweep_(mark_sweep) {
+ }
+
+ void operator()(const Object* obj) const ALWAYS_INLINE {
+ if (kProfileLargeObjects) {
+ // TODO: Differentiate between marking and testing somehow.
+ ++mark_sweep_->large_object_test_;
+ ++mark_sweep_->large_object_mark_;
+ }
+ space::LargeObjectSpace* large_object_space = mark_sweep_->GetHeap()->GetLargeObjectsSpace();
+ if (UNLIKELY(!IsAligned<kPageSize>(obj) ||
+ (kIsDebugBuild && !large_object_space->Contains(obj)))) {
+ LOG(ERROR) << "Tried to mark " << obj << " not contained by any spaces";
+ LOG(ERROR) << "Attempting see if it's a bad root";
+ mark_sweep_->VerifyRoots();
+ LOG(FATAL) << "Can't mark invalid object";
+ }
+ }
+
+ private:
+ MarkSweep* const mark_sweep_;
+};
+
inline void MarkSweep::MarkObjectNonNull(Object* obj) {
DCHECK(obj != nullptr);
if (kUseBakerOrBrooksReadBarrier) {
@@ -354,27 +383,24 @@ inline void MarkSweep::MarkObjectNonNull(Object* obj) {
if (kCountMarkedObjects) {
++mark_immune_count_;
}
- DCHECK(IsMarked(obj));
- return;
- }
- // Try to take advantage of locality of references within a space, failing this find the space
- // the hard way.
- accounting::SpaceBitmap* object_bitmap = current_space_bitmap_;
- if (UNLIKELY(!object_bitmap->HasAddress(obj))) {
- object_bitmap = mark_bitmap_->GetContinuousSpaceBitmap(obj);
+ DCHECK(mark_bitmap_->Test(obj));
+ } else if (LIKELY(current_space_bitmap_->HasAddress(obj))) {
+ if (kCountMarkedObjects) {
+ ++mark_fastpath_count_;
+ }
+ if (UNLIKELY(!current_space_bitmap_->Set(obj))) {
+ PushOnMarkStack(obj); // This object was not previously marked.
+ }
+ } else {
if (kCountMarkedObjects) {
++mark_slowpath_count_;
}
- if (UNLIKELY(object_bitmap == nullptr)) {
- MarkLargeObject(obj, true);
- return;
+ MarkSweepMarkObjectSlowPath visitor(this);
+ // TODO: We already know that the object is not in the current_space_bitmap_ but MarkBitmap::Set
+ // will check again.
+ if (!mark_bitmap_->Set(obj, visitor)) {
+ PushOnMarkStack(obj); // Was not already marked, push.
}
- } else if (kCountMarkedObjects) {
- ++mark_fastpath_count_;
- }
- // This object was not previously marked.
- if (!object_bitmap->Set(obj)) {
- PushOnMarkStack(obj);
}
}
@@ -388,34 +414,6 @@ inline void MarkSweep::PushOnMarkStack(Object* obj) {
mark_stack_->PushBack(obj);
}
-// Rare case, probably not worth inlining since it will increase instruction cache miss rate.
-bool MarkSweep::MarkLargeObject(const Object* obj, bool set) {
- // TODO: support >1 discontinuous space.
- space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
- accounting::ObjectSet* large_objects = large_object_space->GetMarkObjects();
- if (kProfileLargeObjects) {
- ++large_object_test_;
- }
- if (UNLIKELY(!large_objects->Test(obj))) {
- if (!large_object_space->Contains(obj)) {
- LOG(ERROR) << "Tried to mark " << obj << " not contained by any spaces";
- LOG(ERROR) << "Attempting see if it's a bad root";
- VerifyRoots();
- LOG(FATAL) << "Can't mark bad root";
- }
- if (kProfileLargeObjects) {
- ++large_object_mark_;
- }
- if (set) {
- large_objects->Set(obj);
- } else {
- large_objects->Clear(obj);
- }
- return true;
- }
- return false;
-}
-
inline bool MarkSweep::MarkObjectParallel(const Object* obj) {
DCHECK(obj != nullptr);
if (kUseBakerOrBrooksReadBarrier) {
@@ -428,20 +426,12 @@ inline bool MarkSweep::MarkObjectParallel(const Object* obj) {
}
// Try to take advantage of locality of references within a space, failing this find the space
// the hard way.
- accounting::SpaceBitmap* object_bitmap = current_space_bitmap_;
- if (UNLIKELY(!object_bitmap->HasAddress(obj))) {
- accounting::SpaceBitmap* new_bitmap = mark_bitmap_->GetContinuousSpaceBitmap(obj);
- if (new_bitmap != NULL) {
- object_bitmap = new_bitmap;
- } else {
- // TODO: Remove the Thread::Current here?
- // TODO: Convert this to some kind of atomic marking?
- MutexLock mu(Thread::Current(), large_object_lock_);
- return MarkLargeObject(obj, true);
- }
+ accounting::ContinuousSpaceBitmap* object_bitmap = current_space_bitmap_;
+ if (LIKELY(object_bitmap->HasAddress(obj))) {
+ return !object_bitmap->AtomicTestAndSet(obj);
}
- // Return true if the object was not previously marked.
- return !object_bitmap->AtomicTestAndSet(obj);
+ MarkSweepMarkObjectSlowPath visitor(this);
+ return !mark_bitmap_->AtomicTestAndSet(obj, visitor);
}
// Used to mark objects when processing the mark stack. If an object is null, it is not marked.
@@ -476,7 +466,7 @@ void MarkSweep::VerifyRootCallback(const Object* root, void* arg, size_t vreg,
void MarkSweep::VerifyRoot(const Object* root, size_t vreg, const StackVisitor* visitor,
RootType root_type) {
// See if the root is on any space bitmap.
- if (GetHeap()->GetLiveBitmap()->GetContinuousSpaceBitmap(root) == nullptr) {
+ if (heap_->GetLiveBitmap()->GetContinuousSpaceBitmap(root) == nullptr) {
space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
if (!large_object_space->Contains(root)) {
LOG(ERROR) << "Found invalid root: " << root << " with type " << root_type;
@@ -686,7 +676,8 @@ class MarkStackTask : public Task {
class CardScanTask : public MarkStackTask<false> {
public:
- CardScanTask(ThreadPool* thread_pool, MarkSweep* mark_sweep, accounting::SpaceBitmap* bitmap,
+ CardScanTask(ThreadPool* thread_pool, MarkSweep* mark_sweep,
+ accounting::ContinuousSpaceBitmap* bitmap,
byte* begin, byte* end, byte minimum_age, size_t mark_stack_size,
Object** mark_stack_obj)
: MarkStackTask<false>(thread_pool, mark_sweep, mark_stack_size, mark_stack_obj),
@@ -697,7 +688,7 @@ class CardScanTask : public MarkStackTask<false> {
}
protected:
- accounting::SpaceBitmap* const bitmap_;
+ accounting::ContinuousSpaceBitmap* const bitmap_;
byte* const begin_;
byte* const end_;
const byte minimum_age_;
@@ -719,7 +710,7 @@ class CardScanTask : public MarkStackTask<false> {
size_t MarkSweep::GetThreadCount(bool paused) const {
if (heap_->GetThreadPool() == nullptr || !heap_->CareAboutPauseTimes()) {
- return 0;
+ return 1;
}
if (paused) {
return heap_->GetParallelGCThreadCount() + 1;
@@ -733,7 +724,7 @@ void MarkSweep::ScanGrayObjects(bool paused, byte minimum_age) {
ThreadPool* thread_pool = GetHeap()->GetThreadPool();
size_t thread_count = GetThreadCount(paused);
// The parallel version with only one thread is faster for card scanning, TODO: fix.
- if (kParallelCardScan && thread_count > 0) {
+ if (kParallelCardScan && thread_count > 1) {
Thread* self = Thread::Current();
// Can't have a different split for each space since multiple spaces can have their cards being
// scanned at the same time.
@@ -820,7 +811,7 @@ void MarkSweep::ScanGrayObjects(bool paused, byte minimum_age) {
class RecursiveMarkTask : public MarkStackTask<false> {
public:
RecursiveMarkTask(ThreadPool* thread_pool, MarkSweep* mark_sweep,
- accounting::SpaceBitmap* bitmap, uintptr_t begin, uintptr_t end)
+ accounting::ContinuousSpaceBitmap* bitmap, uintptr_t begin, uintptr_t end)
: MarkStackTask<false>(thread_pool, mark_sweep, 0, NULL),
bitmap_(bitmap),
begin_(begin),
@@ -828,7 +819,7 @@ class RecursiveMarkTask : public MarkStackTask<false> {
}
protected:
- accounting::SpaceBitmap* const bitmap_;
+ accounting::ContinuousSpaceBitmap* const bitmap_;
const uintptr_t begin_;
const uintptr_t end_;
@@ -944,14 +935,11 @@ mirror::Object* MarkSweep::VerifySystemWeakIsLiveCallback(Object* obj, void* arg
void MarkSweep::VerifyIsLive(const Object* obj) {
if (!heap_->GetLiveBitmap()->Test(obj)) {
- space::LargeObjectSpace* large_object_space = heap_->GetLargeObjectsSpace();
- if (!large_object_space->GetLiveObjects()->Test(obj)) {
- if (std::find(heap_->allocation_stack_->Begin(), heap_->allocation_stack_->End(), obj) ==
- heap_->allocation_stack_->End()) {
- // Object not found!
- heap_->DumpSpaces();
- LOG(FATAL) << "Found dead object " << obj;
- }
+ if (std::find(heap_->allocation_stack_->Begin(), heap_->allocation_stack_->End(), obj) ==
+ heap_->allocation_stack_->End()) {
+ // Object not found!
+ heap_->DumpSpaces();
+ LOG(FATAL) << "Found dead object " << obj;
}
}
}
@@ -1045,8 +1033,8 @@ void MarkSweep::SweepArray(accounting::ObjectStack* allocations, bool swap_bitma
// Start by sweeping the continuous spaces.
for (space::ContinuousSpace* space : sweep_spaces) {
space::AllocSpace* alloc_space = space->AsAllocSpace();
- accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
- accounting::SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
+ accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
+ accounting::ContinuousSpaceBitmap* mark_bitmap = space->GetMarkBitmap();
if (swap_bitmaps) {
std::swap(live_bitmap, mark_bitmap);
}
@@ -1086,8 +1074,8 @@ void MarkSweep::SweepArray(accounting::ObjectStack* allocations, bool swap_bitma
}
// Handle the large object space.
space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
- accounting::ObjectSet* large_live_objects = large_object_space->GetLiveObjects();
- accounting::ObjectSet* large_mark_objects = large_object_space->GetMarkObjects();
+ accounting::LargeObjectBitmap* large_live_objects = large_object_space->GetLiveBitmap();
+ accounting::LargeObjectBitmap* large_mark_objects = large_object_space->GetMarkBitmap();
if (swap_bitmaps) {
std::swap(large_live_objects, large_mark_objects);
}
@@ -1131,7 +1119,6 @@ void MarkSweep::Sweep(bool swap_bitmaps) {
timings_.EndSplit();
DCHECK(mark_stack_->IsEmpty());
- TimingLogger::ScopedSplit("Sweep", &timings_);
for (const auto& space : GetHeap()->GetContinuousSpaces()) {
if (space->IsContinuousMemMapAllocSpace()) {
space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
@@ -1149,13 +1136,13 @@ void MarkSweep::Sweep(bool swap_bitmaps) {
}
void MarkSweep::SweepLargeObjects(bool swap_bitmaps) {
- TimingLogger::ScopedSplit("SweepLargeObjects", &timings_);
+ TimingLogger::ScopedSplit split("SweepLargeObjects", &timings_);
size_t freed_objects = 0;
size_t freed_bytes = 0;
- GetHeap()->GetLargeObjectsSpace()->Sweep(swap_bitmaps, &freed_objects, &freed_bytes);
+ heap_->GetLargeObjectsSpace()->Sweep(swap_bitmaps, &freed_objects, &freed_bytes);
freed_large_objects_.FetchAndAdd(freed_objects);
freed_large_object_bytes_.FetchAndAdd(freed_bytes);
- GetHeap()->RecordFree(freed_objects, freed_bytes);
+ heap_->RecordFree(freed_objects, freed_bytes);
}
// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been
diff --git a/runtime/gc/collector/mark_sweep.h b/runtime/gc/collector/mark_sweep.h
index d49e427..41a7764 100644
--- a/runtime/gc/collector/mark_sweep.h
+++ b/runtime/gc/collector/mark_sweep.h
@@ -22,6 +22,7 @@
#include "base/macros.h"
#include "base/mutex.h"
#include "garbage_collector.h"
+#include "gc/accounting/space_bitmap.h"
#include "immune_region.h"
#include "object_callbacks.h"
#include "offsets.h"
@@ -45,7 +46,6 @@ class Heap;
namespace accounting {
template<typename T> class AtomicStack;
typedef AtomicStack<mirror::Object*> ObjectStack;
- class SpaceBitmap;
} // namespace accounting
namespace collector {
@@ -227,11 +227,6 @@ class MarkSweep : public GarbageCollector {
// Marks an object atomically, safe to use from multiple threads.
void MarkObjectNonNullParallel(mirror::Object* obj);
- // Marks or unmarks a large object based on whether or not set is true. If set is true, then we
- // mark, otherwise we unmark.
- bool MarkLargeObject(const mirror::Object* obj, bool set)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) LOCKS_EXCLUDED(large_object_lock_);
-
// Returns true if we need to add obj to a mark stack.
bool MarkObjectParallel(const mirror::Object* obj) NO_THREAD_SAFETY_ANALYSIS;
@@ -283,7 +278,7 @@ class MarkSweep : public GarbageCollector {
// Current space, we check this space first to avoid searching for the appropriate space for an
// object.
- accounting::SpaceBitmap* current_space_bitmap_;
+ accounting::ContinuousSpaceBitmap* current_space_bitmap_;
// Cache the heap's mark bitmap to prevent having to do 2 loads during slow path marking.
accounting::HeapBitmap* mark_bitmap_;
@@ -315,7 +310,6 @@ class MarkSweep : public GarbageCollector {
size_t live_stack_freeze_size_;
UniquePtr<Barrier> gc_barrier_;
- Mutex large_object_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
Mutex mark_stack_lock_ ACQUIRED_AFTER(Locks::classlinker_classes_lock_);
const bool is_concurrent_;
@@ -326,8 +320,6 @@ class MarkSweep : public GarbageCollector {
friend class CheckBitmapVisitor;
friend class CheckReferenceVisitor;
friend class art::gc::Heap;
- friend class InternTableEntryIsUnmarked;
- friend class MarkIfReachesAllocspaceVisitor;
friend class MarkObjectVisitor;
friend class ModUnionCheckReferences;
friend class ModUnionClearCardVisitor;
@@ -336,10 +328,9 @@ class MarkSweep : public GarbageCollector {
friend class ModUnionTableBitmap;
friend class ModUnionTableReferenceCache;
friend class ModUnionScanImageRootVisitor;
- friend class ScanBitmapVisitor;
- friend class ScanImageRootVisitor;
template<bool kUseFinger> friend class MarkStackTask;
friend class FifoMarkStackChunk;
+ friend class MarkSweepMarkObjectSlowPath;
DISALLOW_COPY_AND_ASSIGN(MarkSweep);
};
diff --git a/runtime/gc/collector/semi_space-inl.h b/runtime/gc/collector/semi_space-inl.h
index df731ff..55140f6 100644
--- a/runtime/gc/collector/semi_space-inl.h
+++ b/runtime/gc/collector/semi_space-inl.h
@@ -26,9 +26,24 @@ namespace art {
namespace gc {
namespace collector {
+class BitmapSetSlowPathVisitor {
+ public:
+ explicit BitmapSetSlowPathVisitor(SemiSpace* semi_space) : semi_space_(semi_space) {
+ }
+
+ void operator()(const mirror::Object* obj) const {
+ CHECK(!semi_space_->to_space_->HasAddress(obj)) << "Marking " << obj << " in to_space_";
+    // Marking a large object; make sure it's aligned as a sanity check.
+ CHECK(IsAligned<kPageSize>(obj));
+ }
+
+ private:
+ SemiSpace* const semi_space_;
+};
+
inline mirror::Object* SemiSpace::GetForwardingAddressInFromSpace(mirror::Object* obj) const {
DCHECK(from_space_->HasAddress(obj));
- LockWord lock_word = obj->GetLockWord();
+ LockWord lock_word = obj->GetLockWord(false);
if (lock_word.GetState() != LockWord::kForwardingAddress) {
return nullptr;
}
@@ -53,37 +68,29 @@ inline void SemiSpace::MarkObject(
if (from_space_->HasAddress(obj)) {
mirror::Object* forward_address = GetForwardingAddressInFromSpace(obj);
// If the object has already been moved, return the new forward address.
- if (forward_address == nullptr) {
+ if (UNLIKELY(forward_address == nullptr)) {
forward_address = MarkNonForwardedObject(obj);
DCHECK(forward_address != nullptr);
// Make sure to only update the forwarding address AFTER you copy the object so that the
// monitor word doesn't get stomped over.
- obj->SetLockWord(LockWord::FromForwardingAddress(
- reinterpret_cast<size_t>(forward_address)));
+ obj->SetLockWord(
+ LockWord::FromForwardingAddress(reinterpret_cast<size_t>(forward_address)), false);
// Push the object onto the mark stack for later processing.
MarkStackPush(forward_address);
}
obj_ptr->Assign(forward_address);
} else {
- accounting::SpaceBitmap* object_bitmap =
- heap_->GetMarkBitmap()->GetContinuousSpaceBitmap(obj);
- if (LIKELY(object_bitmap != nullptr)) {
- if (generational_) {
- // If a bump pointer space only collection, we should not
- // reach here as we don't/won't mark the objects in the
- // non-moving space (except for the promoted objects.) Note
- // the non-moving space is added to the immune space.
- DCHECK(whole_heap_collection_);
- }
- if (!object_bitmap->Set(obj)) {
- // This object was not previously marked.
- MarkStackPush(obj);
- }
- } else {
- CHECK(!to_space_->HasAddress(obj)) << "Marking " << obj << " in to_space_";
- if (MarkLargeObject(obj)) {
- MarkStackPush(obj);
- }
+ BitmapSetSlowPathVisitor visitor(this);
+ if (kIsDebugBuild && mark_bitmap_->GetContinuousSpaceBitmap(obj) != nullptr) {
+ // If a bump pointer space only collection, we should not
+ // reach here as we don't/won't mark the objects in the
+ // non-moving space (except for the promoted objects.) Note
+ // the non-moving space is added to the immune space.
+ DCHECK(!generational_ || whole_heap_collection_);
+ }
+ if (!mark_bitmap_->Set(obj, visitor)) {
+ // This object was not previously marked.
+ MarkStackPush(obj);
}
}
}
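
Two mechanisms meet in the rewritten MarkObject above. First, a copied object's forwarding address is recorded in its lock word (LockWord::FromForwardingAddress), so GetForwardingAddressInFromSpace can tell "already moved" from "not yet moved" by the lock-word state. Second, marking outside the from-space now funnels through a single mark_bitmap_->Set(obj, visitor) call; the BitmapSetSlowPathVisitor appears to serve as the slow-path hook for objects that no continuous-space bitmap covers, where it only sanity-checks that the address is a page-aligned large object rather than a stray to-space pointer. A simplified, self-contained sketch of the forwarding-word idea (the tag layout here is invented for illustration; ART uses lock-word state bits instead):

    #include <cstdint>

    // Illustration only: one word that is either an ordinary header word or a
    // forwarding address, distinguished by a low tag bit.
    constexpr std::uintptr_t kForwardTag = 1;  // valid because objects are aligned

    inline std::uintptr_t EncodeForwarding(void* new_address) {
      return reinterpret_cast<std::uintptr_t>(new_address) | kForwardTag;
    }

    inline void* DecodeForwarding(std::uintptr_t word) {
      if ((word & kForwardTag) == 0) {
        return nullptr;  // not yet forwarded
      }
      return reinterpret_cast<void*>(word & ~kForwardTag);
    }

The ordering constraint noted in the hunk follows directly: the copy is taken from the source object, so writing the forwarding address into the source's lock word before copying would make the destination inherit the forwarding word instead of the real monitor word.
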
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index ccb38c4..b67bbb1 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -117,6 +117,8 @@ void SemiSpace::InitializePhase() {
immune_region_.Reset();
is_large_object_space_immune_ = false;
saved_bytes_ = 0;
+ bytes_moved_ = 0;
+ objects_moved_ = 0;
self_ = Thread::Current();
// Do any pre GC verification.
timings_.NewSplit("PreGcVerification");
@@ -124,6 +126,11 @@ void SemiSpace::InitializePhase() {
CHECK(from_space_->CanMoveObjects()) << "Attempting to move from " << *from_space_;
// Set the initial bitmap.
to_space_live_bitmap_ = to_space_->GetLiveBitmap();
+ {
+ // TODO: I don't think we should need heap bitmap lock to get the mark bitmap.
+ ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
+ mark_bitmap_ = heap_->GetMarkBitmap();
+ }
}
void SemiSpace::ProcessReferences(Thread* self) {
@@ -312,8 +319,8 @@ void SemiSpace::MarkReachableObjects() {
accounting::ObjectStack* live_stack = heap_->GetLiveStack();
heap_->MarkAllocStackAsLive(live_stack);
live_stack->Reset();
- timings_.EndSplit();
+ timings_.NewSplit("UpdateAndMarkRememberedSets");
for (auto& space : heap_->GetContinuousSpaces()) {
// If the space is immune and has no mod union table (the
// non-moving space when the bump pointer space only collection is
@@ -333,7 +340,7 @@ void SemiSpace::MarkReachableObjects() {
// remain in the space, that is, the remembered set (and the
// card table) didn't miss any from-space references in the
// space.
- accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
+ accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
SemiSpaceVerifyNoFromSpaceReferencesObjectVisitor visitor(this);
live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
reinterpret_cast<uintptr_t>(space->End()),
@@ -341,7 +348,7 @@ void SemiSpace::MarkReachableObjects() {
}
} else {
DCHECK(rem_set == nullptr);
- accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
+ accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
SemiSpaceScanObjectVisitor visitor(this);
live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
reinterpret_cast<uintptr_t>(space->End()),
@@ -351,6 +358,7 @@ void SemiSpace::MarkReachableObjects() {
}
if (is_large_object_space_immune_) {
+ timings_.NewSplit("VisitLargeObjects");
DCHECK(generational_ && !whole_heap_collection_);
// Delay copying the live set to the marked set until here from
// BindBitmaps() as the large objects on the allocation stack may
@@ -362,13 +370,13 @@ void SemiSpace::MarkReachableObjects() {
// classes (primitive array classes) that could move though they
// don't contain any other references.
space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
- accounting::ObjectSet* large_live_objects = large_object_space->GetLiveObjects();
+ accounting::LargeObjectBitmap* large_live_bitmap = large_object_space->GetLiveBitmap();
SemiSpaceScanObjectVisitor visitor(this);
- for (const Object* obj : large_live_objects->GetObjects()) {
- visitor(const_cast<Object*>(obj));
- }
+ large_live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(large_object_space->Begin()),
+ reinterpret_cast<uintptr_t>(large_object_space->End()),
+ visitor);
}
-
+ timings_.EndSplit();
// Recursively process the mark stack.
ProcessMarkStack();
}
@@ -382,9 +390,9 @@ void SemiSpace::ReclaimPhase() {
}
// Record freed memory.
uint64_t from_bytes = from_space_->GetBytesAllocated();
- uint64_t to_bytes = to_space_->GetBytesAllocated();
+ uint64_t to_bytes = bytes_moved_;
uint64_t from_objects = from_space_->GetObjectsAllocated();
- uint64_t to_objects = to_space_->GetObjectsAllocated();
+ uint64_t to_objects = objects_moved_;
CHECK_LE(to_objects, from_objects);
int64_t freed_bytes = from_bytes - to_bytes;
int64_t freed_objects = from_objects - to_objects;
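
A quick check of the new accounting: freed space is simply what the from-space held minus what was moved out of it, whether the destination was the to-space or the promotion space. With made-up numbers, if the from-space held 10 MB across 20,000 objects and the collector moved 6 MB across 15,000 objects, the phase records freed_bytes = 4 MB and freed_objects = 5,000. Counting moved bytes and objects directly means the collector no longer has to query the to-space and separately re-add the promoted bytes, which is what the removed num_bytes_allocated_.FetchAndAdd(bytes_promoted) call below used to compensate for.
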
@@ -450,19 +458,6 @@ inline void SemiSpace::MarkStackPush(Object* obj) {
mark_stack_->PushBack(obj);
}
-// Rare case, probably not worth inlining since it will increase instruction cache miss rate.
-bool SemiSpace::MarkLargeObject(const Object* obj) {
- // TODO: support >1 discontinuous space.
- space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
- DCHECK(large_object_space->Contains(obj));
- accounting::ObjectSet* large_objects = large_object_space->GetMarkObjects();
- if (UNLIKELY(!large_objects->Test(obj))) {
- large_objects->Set(obj);
- return true;
- }
- return false;
-}
-
static inline size_t CopyAvoidingDirtyingPages(void* dest, const void* src, size_t size) {
if (LIKELY(size <= static_cast<size_t>(kPageSize))) {
// We will dirty the current page and somewhere in the middle of the next page. This means
@@ -521,23 +516,21 @@ mirror::Object* SemiSpace::MarkNonForwardedObject(mirror::Object* obj) {
// If it's allocated before the last GC (older), move
// (pseudo-promote) it to the main free list space (as sort
// of an old generation.)
- size_t bytes_promoted;
space::MallocSpace* promo_dest_space = GetHeap()->GetPrimaryFreeListSpace();
- forward_address = promo_dest_space->Alloc(self_, object_size, &bytes_promoted, nullptr);
- if (forward_address == nullptr) {
+ forward_address = promo_dest_space->Alloc(self_, object_size, &bytes_allocated, nullptr);
+ if (UNLIKELY(forward_address == nullptr)) {
// If out of space, fall back to the to-space.
forward_address = to_space_->Alloc(self_, object_size, &bytes_allocated, nullptr);
} else {
- GetHeap()->num_bytes_allocated_.FetchAndAdd(bytes_promoted);
- bytes_promoted_ += bytes_promoted;
+ bytes_promoted_ += bytes_allocated;
      // Dirty the card at the destination as it may contain
// references (including the class pointer) to the bump pointer
// space.
GetHeap()->WriteBarrierEveryFieldOf(forward_address);
// Handle the bitmaps marking.
- accounting::SpaceBitmap* live_bitmap = promo_dest_space->GetLiveBitmap();
+ accounting::ContinuousSpaceBitmap* live_bitmap = promo_dest_space->GetLiveBitmap();
DCHECK(live_bitmap != nullptr);
- accounting::SpaceBitmap* mark_bitmap = promo_dest_space->GetMarkBitmap();
+ accounting::ContinuousSpaceBitmap* mark_bitmap = promo_dest_space->GetMarkBitmap();
DCHECK(mark_bitmap != nullptr);
DCHECK(!live_bitmap->Test(forward_address));
if (!whole_heap_collection_) {
@@ -573,6 +566,8 @@ mirror::Object* SemiSpace::MarkNonForwardedObject(mirror::Object* obj) {
// If it's allocated after the last GC (younger), copy it to the to-space.
forward_address = to_space_->Alloc(self_, object_size, &bytes_allocated, nullptr);
}
+ ++objects_moved_;
+ bytes_moved_ += bytes_allocated;
// Copy over the object and add it to the mark stack since we still need to update its
// references.
saved_bytes_ +=
@@ -619,10 +614,9 @@ void SemiSpace::MarkRootCallback(Object** root, void* arg, uint32_t /*thread_id*
// Marks all objects in the root set.
void SemiSpace::MarkRoots() {
- timings_.StartSplit("MarkRoots");
+ timings_.NewSplit("MarkRoots");
// TODO: Visit up image roots as well?
Runtime::Current()->VisitRoots(MarkRootCallback, this);
- timings_.EndSplit();
}
mirror::Object* SemiSpace::MarkedForwardingAddressCallback(mirror::Object* object, void* arg) {
@@ -641,7 +635,7 @@ bool SemiSpace::ShouldSweepSpace(space::ContinuousSpace* space) const {
void SemiSpace::Sweep(bool swap_bitmaps) {
DCHECK(mark_stack_->IsEmpty());
- TimingLogger::ScopedSplit("Sweep", &timings_);
+ TimingLogger::ScopedSplit split("Sweep", &timings_);
for (const auto& space : GetHeap()->GetContinuousSpaces()) {
if (space->IsContinuousMemMapAllocSpace()) {
space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
@@ -665,13 +659,13 @@ void SemiSpace::Sweep(bool swap_bitmaps) {
void SemiSpace::SweepLargeObjects(bool swap_bitmaps) {
DCHECK(!is_large_object_space_immune_);
- TimingLogger::ScopedSplit("SweepLargeObjects", &timings_);
+ TimingLogger::ScopedSplit split("SweepLargeObjects", &timings_);
size_t freed_objects = 0;
size_t freed_bytes = 0;
- GetHeap()->GetLargeObjectsSpace()->Sweep(swap_bitmaps, &freed_objects, &freed_bytes);
+ heap_->GetLargeObjectsSpace()->Sweep(swap_bitmaps, &freed_objects, &freed_bytes);
freed_large_objects_.FetchAndAdd(freed_objects);
freed_large_object_bytes_.FetchAndAdd(freed_bytes);
- GetHeap()->RecordFree(freed_objects, freed_bytes);
+ heap_->RecordFree(freed_objects, freed_bytes);
}
// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been
@@ -710,8 +704,8 @@ void SemiSpace::ScanObject(Object* obj) {
// Scan anything that's on the mark stack.
void SemiSpace::ProcessMarkStack() {
- space::MallocSpace* promo_dest_space = NULL;
- accounting::SpaceBitmap* live_bitmap = NULL;
+ space::MallocSpace* promo_dest_space = nullptr;
+ accounting::ContinuousSpaceBitmap* live_bitmap = nullptr;
if (generational_ && !whole_heap_collection_) {
// If a bump pointer space only collection (and the promotion is
// enabled,) we delay the live-bitmap marking of promoted objects
@@ -719,7 +713,7 @@ void SemiSpace::ProcessMarkStack() {
promo_dest_space = GetHeap()->GetPrimaryFreeListSpace();
live_bitmap = promo_dest_space->GetLiveBitmap();
DCHECK(live_bitmap != nullptr);
- accounting::SpaceBitmap* mark_bitmap = promo_dest_space->GetMarkBitmap();
+ accounting::ContinuousSpaceBitmap* mark_bitmap = promo_dest_space->GetMarkBitmap();
DCHECK(mark_bitmap != nullptr);
DCHECK_EQ(live_bitmap, mark_bitmap);
}
diff --git a/runtime/gc/collector/semi_space.h b/runtime/gc/collector/semi_space.h
index 3442751..3d635f0 100644
--- a/runtime/gc/collector/semi_space.h
+++ b/runtime/gc/collector/semi_space.h
@@ -21,6 +21,7 @@
#include "base/macros.h"
#include "base/mutex.h"
#include "garbage_collector.h"
+#include "gc/accounting/space_bitmap.h"
#include "immune_region.h"
#include "object_callbacks.h"
#include "offsets.h"
@@ -42,7 +43,6 @@ class Heap;
namespace accounting {
template <typename T> class AtomicStack;
typedef AtomicStack<mirror::Object*> ObjectStack;
- class SpaceBitmap;
} // namespace accounting
namespace space {
@@ -198,8 +198,11 @@ class SemiSpace : public GarbageCollector {
// Destination and source spaces (can be any type of ContinuousMemMapAllocSpace which either has
// a live bitmap or doesn't).
space::ContinuousMemMapAllocSpace* to_space_;
- accounting::SpaceBitmap* to_space_live_bitmap_; // Cached live bitmap as an optimization.
+ // Cached live bitmap as an optimization.
+ accounting::ContinuousSpaceBitmap* to_space_live_bitmap_;
space::ContinuousMemMapAllocSpace* from_space_;
+ // Cached mark bitmap as an optimization.
+ accounting::HeapBitmap* mark_bitmap_;
Thread* self_;
@@ -230,6 +233,11 @@ class SemiSpace : public GarbageCollector {
// whole_heap_collection_ once per interval.
int whole_heap_collection_interval_counter_;
+ // How many objects and bytes we moved, used so that we don't need to get the size of the
+ // to_space_ when calculating how many objects and bytes we freed.
+ size_t bytes_moved_;
+ size_t objects_moved_;
+
// How many bytes we avoided dirtying.
size_t saved_bytes_;
@@ -242,6 +250,7 @@ class SemiSpace : public GarbageCollector {
static constexpr int kDefaultWholeHeapCollectionInterval = 5;
private:
+ friend class BitmapSetSlowPathVisitor;
DISALLOW_COPY_AND_ASSIGN(SemiSpace);
};
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 479ea2e..502da12 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -88,7 +88,8 @@ static constexpr bool kCompactZygote = kMovingCollector;
static constexpr size_t kNonMovingSpaceCapacity = 64 * MB;
Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max_free,
- double target_utilization, size_t capacity, const std::string& image_file_name,
+ double target_utilization, double foreground_heap_growth_multiplier, size_t capacity,
+ const std::string& image_file_name,
CollectorType foreground_collector_type, CollectorType background_collector_type,
size_t parallel_gc_threads, size_t conc_gc_threads, bool low_memory_mode,
size_t long_pause_log_threshold, size_t long_gc_log_threshold,
@@ -154,6 +155,7 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max
min_free_(min_free),
max_free_(max_free),
target_utilization_(target_utilization),
+ foreground_heap_growth_multiplier_(foreground_heap_growth_multiplier),
total_wait_time_(0),
total_allocation_time_(0),
verify_object_mode_(kVerifyObjectModeDisabled),
@@ -223,10 +225,15 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max
// the bitmap from the main space.
if (kMovingCollector) {
// TODO: Place bump-pointer spaces somewhere to minimize size of card table.
- bump_pointer_space_ = space::BumpPointerSpace::Create("Bump pointer space", capacity, nullptr);
+    // TODO: Don't create all the bump pointer spaces unless necessary (currently only GSS needs
+    // both bump pointer spaces and the main space) b/14059466. Divide by 2 for a temporary fix.
+ const size_t bump_pointer_space_capacity = capacity / 2;
+ bump_pointer_space_ = space::BumpPointerSpace::Create("Bump pointer space",
+ bump_pointer_space_capacity, nullptr);
CHECK(bump_pointer_space_ != nullptr) << "Failed to create bump pointer space";
AddSpace(bump_pointer_space_);
- temp_space_ = space::BumpPointerSpace::Create("Bump pointer space 2", capacity, nullptr);
+ temp_space_ = space::BumpPointerSpace::Create("Bump pointer space 2",
+ bump_pointer_space_capacity, nullptr);
CHECK(temp_space_ != nullptr) << "Failed to create bump pointer space";
AddSpace(temp_space_);
}
@@ -348,15 +355,15 @@ void Heap::CreateMainMallocSpace(MemMap* mem_map, size_t initial_size, size_t gr
}
if (kUseRosAlloc) {
main_space_ = space::RosAllocSpace::CreateFromMemMap(mem_map, "main rosalloc space",
- kDefaultStartingSize, initial_size,
- growth_limit, capacity, low_memory_mode_,
- can_move_objects);
+ kDefaultStartingSize, initial_size,
+ growth_limit, capacity, low_memory_mode_,
+ can_move_objects);
CHECK(main_space_ != nullptr) << "Failed to create rosalloc space";
} else {
main_space_ = space::DlMallocSpace::CreateFromMemMap(mem_map, "main dlmalloc space",
- kDefaultStartingSize, initial_size,
- growth_limit, capacity,
- can_move_objects);
+ kDefaultStartingSize, initial_size,
+ growth_limit, capacity,
+ can_move_objects);
CHECK(main_space_ != nullptr) << "Failed to create dlmalloc space";
}
main_space_->SetFootprintLimit(main_space_->Capacity());
@@ -562,7 +569,7 @@ void Heap::MarkAllocStackAsLive(accounting::ObjectStack* stack) {
space2 = space1;
}
MarkAllocStack(space1->GetLiveBitmap(), space2->GetLiveBitmap(),
- large_object_space_->GetLiveObjects(), stack);
+ large_object_space_->GetLiveBitmap(), stack);
}
void Heap::DeleteThreadPool() {
@@ -576,8 +583,8 @@ void Heap::AddSpace(space::Space* space, bool set_as_default) {
DCHECK(!space->IsDiscontinuousSpace());
space::ContinuousSpace* continuous_space = space->AsContinuousSpace();
// Continuous spaces don't necessarily have bitmaps.
- accounting::SpaceBitmap* live_bitmap = continuous_space->GetLiveBitmap();
- accounting::SpaceBitmap* mark_bitmap = continuous_space->GetMarkBitmap();
+ accounting::ContinuousSpaceBitmap* live_bitmap = continuous_space->GetLiveBitmap();
+ accounting::ContinuousSpaceBitmap* mark_bitmap = continuous_space->GetMarkBitmap();
if (live_bitmap != nullptr) {
DCHECK(mark_bitmap != nullptr);
live_bitmap_->AddContinuousSpaceBitmap(live_bitmap);
@@ -599,10 +606,8 @@ void Heap::AddSpace(space::Space* space, bool set_as_default) {
} else {
DCHECK(space->IsDiscontinuousSpace());
space::DiscontinuousSpace* discontinuous_space = space->AsDiscontinuousSpace();
- DCHECK(discontinuous_space->GetLiveObjects() != nullptr);
- live_bitmap_->AddDiscontinuousObjectSet(discontinuous_space->GetLiveObjects());
- DCHECK(discontinuous_space->GetMarkObjects() != nullptr);
- mark_bitmap_->AddDiscontinuousObjectSet(discontinuous_space->GetMarkObjects());
+ live_bitmap_->AddLargeObjectBitmap(discontinuous_space->GetLiveBitmap());
+ mark_bitmap_->AddLargeObjectBitmap(discontinuous_space->GetMarkBitmap());
discontinuous_spaces_.push_back(discontinuous_space);
}
if (space->IsAllocSpace()) {
@@ -617,8 +622,8 @@ void Heap::RemoveSpace(space::Space* space) {
DCHECK(!space->IsDiscontinuousSpace());
space::ContinuousSpace* continuous_space = space->AsContinuousSpace();
// Continuous spaces don't necessarily have bitmaps.
- accounting::SpaceBitmap* live_bitmap = continuous_space->GetLiveBitmap();
- accounting::SpaceBitmap* mark_bitmap = continuous_space->GetMarkBitmap();
+ accounting::ContinuousSpaceBitmap* live_bitmap = continuous_space->GetLiveBitmap();
+ accounting::ContinuousSpaceBitmap* mark_bitmap = continuous_space->GetMarkBitmap();
if (live_bitmap != nullptr) {
DCHECK(mark_bitmap != nullptr);
live_bitmap_->RemoveContinuousSpaceBitmap(live_bitmap);
@@ -642,10 +647,8 @@ void Heap::RemoveSpace(space::Space* space) {
} else {
DCHECK(space->IsDiscontinuousSpace());
space::DiscontinuousSpace* discontinuous_space = space->AsDiscontinuousSpace();
- DCHECK(discontinuous_space->GetLiveObjects() != nullptr);
- live_bitmap_->RemoveDiscontinuousObjectSet(discontinuous_space->GetLiveObjects());
- DCHECK(discontinuous_space->GetMarkObjects() != nullptr);
- mark_bitmap_->RemoveDiscontinuousObjectSet(discontinuous_space->GetMarkObjects());
+ live_bitmap_->RemoveLargeObjectBitmap(discontinuous_space->GetLiveBitmap());
+ mark_bitmap_->RemoveLargeObjectBitmap(discontinuous_space->GetMarkBitmap());
auto it = std::find(discontinuous_spaces_.begin(), discontinuous_spaces_.end(),
discontinuous_space);
DCHECK(it != discontinuous_spaces_.end());
@@ -1043,7 +1046,7 @@ bool Heap::IsLiveObjectLocked(mirror::Object* obj, bool search_allocation_stack,
return temp_space_->Contains(obj);
}
space::ContinuousSpace* c_space = FindContinuousSpaceFromObject(obj, true);
- space::DiscontinuousSpace* d_space = NULL;
+ space::DiscontinuousSpace* d_space = nullptr;
if (c_space != nullptr) {
if (c_space->GetLiveBitmap()->Test(obj)) {
return true;
@@ -1051,7 +1054,7 @@ bool Heap::IsLiveObjectLocked(mirror::Object* obj, bool search_allocation_stack,
} else {
d_space = FindDiscontinuousSpaceFromObject(obj, true);
if (d_space != nullptr) {
- if (d_space->GetLiveObjects()->Test(obj)) {
+ if (d_space->GetLiveBitmap()->Test(obj)) {
return true;
}
}
@@ -1089,7 +1092,7 @@ bool Heap::IsLiveObjectLocked(mirror::Object* obj, bool search_allocation_stack,
}
} else {
d_space = FindDiscontinuousSpaceFromObject(obj, true);
- if (d_space != nullptr && d_space->GetLiveObjects()->Test(obj)) {
+ if (d_space != nullptr && d_space->GetLiveBitmap()->Test(obj)) {
return true;
}
}
@@ -1098,8 +1101,8 @@ bool Heap::IsLiveObjectLocked(mirror::Object* obj, bool search_allocation_stack,
void Heap::DumpSpaces(std::ostream& stream) {
for (const auto& space : continuous_spaces_) {
- accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
- accounting::SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
+ accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
+ accounting::ContinuousSpaceBitmap* mark_bitmap = space->GetMarkBitmap();
stream << space << " " << *space << "\n";
if (live_bitmap != nullptr) {
stream << live_bitmap << " " << *live_bitmap << "\n";
@@ -1407,7 +1410,6 @@ void Heap::TransitionCollector(CollectorType collector_type) {
VLOG(heap) << "TransitionCollector: " << static_cast<int>(collector_type_)
<< " -> " << static_cast<int>(collector_type);
uint64_t start_time = NanoTime();
- uint32_t before_size = GetTotalMemory();
uint32_t before_allocated = num_bytes_allocated_.Load();
ThreadList* tl = Runtime::Current()->GetThreadList();
Thread* self = Thread::Current();
@@ -1477,16 +1479,10 @@ void Heap::TransitionCollector(CollectorType collector_type) {
uint64_t duration = NanoTime() - start_time;
GrowForUtilization(semi_space_collector_);
FinishGC(self, collector::kGcTypeFull);
- int32_t after_size = GetTotalMemory();
- int32_t delta_size = before_size - after_size;
int32_t after_allocated = num_bytes_allocated_.Load();
int32_t delta_allocated = before_allocated - after_allocated;
- const std::string saved_bytes_str =
- delta_size < 0 ? "-" + PrettySize(-delta_size) : PrettySize(delta_size);
LOG(INFO) << "Heap transition to " << process_state_ << " took "
- << PrettyDuration(duration) << " " << PrettySize(before_size) << "->"
- << PrettySize(after_size) << " from " << PrettySize(delta_allocated) << " to "
- << PrettySize(delta_size) << " saved";
+ << PrettyDuration(duration) << " saved at least " << PrettySize(delta_allocated);
}
void Heap::ChangeCollector(CollectorType collector_type) {
@@ -1561,9 +1557,9 @@ class ZygoteCompactingCollector FINAL : public collector::SemiSpace {
// Maps from bin sizes to locations.
std::multimap<size_t, uintptr_t> bins_;
// Live bitmap of the space which contains the bins.
- accounting::SpaceBitmap* bin_live_bitmap_;
+ accounting::ContinuousSpaceBitmap* bin_live_bitmap_;
// Mark bitmap of the space which contains the bins.
- accounting::SpaceBitmap* bin_mark_bitmap_;
+ accounting::ContinuousSpaceBitmap* bin_mark_bitmap_;
static void Callback(mirror::Object* obj, void* arg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -1759,9 +1755,9 @@ void Heap::FlushAllocStack() {
allocation_stack_->Reset();
}
-void Heap::MarkAllocStack(accounting::SpaceBitmap* bitmap1,
- accounting::SpaceBitmap* bitmap2,
- accounting::ObjectSet* large_objects,
+void Heap::MarkAllocStack(accounting::ContinuousSpaceBitmap* bitmap1,
+ accounting::ContinuousSpaceBitmap* bitmap2,
+ accounting::LargeObjectBitmap* large_objects,
accounting::ObjectStack* stack) {
DCHECK(bitmap1 != nullptr);
DCHECK(bitmap2 != nullptr);
@@ -2033,7 +2029,8 @@ class VerifyReferenceVisitor {
accounting::CardTable::kCardSize);
LOG(ERROR) << "Card " << reinterpret_cast<void*>(card_addr) << " covers " << cover_begin
<< "-" << cover_end;
- accounting::SpaceBitmap* bitmap = heap_->GetLiveBitmap()->GetContinuousSpaceBitmap(obj);
+ accounting::ContinuousSpaceBitmap* bitmap =
+ heap_->GetLiveBitmap()->GetContinuousSpaceBitmap(obj);
if (bitmap == nullptr) {
LOG(ERROR) << "Object " << obj << " has no bitmap";
@@ -2525,22 +2522,33 @@ collector::GarbageCollector* Heap::FindCollectorByGcType(collector::GcType gc_ty
return nullptr;
}
+double Heap::HeapGrowthMultiplier() const {
+ // If we don't care about pause times we are background, so return 1.0.
+ if (!CareAboutPauseTimes() || IsLowMemoryMode()) {
+ return 1.0;
+ }
+ return foreground_heap_growth_multiplier_;
+}
+
void Heap::GrowForUtilization(collector::GarbageCollector* collector_ran) {
// We know what our utilization is at this moment.
// This doesn't actually resize any memory. It just lets the heap grow more when necessary.
- const size_t bytes_allocated = GetBytesAllocated();
+ const uint64_t bytes_allocated = GetBytesAllocated();
last_gc_size_ = bytes_allocated;
last_gc_time_ns_ = NanoTime();
- size_t target_size;
+ uint64_t target_size;
collector::GcType gc_type = collector_ran->GetGcType();
if (gc_type != collector::kGcTypeSticky) {
// Grow the heap for non sticky GC.
- target_size = bytes_allocated / GetTargetHeapUtilization();
- if (target_size > bytes_allocated + max_free_) {
- target_size = bytes_allocated + max_free_;
- } else if (target_size < bytes_allocated + min_free_) {
- target_size = bytes_allocated + min_free_;
- }
+ const float multiplier = HeapGrowthMultiplier(); // Use the multiplier to grow more for
+ // foreground.
+ intptr_t delta = bytes_allocated / GetTargetHeapUtilization() - bytes_allocated;
+ CHECK_GE(delta, 0);
+ target_size = bytes_allocated + delta * multiplier;
+ target_size = std::min(target_size,
+ bytes_allocated + static_cast<uint64_t>(max_free_ * multiplier));
+ target_size = std::max(target_size,
+ bytes_allocated + static_cast<uint64_t>(min_free_ * multiplier));
native_need_to_run_finalization_ = true;
next_gc_type_ = collector::kGcTypeSticky;
} else {
@@ -2565,7 +2573,7 @@ void Heap::GrowForUtilization(collector::GarbageCollector* collector_ran) {
if (bytes_allocated + max_free_ < max_allowed_footprint_) {
target_size = bytes_allocated + max_free_;
} else {
- target_size = std::max(bytes_allocated, max_allowed_footprint_);
+ target_size = std::max(bytes_allocated, static_cast<uint64_t>(max_allowed_footprint_));
}
}
if (!ignore_max_footprint_) {
@@ -2589,7 +2597,8 @@ void Heap::GrowForUtilization(collector::GarbageCollector* collector_ran) {
// Start a concurrent GC when we get close to the estimated remaining bytes. When the
// allocation rate is very high, remaining_bytes could tell us that we should start a GC
// right away.
- concurrent_start_bytes_ = std::max(max_allowed_footprint_ - remaining_bytes, bytes_allocated);
+ concurrent_start_bytes_ = std::max(max_allowed_footprint_ - remaining_bytes,
+ static_cast<size_t>(bytes_allocated));
}
}
}
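
To make the new growth arithmetic concrete: the heap grows by the delta implied by the target utilization, scaled by the foreground multiplier, then clamped between min_free and max_free (also scaled). A hedged re-derivation using the defaults introduced in heap.h below (target utilization 0.5, multiplier 2.0) and invented min/max free values; the helper mirrors the clamping above but is an illustration, not the Heap method:

    #include <algorithm>
    #include <cstdint>

    // Illustrative mirror of GrowForUtilization's non-sticky branch; the
    // min_free/max_free values used in the example are invented.
    std::uint64_t TargetSize(std::uint64_t bytes_allocated, double utilization,
                             double multiplier, std::uint64_t min_free,
                             std::uint64_t max_free) {
      const std::uint64_t grown =
          static_cast<std::uint64_t>(bytes_allocated / utilization);
      const std::uint64_t delta = grown - bytes_allocated;
      std::uint64_t target =
          bytes_allocated + static_cast<std::uint64_t>(delta * multiplier);
      target = std::min(target,
                        bytes_allocated + static_cast<std::uint64_t>(max_free * multiplier));
      target = std::max(target,
                        bytes_allocated + static_cast<std::uint64_t>(min_free * multiplier));
      return target;
    }
    // Example: with 40 MB allocated, utilization 0.5, multiplier 2.0,
    // min_free = 2 MB, max_free = 8 MB: delta = 40 MB, unclamped target = 120 MB,
    // clamped to 40 + 8*2 = 56 MB. The same numbers with multiplier 1.0
    // (background or low memory) give 48 MB.
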
@@ -2868,14 +2877,14 @@ void Heap::RemoveRememberedSet(space::Space* space) {
void Heap::ClearMarkedObjects() {
// Clear all of the spaces' mark bitmaps.
for (const auto& space : GetContinuousSpaces()) {
- accounting::SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
+ accounting::ContinuousSpaceBitmap* mark_bitmap = space->GetMarkBitmap();
if (space->GetLiveBitmap() != mark_bitmap) {
mark_bitmap->Clear();
}
}
  // Clear the marked objects in the discontinuous space object sets.
for (const auto& space : GetDiscontinuousSpaces()) {
- space->GetMarkObjects()->Clear();
+ space->GetMarkBitmap()->Clear();
}
}
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 912cf7d..ceba8b6 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -55,7 +55,6 @@ namespace gc {
namespace accounting {
class HeapBitmap;
class ModUnionTable;
- class ObjectSet;
class RememberedSet;
} // namespace accounting
@@ -132,9 +131,8 @@ class Heap {
static constexpr size_t kDefaultLongPauseLogThreshold = MsToNs(5);
static constexpr size_t kDefaultLongGCLogThreshold = MsToNs(100);
static constexpr size_t kDefaultTLABSize = 256 * KB;
-
- // Default target utilization.
static constexpr double kDefaultTargetUtilization = 0.5;
+ static constexpr double kDefaultHeapGrowthMultiplier = 2.0;
// Used so that we don't overflow the allocation time atomic integer.
static constexpr size_t kTimeAdjust = 1024;
@@ -148,7 +146,8 @@ class Heap {
// image_file_names names specify Spaces to load based on
// ImageWriter output.
explicit Heap(size_t initial_size, size_t growth_limit, size_t min_free,
- size_t max_free, double target_utilization, size_t capacity,
+ size_t max_free, double target_utilization,
+ double foreground_heap_growth_multiplier, size_t capacity,
const std::string& original_image_file_name,
CollectorType foreground_collector_type, CollectorType background_collector_type,
size_t parallel_gc_threads, size_t conc_gc_threads, bool low_memory_mode,
@@ -351,6 +350,10 @@ class Heap {
return low_memory_mode_;
}
+  // Returns the heap growth multiplier; this affects how much we grow the heap after a GC.
+ // Scales heap growth, min free, and max free.
+ double HeapGrowthMultiplier() const;
+
// Freed bytes can be negative in cases where we copy objects from a compacted space to a
// free-list backed space.
void RecordFree(ssize_t freed_objects, ssize_t freed_bytes);
@@ -470,8 +473,11 @@ class Heap {
LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_, Locks::thread_list_lock_);
// Mark all the objects in the allocation stack in the specified bitmap.
- void MarkAllocStack(accounting::SpaceBitmap* bitmap1, accounting::SpaceBitmap* bitmap2,
- accounting::ObjectSet* large_objects, accounting::ObjectStack* stack)
+ // TODO: Refactor?
+ void MarkAllocStack(accounting::SpaceBitmap<kObjectAlignment>* bitmap1,
+ accounting::SpaceBitmap<kObjectAlignment>* bitmap2,
+ accounting::SpaceBitmap<kLargeObjectAlignment>* large_objects,
+ accounting::ObjectStack* stack)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
// Mark the specified allocation stack as live.
@@ -925,6 +931,9 @@ class Heap {
// Target ideal heap utilization ratio
double target_utilization_;
+ // How much more we grow the heap when we are a foreground app instead of background.
+ double foreground_heap_growth_multiplier_;
+
// Total time which mutators are paused or waiting for GC to complete.
uint64_t total_wait_time_;
diff --git a/runtime/gc/heap_test.cc b/runtime/gc/heap_test.cc
index 07e5088..a85ad4d 100644
--- a/runtime/gc/heap_test.cc
+++ b/runtime/gc/heap_test.cc
@@ -60,13 +60,11 @@ TEST_F(HeapTest, GarbageCollectClassLinkerInit) {
TEST_F(HeapTest, HeapBitmapCapacityTest) {
byte* heap_begin = reinterpret_cast<byte*>(0x1000);
- const size_t heap_capacity = accounting::SpaceBitmap::kAlignment * (sizeof(intptr_t) * 8 + 1);
- UniquePtr<accounting::SpaceBitmap> bitmap(accounting::SpaceBitmap::Create("test bitmap",
- heap_begin,
- heap_capacity));
+ const size_t heap_capacity = kObjectAlignment * (sizeof(intptr_t) * 8 + 1);
+ UniquePtr<accounting::ContinuousSpaceBitmap> bitmap(
+ accounting::ContinuousSpaceBitmap::Create("test bitmap", heap_begin, heap_capacity));
mirror::Object* fake_end_of_heap_object =
- reinterpret_cast<mirror::Object*>(&heap_begin[heap_capacity -
- accounting::SpaceBitmap::kAlignment]);
+ reinterpret_cast<mirror::Object*>(&heap_begin[heap_capacity - kObjectAlignment]);
bitmap->Set(fake_end_of_heap_object);
}
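
A worked check of the capacity expression, assuming a 64-bit build where kObjectAlignment is 8 bytes and sizeof(intptr_t) * 8 is 64: each bitmap bit covers one 8-byte granule, so a single bitmap word covers 8 * 64 = 512 bytes of heap. heap_capacity = 8 * (64 + 1) = 520 bytes, and the fake object sits at offset 520 - 8 = 512, i.e. bit 0 of the second bitmap word. The test therefore exercises Set() one word past the first, exactly where an off-by-one in the bitmap sizing would bite.
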
diff --git a/runtime/gc/space/bump_pointer_space.cc b/runtime/gc/space/bump_pointer_space.cc
index 6bd0526..90ffe59 100644
--- a/runtime/gc/space/bump_pointer_space.cc
+++ b/runtime/gc/space/bump_pointer_space.cc
@@ -197,7 +197,7 @@ void BumpPointerSpace::Walk(ObjectCallback* callback, void* arg) {
}
}
-accounting::SpaceBitmap::SweepCallback* BumpPointerSpace::GetSweepCallback() {
+accounting::ContinuousSpaceBitmap::SweepCallback* BumpPointerSpace::GetSweepCallback() {
LOG(FATAL) << "Unimplemented";
return nullptr;
}
diff --git a/runtime/gc/space/bump_pointer_space.h b/runtime/gc/space/bump_pointer_space.h
index ecfeae5..e52a9a3 100644
--- a/runtime/gc/space/bump_pointer_space.h
+++ b/runtime/gc/space/bump_pointer_space.h
@@ -85,11 +85,11 @@ class BumpPointerSpace FINAL : public ContinuousMemMapAllocSpace {
return GetMemMap()->Size();
}
- accounting::SpaceBitmap* GetLiveBitmap() const OVERRIDE {
+ accounting::ContinuousSpaceBitmap* GetLiveBitmap() const OVERRIDE {
return nullptr;
}
- accounting::SpaceBitmap* GetMarkBitmap() const OVERRIDE {
+ accounting::ContinuousSpaceBitmap* GetMarkBitmap() const OVERRIDE {
return nullptr;
}
@@ -138,7 +138,7 @@ class BumpPointerSpace FINAL : public ContinuousMemMapAllocSpace {
void Walk(ObjectCallback* callback, void* arg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- accounting::SpaceBitmap::SweepCallback* GetSweepCallback() OVERRIDE;
+ accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() OVERRIDE;
// Object alignment within the space.
static constexpr size_t kAlignment = 8;
diff --git a/runtime/gc/space/dlmalloc_space.cc b/runtime/gc/space/dlmalloc_space.cc
index be88b33..41a0458 100644
--- a/runtime/gc/space/dlmalloc_space.cc
+++ b/runtime/gc/space/dlmalloc_space.cc
@@ -14,10 +14,10 @@
* limitations under the License.
*/
-#include "dlmalloc_space.h"
-
#include "dlmalloc_space-inl.h"
+
#include "gc/accounting/card_table.h"
+#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index faa539f..91d8820 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -35,7 +35,7 @@ namespace space {
Atomic<uint32_t> ImageSpace::bitmap_index_(0);
ImageSpace::ImageSpace(const std::string& name, MemMap* mem_map,
- accounting::SpaceBitmap* live_bitmap)
+ accounting::ContinuousSpaceBitmap* live_bitmap)
: MemMapSpace(name, mem_map, mem_map->Begin(), mem_map->End(), mem_map->End(),
kGcRetentionPolicyNeverCollect) {
DCHECK(live_bitmap != nullptr);
@@ -197,10 +197,10 @@ ImageSpace* ImageSpace::Init(const char* image_file_name, bool validate_oat_file
uint32_t bitmap_index = bitmap_index_.FetchAndAdd(1);
std::string bitmap_name(StringPrintf("imagespace %s live-bitmap %u", image_file_name,
bitmap_index));
- UniquePtr<accounting::SpaceBitmap> bitmap(
- accounting::SpaceBitmap::CreateFromMemMap(bitmap_name, image_map.release(),
- reinterpret_cast<byte*>(map->Begin()),
- map->Size()));
+ UniquePtr<accounting::ContinuousSpaceBitmap> bitmap(
+ accounting::ContinuousSpaceBitmap::CreateFromMemMap(bitmap_name, image_map.release(),
+ reinterpret_cast<byte*>(map->Begin()),
+ map->Size()));
if (bitmap.get() == nullptr) {
*error_msg = StringPrintf("Could not create bitmap '%s'", bitmap_name.c_str());
return nullptr;
diff --git a/runtime/gc/space/image_space.h b/runtime/gc/space/image_space.h
index 6b63d10..f6daf89 100644
--- a/runtime/gc/space/image_space.h
+++ b/runtime/gc/space/image_space.h
@@ -17,6 +17,7 @@
#ifndef ART_RUNTIME_GC_SPACE_IMAGE_SPACE_H_
#define ART_RUNTIME_GC_SPACE_IMAGE_SPACE_H_
+#include "gc/accounting/space_bitmap.h"
#include "space.h"
namespace art {
@@ -59,11 +60,11 @@ class ImageSpace : public MemMapSpace {
return GetName();
}
- accounting::SpaceBitmap* GetLiveBitmap() const {
+ accounting::ContinuousSpaceBitmap* GetLiveBitmap() const OVERRIDE {
return live_bitmap_.get();
}
- accounting::SpaceBitmap* GetMarkBitmap() const {
+ accounting::ContinuousSpaceBitmap* GetMarkBitmap() const OVERRIDE {
// ImageSpaces have the same bitmap for both live and marked. This helps reduce the number of
// special cases to test against.
return live_bitmap_.get();
@@ -100,9 +101,10 @@ class ImageSpace : public MemMapSpace {
static Atomic<uint32_t> bitmap_index_;
- UniquePtr<accounting::SpaceBitmap> live_bitmap_;
+ UniquePtr<accounting::ContinuousSpaceBitmap> live_bitmap_;
- ImageSpace(const std::string& name, MemMap* mem_map, accounting::SpaceBitmap* live_bitmap);
+ ImageSpace(const std::string& name, MemMap* mem_map,
+ accounting::ContinuousSpaceBitmap* live_bitmap);
// The OatFile associated with the image during early startup to
// reserve space contiguous to the image. It is later released to
diff --git a/runtime/gc/space/large_object_space.cc b/runtime/gc/space/large_object_space.cc
index 0b353c7..ce11b3d 100644
--- a/runtime/gc/space/large_object_space.cc
+++ b/runtime/gc/space/large_object_space.cc
@@ -16,12 +16,14 @@
#include "large_object_space.h"
+#include "gc/accounting/space_bitmap-inl.h"
#include "base/logging.h"
#include "base/mutex-inl.h"
#include "base/stl_util.h"
#include "UniquePtr.h"
#include "image.h"
#include "os.h"
+#include "space-inl.h"
#include "thread-inl.h"
#include "utils.h"
@@ -74,26 +76,27 @@ class ValgrindLargeObjectMapSpace FINAL : public LargeObjectMapSpace {
};
void LargeObjectSpace::SwapBitmaps() {
- live_objects_.swap(mark_objects_);
+ live_bitmap_.swap(mark_bitmap_);
// Swap names to get more descriptive diagnostics.
- std::string temp_name = live_objects_->GetName();
- live_objects_->SetName(mark_objects_->GetName());
- mark_objects_->SetName(temp_name);
+ std::string temp_name = live_bitmap_->GetName();
+ live_bitmap_->SetName(mark_bitmap_->GetName());
+ mark_bitmap_->SetName(temp_name);
}
-LargeObjectSpace::LargeObjectSpace(const std::string& name)
+LargeObjectSpace::LargeObjectSpace(const std::string& name, byte* begin, byte* end)
: DiscontinuousSpace(name, kGcRetentionPolicyAlwaysCollect),
num_bytes_allocated_(0), num_objects_allocated_(0), total_bytes_allocated_(0),
- total_objects_allocated_(0) {
+ total_objects_allocated_(0), begin_(begin), end_(end) {
}
void LargeObjectSpace::CopyLiveToMarked() {
- mark_objects_->CopyFrom(*live_objects_.get());
+ mark_bitmap_->CopyFrom(live_bitmap_.get());
}
+// TODO: Use something cleaner than 0xFFFFFFFF.
LargeObjectMapSpace::LargeObjectMapSpace(const std::string& name)
- : LargeObjectSpace(name),
+ : LargeObjectSpace(name, reinterpret_cast<byte*>(0xFFFFFFFF), nullptr),
lock_("large object map space lock", kAllocSpaceLock) {}
LargeObjectMapSpace* LargeObjectMapSpace::Create(const std::string& name) {
@@ -118,7 +121,9 @@ mirror::Object* LargeObjectMapSpace::Alloc(Thread* self, size_t num_bytes,
large_objects_.push_back(obj);
mem_maps_.Put(obj, mem_map);
size_t allocation_size = mem_map->Size();
- DCHECK(bytes_allocated != NULL);
+ DCHECK(bytes_allocated != nullptr);
+ begin_ = std::min(begin_, reinterpret_cast<byte*>(obj));
+ end_ = std::max(end_, reinterpret_cast<byte*>(obj) + allocation_size);
*bytes_allocated = allocation_size;
if (usable_size != nullptr) {
*usable_size = allocation_size;
@@ -191,9 +196,7 @@ FreeListSpace* FreeListSpace::Create(const std::string& name, byte* requested_be
}
FreeListSpace::FreeListSpace(const std::string& name, MemMap* mem_map, byte* begin, byte* end)
- : LargeObjectSpace(name),
- begin_(begin),
- end_(end),
+ : LargeObjectSpace(name, begin, end),
mem_map_(mem_map),
lock_("free list space lock", kAllocSpaceLock) {
free_end_ = end - begin;
@@ -389,27 +392,41 @@ void FreeListSpace::Dump(std::ostream& os) const {
}
}
-void LargeObjectSpace::Sweep(bool swap_bitmaps, size_t* freed_objects, size_t* freed_bytes) {
- // Sweep large objects
- accounting::ObjectSet* large_live_objects = GetLiveObjects();
- accounting::ObjectSet* large_mark_objects = GetMarkObjects();
- if (swap_bitmaps) {
- std::swap(large_live_objects, large_mark_objects);
- }
- DCHECK(freed_objects != nullptr);
- DCHECK(freed_bytes != nullptr);
- // O(n*log(n)) but hopefully there are not too many large objects.
- size_t objects = 0;
- size_t bytes = 0;
- Thread* self = Thread::Current();
- for (const mirror::Object* obj : large_live_objects->GetObjects()) {
- if (!large_mark_objects->Test(obj)) {
- bytes += Free(self, const_cast<mirror::Object*>(obj));
- ++objects;
+void LargeObjectSpace::SweepCallback(size_t num_ptrs, mirror::Object** ptrs, void* arg) {
+ SweepCallbackContext* context = static_cast<SweepCallbackContext*>(arg);
+ space::LargeObjectSpace* space = context->space->AsLargeObjectSpace();
+ Thread* self = context->self;
+ Locks::heap_bitmap_lock_->AssertExclusiveHeld(self);
+ // If the bitmaps aren't swapped we need to clear the bits since the GC isn't going to re-swap
+ // the bitmaps as an optimization.
+ if (!context->swap_bitmaps) {
+ accounting::LargeObjectBitmap* bitmap = space->GetLiveBitmap();
+ for (size_t i = 0; i < num_ptrs; ++i) {
+ bitmap->Clear(ptrs[i]);
}
}
- *freed_objects += objects;
- *freed_bytes += bytes;
+ context->freed_objects += num_ptrs;
+ context->freed_bytes += space->FreeList(self, num_ptrs, ptrs);
+}
+
+void LargeObjectSpace::Sweep(bool swap_bitmaps, size_t* out_freed_objects,
+ size_t* out_freed_bytes) {
+ if (Begin() >= End()) {
+ return;
+ }
+ accounting::LargeObjectBitmap* live_bitmap = GetLiveBitmap();
+ accounting::LargeObjectBitmap* mark_bitmap = GetMarkBitmap();
+ if (swap_bitmaps) {
+ std::swap(live_bitmap, mark_bitmap);
+ }
+ DCHECK(out_freed_objects != nullptr);
+ DCHECK(out_freed_bytes != nullptr);
+ SweepCallbackContext scc(swap_bitmaps, this);
+ accounting::LargeObjectBitmap::SweepWalk(*live_bitmap, *mark_bitmap,
+ reinterpret_cast<uintptr_t>(Begin()),
+ reinterpret_cast<uintptr_t>(End()), SweepCallback, &scc);
+ *out_freed_objects += scc.freed_objects;
+ *out_freed_bytes += scc.freed_bytes;
}
} // namespace space
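
The rewritten large-object sweep now has the same shape as the continuous-space sweep: swap the bitmaps if requested, walk [Begin(), End()) for bits set in live but clear in mark, and hand the resulting batch to SweepCallback, which clears live bits (when the bitmaps were not swapped) and frees the objects, accumulating counts in the SweepCallbackContext. The Begin() >= End() early-out relies on the running min/max that Alloc() now maintains, so a space that never allocated anything is skipped outright. A rough, self-contained sketch of the live-and-not-marked walk (illustrative only, not the ART SweepWalk implementation):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Each bit stands for one alignment-sized granule starting at heap_begin.
    using SweepFn = void (*)(std::size_t num_ptrs, std::uintptr_t* ptrs, void* arg);

    void SweepWalkSketch(const std::vector<std::uint64_t>& live,
                         const std::vector<std::uint64_t>& mark,
                         std::uintptr_t heap_begin, std::size_t alignment,
                         SweepFn callback, void* arg) {
      std::vector<std::uintptr_t> batch;
      for (std::size_t word = 0; word < live.size(); ++word) {
        std::uint64_t garbage = live[word] & ~mark[word];  // live but not marked
        while (garbage != 0) {
          const int bit = __builtin_ctzll(garbage);  // GCC/Clang builtin
          batch.push_back(heap_begin + (word * 64 + bit) * alignment);
          garbage &= garbage - 1;  // clear the lowest set bit
        }
      }
      if (!batch.empty()) {
        callback(batch.size(), batch.data(), arg);
      }
    }
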
diff --git a/runtime/gc/space/large_object_space.h b/runtime/gc/space/large_object_space.h
index 18e518f..0daefba 100644
--- a/runtime/gc/space/large_object_space.h
+++ b/runtime/gc/space/large_object_space.h
@@ -49,11 +49,11 @@ class LargeObjectSpace : public DiscontinuousSpace, public AllocSpace {
return num_objects_allocated_;
}
- uint64_t GetTotalBytesAllocated() {
+ uint64_t GetTotalBytesAllocated() const {
return total_bytes_allocated_;
}
- uint64_t GetTotalObjectsAllocated() {
+ uint64_t GetTotalObjectsAllocated() const {
return total_objects_allocated_;
}
@@ -73,20 +73,36 @@ class LargeObjectSpace : public DiscontinuousSpace, public AllocSpace {
return this;
}
- void Sweep(bool swap_bitmaps, size_t* freed_objects, size_t* freed_bytes);
+ void Sweep(bool swap_bitmaps, size_t* out_freed_objects, size_t* out_freed_bytes);
virtual bool CanMoveObjects() const OVERRIDE {
return false;
}
+ // Current address at which the space begins, which may vary as the space is filled.
+ byte* Begin() const {
+ return begin_;
+ }
+
+ // Current address at which the space ends, which may vary as the space is filled.
+ byte* End() const {
+ return end_;
+ }
+
protected:
- explicit LargeObjectSpace(const std::string& name);
+ explicit LargeObjectSpace(const std::string& name, byte* begin, byte* end);
+
+ static void SweepCallback(size_t num_ptrs, mirror::Object** ptrs, void* arg);
// Approximate number of bytes which have been allocated into the space.
- size_t num_bytes_allocated_;
- size_t num_objects_allocated_;
- size_t total_bytes_allocated_;
- size_t total_objects_allocated_;
+ uint64_t num_bytes_allocated_;
+ uint64_t num_objects_allocated_;
+ uint64_t total_bytes_allocated_;
+ uint64_t total_objects_allocated_;
+
+ // Begin and end, may change as more large objects are allocated.
+ byte* begin_;
+ byte* end_;
friend class Space;
@@ -242,9 +258,6 @@ class FreeListSpace FINAL : public LargeObjectSpace {
typedef std::set<AllocationHeader*, AllocationHeader::SortByPrevFree,
accounting::GcAllocator<AllocationHeader*> > FreeBlocks;
- byte* const begin_;
- byte* const end_;
-
  // There is no footer for any allocations at the end of the space, so we keep track of how much
// free space there is at the end manually.
UniquePtr<MemMap> mem_map_;
diff --git a/runtime/gc/space/malloc_space.cc b/runtime/gc/space/malloc_space.cc
index c3ca096..7493c19 100644
--- a/runtime/gc/space/malloc_space.cc
+++ b/runtime/gc/space/malloc_space.cc
@@ -48,15 +48,15 @@ MallocSpace::MallocSpace(const std::string& name, MemMap* mem_map,
static const uintptr_t kGcCardSize = static_cast<uintptr_t>(accounting::CardTable::kCardSize);
CHECK(IsAligned<kGcCardSize>(reinterpret_cast<uintptr_t>(mem_map->Begin())));
CHECK(IsAligned<kGcCardSize>(reinterpret_cast<uintptr_t>(mem_map->End())));
- live_bitmap_.reset(accounting::SpaceBitmap::Create(
+ live_bitmap_.reset(accounting::ContinuousSpaceBitmap::Create(
StringPrintf("allocspace %s live-bitmap %d", name.c_str(), static_cast<int>(bitmap_index)),
Begin(), Capacity()));
- DCHECK(live_bitmap_.get() != NULL) << "could not create allocspace live bitmap #"
+ DCHECK(live_bitmap_.get() != nullptr) << "could not create allocspace live bitmap #"
<< bitmap_index;
- mark_bitmap_.reset(accounting::SpaceBitmap::Create(
+ mark_bitmap_.reset(accounting::ContinuousSpaceBitmap::Create(
StringPrintf("allocspace %s mark-bitmap %d", name.c_str(), static_cast<int>(bitmap_index)),
Begin(), Capacity()));
- DCHECK(live_bitmap_.get() != NULL) << "could not create allocspace mark bitmap #"
+ DCHECK(live_bitmap_.get() != nullptr) << "could not create allocspace mark bitmap #"
<< bitmap_index;
}
for (auto& freed : recent_freed_objects_) {
@@ -180,11 +180,6 @@ ZygoteSpace* MallocSpace::CreateZygoteSpace(const char* alloc_space_name, bool l
<< "GrowthLimit " << growth_limit_ << "\n"
<< "Capacity " << Capacity();
SetGrowthLimit(RoundUp(size, kPageSize));
- SetFootprintLimit(RoundUp(size, kPageSize));
-
- // TODO: Not hardcode these in?
- const size_t starting_size = kPageSize;
- const size_t initial_size = 2 * MB;
// FIXME: Do we need reference counted pointers here?
// Make the two spaces share the same mark bitmaps since the bitmaps span both of the spaces.
VLOG(heap) << "Creating new AllocSpace: ";
@@ -196,11 +191,11 @@ ZygoteSpace* MallocSpace::CreateZygoteSpace(const char* alloc_space_name, bool l
UniquePtr<MemMap> mem_map(GetMemMap()->RemapAtEnd(end_, alloc_space_name,
PROT_READ | PROT_WRITE, &error_msg));
CHECK(mem_map.get() != nullptr) << error_msg;
- void* allocator = CreateAllocator(end_, starting_size, initial_size, capacity, low_memory_mode);
+ void* allocator = CreateAllocator(end_, starting_size_, initial_size_, capacity, low_memory_mode);
// Protect memory beyond the initial size.
- byte* end = mem_map->Begin() + starting_size;
- if (capacity - initial_size > 0) {
- CHECK_MEMORY_CALL(mprotect, (end, capacity - initial_size, PROT_NONE), alloc_space_name);
+ byte* end = mem_map->Begin() + starting_size_;
+ if (capacity > initial_size_) {
+ CHECK_MEMORY_CALL(mprotect, (end, capacity - initial_size_, PROT_NONE), alloc_space_name);
}
*out_malloc_space = CreateInstance(alloc_space_name, mem_map.release(), allocator, end_, end,
limit_, growth_limit, CanMoveObjects());
@@ -231,14 +226,13 @@ void MallocSpace::Dump(std::ostream& os) const {
void MallocSpace::SweepCallback(size_t num_ptrs, mirror::Object** ptrs, void* arg) {
SweepCallbackContext* context = static_cast<SweepCallbackContext*>(arg);
- DCHECK(context->space->IsMallocSpace());
space::MallocSpace* space = context->space->AsMallocSpace();
Thread* self = context->self;
Locks::heap_bitmap_lock_->AssertExclusiveHeld(self);
// If the bitmaps aren't swapped we need to clear the bits since the GC isn't going to re-swap
// the bitmaps as an optimization.
if (!context->swap_bitmaps) {
- accounting::SpaceBitmap* bitmap = space->GetLiveBitmap();
+ accounting::ContinuousSpaceBitmap* bitmap = space->GetLiveBitmap();
for (size_t i = 0; i < num_ptrs; ++i) {
bitmap->Clear(ptrs[i]);
}
diff --git a/runtime/gc/space/malloc_space.h b/runtime/gc/space/malloc_space.h
index dd4e5d4..d24016c 100644
--- a/runtime/gc/space/malloc_space.h
+++ b/runtime/gc/space/malloc_space.h
@@ -149,7 +149,7 @@ class MallocSpace : public ContinuousMemMapAllocSpace {
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
EXCLUSIVE_LOCKS_REQUIRED(lock_);
- virtual accounting::SpaceBitmap::SweepCallback* GetSweepCallback() {
+ virtual accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() {
return &SweepCallback;
}
diff --git a/runtime/gc/space/rosalloc_space.cc b/runtime/gc/space/rosalloc_space.cc
index afac2a2..5a7d941 100644
--- a/runtime/gc/space/rosalloc_space.cc
+++ b/runtime/gc/space/rosalloc_space.cc
@@ -15,10 +15,10 @@
* limitations under the License.
*/
-#include "rosalloc_space.h"
-
#include "rosalloc_space-inl.h"
+
#include "gc/accounting/card_table.h"
+#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
diff --git a/runtime/gc/space/space.cc b/runtime/gc/space/space.cc
index 4af65a9..4e28416 100644
--- a/runtime/gc/space/space.cc
+++ b/runtime/gc/space/space.cc
@@ -18,6 +18,7 @@
#include "base/logging.h"
#include "gc/accounting/heap_bitmap.h"
+#include "gc/accounting/space_bitmap-inl.h"
#include "runtime.h"
#include "thread-inl.h"
@@ -69,36 +70,34 @@ ContinuousMemMapAllocSpace* Space::AsContinuousMemMapAllocSpace() {
DiscontinuousSpace::DiscontinuousSpace(const std::string& name,
GcRetentionPolicy gc_retention_policy) :
- Space(name, gc_retention_policy),
- live_objects_(new accounting::ObjectSet("large live objects")),
- mark_objects_(new accounting::ObjectSet("large marked objects")) {
+ Space(name, gc_retention_policy) {
+  // TODO: Fix this if we ever support objects not in the low 32 bits.
+ const size_t capacity = static_cast<size_t>(std::numeric_limits<uint32_t>::max());
+ live_bitmap_.reset(accounting::LargeObjectBitmap::Create("large live objects", nullptr,
+ capacity));
+ CHECK(live_bitmap_.get() != nullptr);
+ mark_bitmap_.reset(accounting::LargeObjectBitmap::Create("large marked objects", nullptr,
+ capacity));
+ CHECK(mark_bitmap_.get() != nullptr);
}
void ContinuousMemMapAllocSpace::Sweep(bool swap_bitmaps, size_t* freed_objects, size_t* freed_bytes) {
DCHECK(freed_objects != nullptr);
DCHECK(freed_bytes != nullptr);
- accounting::SpaceBitmap* live_bitmap = GetLiveBitmap();
- accounting::SpaceBitmap* mark_bitmap = GetMarkBitmap();
+ accounting::ContinuousSpaceBitmap* live_bitmap = GetLiveBitmap();
+ accounting::ContinuousSpaceBitmap* mark_bitmap = GetMarkBitmap();
// If the bitmaps are bound then sweeping this space clearly won't do anything.
if (live_bitmap == mark_bitmap) {
return;
}
- SweepCallbackContext scc;
- scc.swap_bitmaps = swap_bitmaps;
- scc.heap = Runtime::Current()->GetHeap();
- scc.self = Thread::Current();
- scc.space = this;
- scc.freed_objects = 0;
- scc.freed_bytes = 0;
+ SweepCallbackContext scc(swap_bitmaps, this);
if (swap_bitmaps) {
std::swap(live_bitmap, mark_bitmap);
}
// Bitmaps are pre-swapped for optimization which enables sweeping with the heap unlocked.
- accounting::SpaceBitmap::SweepWalk(*live_bitmap, *mark_bitmap,
- reinterpret_cast<uintptr_t>(Begin()),
- reinterpret_cast<uintptr_t>(End()),
- GetSweepCallback(),
- reinterpret_cast<void*>(&scc));
+ accounting::ContinuousSpaceBitmap::SweepWalk(
+ *live_bitmap, *mark_bitmap, reinterpret_cast<uintptr_t>(Begin()),
+ reinterpret_cast<uintptr_t>(End()), GetSweepCallback(), reinterpret_cast<void*>(&scc));
*freed_objects += scc.freed_objects;
*freed_bytes += scc.freed_bytes;
}
@@ -106,9 +105,9 @@ void ContinuousMemMapAllocSpace::Sweep(bool swap_bitmaps, size_t* freed_objects,
// Returns the old mark bitmap.
void ContinuousMemMapAllocSpace::BindLiveToMarkBitmap() {
CHECK(!HasBoundBitmaps());
- accounting::SpaceBitmap* live_bitmap = GetLiveBitmap();
+ accounting::ContinuousSpaceBitmap* live_bitmap = GetLiveBitmap();
if (live_bitmap != mark_bitmap_.get()) {
- accounting::SpaceBitmap* mark_bitmap = mark_bitmap_.release();
+ accounting::ContinuousSpaceBitmap* mark_bitmap = mark_bitmap_.release();
Runtime::Current()->GetHeap()->GetMarkBitmap()->ReplaceBitmap(mark_bitmap, live_bitmap);
temp_bitmap_.reset(mark_bitmap);
mark_bitmap_.reset(live_bitmap);
@@ -122,7 +121,7 @@ bool ContinuousMemMapAllocSpace::HasBoundBitmaps() const {
void ContinuousMemMapAllocSpace::UnBindBitmaps() {
CHECK(HasBoundBitmaps());
// At this point, the temp_bitmap holds our old mark bitmap.
- accounting::SpaceBitmap* new_bitmap = temp_bitmap_.release();
+ accounting::ContinuousSpaceBitmap* new_bitmap = temp_bitmap_.release();
Runtime::Current()->GetHeap()->GetMarkBitmap()->ReplaceBitmap(mark_bitmap_.get(), new_bitmap);
CHECK_EQ(mark_bitmap_.release(), live_bitmap_.get());
mark_bitmap_.reset(new_bitmap);
@@ -137,6 +136,11 @@ void ContinuousMemMapAllocSpace::SwapBitmaps() {
mark_bitmap_->SetName(temp_name);
}
+Space::SweepCallbackContext::SweepCallbackContext(bool swap_bitmaps, space::Space* space)
+ : swap_bitmaps(swap_bitmaps), space(space), self(Thread::Current()), freed_objects(0),
+ freed_bytes(0) {
+}
+
} // namespace space
} // namespace gc
} // namespace art
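
The capacity chosen for the large-object bitmaps above is the whole low 4 GB of address space, which is cheaper than it may look because large objects are page aligned: assuming kLargeObjectAlignment is the 4 KB page size (the constant itself is defined outside this patch), one bit per page over 2^32 bytes is 2^32 / 4096 = 1,048,576 bits, i.e. 128 KB per bitmap. Passing nullptr as the begin address simply makes the bitmap cover addresses starting at zero.
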
diff --git a/runtime/gc/space/space.h b/runtime/gc/space/space.h
index c9022f1..0a87a16 100644
--- a/runtime/gc/space/space.h
+++ b/runtime/gc/space/space.h
@@ -34,10 +34,6 @@ namespace mirror {
namespace gc {
-namespace accounting {
- class SpaceBitmap;
-} // namespace accounting
-
class Heap;
namespace space {
@@ -177,10 +173,11 @@ class Space {
protected:
struct SweepCallbackContext {
- bool swap_bitmaps;
- Heap* heap;
- space::Space* space;
- Thread* self;
+ public:
+ SweepCallbackContext(bool swap_bitmaps, space::Space* space);
+ const bool swap_bitmaps;
+ space::Space* const space;
+ Thread* const self;
size_t freed_objects;
size_t freed_bytes;
};
@@ -268,8 +265,8 @@ class ContinuousSpace : public Space {
return End() - Begin();
}
- virtual accounting::SpaceBitmap* GetLiveBitmap() const = 0;
- virtual accounting::SpaceBitmap* GetMarkBitmap() const = 0;
+ virtual accounting::ContinuousSpaceBitmap* GetLiveBitmap() const = 0;
+ virtual accounting::ContinuousSpaceBitmap* GetMarkBitmap() const = 0;
// Maximum which the mapped space can grow to.
virtual size_t Capacity() const {
@@ -317,15 +314,15 @@ class ContinuousSpace : public Space {
// is suitable for use for large primitive arrays.
class DiscontinuousSpace : public Space {
public:
- accounting::ObjectSet* GetLiveObjects() const {
- return live_objects_.get();
+ accounting::LargeObjectBitmap* GetLiveBitmap() const {
+ return live_bitmap_.get();
}
- accounting::ObjectSet* GetMarkObjects() const {
- return mark_objects_.get();
+ accounting::LargeObjectBitmap* GetMarkBitmap() const {
+ return mark_bitmap_.get();
}
- virtual bool IsDiscontinuousSpace() const {
+ virtual bool IsDiscontinuousSpace() const OVERRIDE {
return true;
}
@@ -334,8 +331,8 @@ class DiscontinuousSpace : public Space {
protected:
DiscontinuousSpace(const std::string& name, GcRetentionPolicy gc_retention_policy);
- UniquePtr<accounting::ObjectSet> live_objects_;
- UniquePtr<accounting::ObjectSet> mark_objects_;
+ UniquePtr<accounting::LargeObjectBitmap> live_bitmap_;
+ UniquePtr<accounting::LargeObjectBitmap> mark_bitmap_;
private:
DISALLOW_COPY_AND_ASSIGN(DiscontinuousSpace);
@@ -399,24 +396,24 @@ class ContinuousMemMapAllocSpace : public MemMapSpace, public AllocSpace {
// Swap the live and mark bitmaps of this space. This is used by the GC for concurrent sweeping.
void SwapBitmaps();
- // Reset the space back to an empty space and release memory.
+ // Clear the space back to an empty space.
virtual void Clear() = 0;
- accounting::SpaceBitmap* GetLiveBitmap() const {
+ accounting::ContinuousSpaceBitmap* GetLiveBitmap() const {
return live_bitmap_.get();
}
- accounting::SpaceBitmap* GetMarkBitmap() const {
+ accounting::ContinuousSpaceBitmap* GetMarkBitmap() const {
return mark_bitmap_.get();
}
void Sweep(bool swap_bitmaps, size_t* freed_objects, size_t* freed_bytes);
- virtual accounting::SpaceBitmap::SweepCallback* GetSweepCallback() = 0;
+ virtual accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() = 0;
protected:
- UniquePtr<accounting::SpaceBitmap> live_bitmap_;
- UniquePtr<accounting::SpaceBitmap> mark_bitmap_;
- UniquePtr<accounting::SpaceBitmap> temp_bitmap_;
+ UniquePtr<accounting::ContinuousSpaceBitmap> live_bitmap_;
+ UniquePtr<accounting::ContinuousSpaceBitmap> mark_bitmap_;
+ UniquePtr<accounting::ContinuousSpaceBitmap> temp_bitmap_;
ContinuousMemMapAllocSpace(const std::string& name, MemMap* mem_map, byte* begin,
byte* end, byte* limit, GcRetentionPolicy gc_retention_policy)
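
ContinuousSpaceBitmap and LargeObjectBitmap are not defined in this header; given the templated forward declaration now used in card_table.h, they are presumably aliases of the templated SpaceBitmap roughly along these lines (the alignment constant chosen for large objects is an assumption):

    namespace art {
    namespace gc {
    namespace accounting {

    template <size_t kAlignment> class SpaceBitmap;

    // Continuous spaces track object-aligned addresses; large objects are
    // page-allocated, so a coarser bitmap granularity suffices for them.
    typedef SpaceBitmap<kObjectAlignment> ContinuousSpaceBitmap;
    typedef SpaceBitmap<kPageSize> LargeObjectBitmap;

    }  // namespace accounting
    }  // namespace gc
    }  // namespace art
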
diff --git a/runtime/gc/space/zygote_space.cc b/runtime/gc/space/zygote_space.cc
index a60ab38..0466413 100644
--- a/runtime/gc/space/zygote_space.cc
+++ b/runtime/gc/space/zygote_space.cc
@@ -40,8 +40,8 @@ class CountObjectsAllocated {
};
ZygoteSpace* ZygoteSpace::Create(const std::string& name, MemMap* mem_map,
- accounting::SpaceBitmap* live_bitmap,
- accounting::SpaceBitmap* mark_bitmap) {
+ accounting::ContinuousSpaceBitmap* live_bitmap,
+ accounting::ContinuousSpaceBitmap* mark_bitmap) {
DCHECK(live_bitmap != nullptr);
DCHECK(mark_bitmap != nullptr);
size_t objects_allocated = 0;
@@ -101,11 +101,11 @@ void ZygoteSpace::SweepCallback(size_t num_ptrs, mirror::Object** ptrs, void* ar
DCHECK(context->space->IsZygoteSpace());
ZygoteSpace* zygote_space = context->space->AsZygoteSpace();
Locks::heap_bitmap_lock_->AssertExclusiveHeld(context->self);
- accounting::CardTable* card_table = context->heap->GetCardTable();
+ accounting::CardTable* card_table = Runtime::Current()->GetHeap()->GetCardTable();
// If the bitmaps aren't swapped we need to clear the bits since the GC isn't going to re-swap
// the bitmaps as an optimization.
if (!context->swap_bitmaps) {
- accounting::SpaceBitmap* bitmap = zygote_space->GetLiveBitmap();
+ accounting::ContinuousSpaceBitmap* bitmap = zygote_space->GetLiveBitmap();
for (size_t i = 0; i < num_ptrs; ++i) {
bitmap->Clear(ptrs[i]);
}
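
Since SweepCallbackContext no longer carries a Heap*, the callback reaches heap-wide state such as the card table through the Runtime singleton; the bitmap clearing itself is unchanged. As a standalone sketch of that un-swapped path (the helper name is illustrative):

    void ClearLiveBitsForSweptObjects(accounting::ContinuousSpaceBitmap* live_bitmap,
                                      mirror::Object** ptrs, size_t num_ptrs) {
      // When the GC has not swapped the bitmaps, the swept objects' live bits
      // must be cleared by hand so the live bitmap stays consistent.
      for (size_t i = 0; i < num_ptrs; ++i) {
        live_bitmap->Clear(ptrs[i]);
      }
    }
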
diff --git a/runtime/gc/space/zygote_space.h b/runtime/gc/space/zygote_space.h
index 30370aa..50fc62b 100644
--- a/runtime/gc/space/zygote_space.h
+++ b/runtime/gc/space/zygote_space.h
@@ -17,16 +17,13 @@
#ifndef ART_RUNTIME_GC_SPACE_ZYGOTE_SPACE_H_
#define ART_RUNTIME_GC_SPACE_ZYGOTE_SPACE_H_
+#include "gc/accounting/space_bitmap.h"
#include "malloc_space.h"
#include "mem_map.h"
namespace art {
namespace gc {
-namespace accounting {
-class SpaceBitmap;
-}
-
namespace space {
// A zygote space is a space which you cannot allocate into or free from.
@@ -34,8 +31,8 @@ class ZygoteSpace FINAL : public ContinuousMemMapAllocSpace {
public:
// Returns the remaining storage in the out_map field.
static ZygoteSpace* Create(const std::string& name, MemMap* mem_map,
- accounting::SpaceBitmap* live_bitmap,
- accounting::SpaceBitmap* mark_bitmap)
+ accounting::ContinuousSpaceBitmap* live_bitmap,
+ accounting::ContinuousSpaceBitmap* mark_bitmap)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void Dump(std::ostream& os) const;
@@ -78,7 +75,7 @@ class ZygoteSpace FINAL : public ContinuousMemMapAllocSpace {
}
protected:
- virtual accounting::SpaceBitmap::SweepCallback* GetSweepCallback() {
+ virtual accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() {
return &SweepCallback;
}
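
The SweepCallback type returned here is presumably the function-pointer typedef nested in the bitmap class, matching the signature of ZygoteSpace::SweepCallback above (the typedef itself is not shown in this patch):

    // Presumed shape of the nested typedef in SpaceBitmap:
    typedef void SweepCallback(size_t num_ptrs, mirror::Object** ptrs, void* arg);

Each ContinuousMemMapAllocSpace subclass thus supplies a plain function, and the SweepCallbackContext travels through the void* argument.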