Diffstat (limited to 'runtime/gc')
-rw-r--r--  runtime/gc/accounting/heap_bitmap-inl.h      |  51
-rw-r--r--  runtime/gc/accounting/heap_bitmap.h          |  53
-rw-r--r--  runtime/gc/accounting/mod_union_table-inl.h  |  35
-rw-r--r--  runtime/gc/accounting/mod_union_table.cc     |  15
-rw-r--r--  runtime/gc/accounting/mod_union_table.h      |   2
-rw-r--r--  runtime/gc/collector/concurrent_copying.cc   |  25
-rw-r--r--  runtime/gc/collector/concurrent_copying.h    |  56
-rw-r--r--  runtime/gc/collector/garbage_collector.cc    |  34
-rw-r--r--  runtime/gc/collector/garbage_collector.h     |  18
-rw-r--r--  runtime/gc/collector/mark_sweep-inl.h        |   2
-rw-r--r--  runtime/gc/collector/mark_sweep.cc           | 232
-rw-r--r--  runtime/gc/collector/mark_sweep.h            |  42
-rw-r--r--  runtime/gc/collector/semi_space.cc           |  35
-rw-r--r--  runtime/gc/collector/semi_space.h            |  23
-rw-r--r--  runtime/gc/collector/sticky_mark_sweep.h     |   4
-rw-r--r--  runtime/gc/collector_type.h                  |   2
-rw-r--r--  runtime/gc/heap.cc                           | 112
-rw-r--r--  runtime/gc/heap.h                            |  15
18 files changed, 372 insertions, 384 deletions
diff --git a/runtime/gc/accounting/heap_bitmap-inl.h b/runtime/gc/accounting/heap_bitmap-inl.h
index 18b93d4..04e85d2 100644
--- a/runtime/gc/accounting/heap_bitmap-inl.h
+++ b/runtime/gc/accounting/heap_bitmap-inl.h
@@ -19,6 +19,8 @@
#include "heap_bitmap.h"
+#include "space_bitmap-inl.h"
+
namespace art {
namespace gc {
namespace accounting {
@@ -34,6 +36,55 @@ inline void HeapBitmap::Visit(const Visitor& visitor) {
}
}
+inline bool HeapBitmap::Test(const mirror::Object* obj) {
+ SpaceBitmap* bitmap = GetContinuousSpaceBitmap(obj);
+ if (LIKELY(bitmap != nullptr)) {
+ return bitmap->Test(obj);
+ } else {
+ return GetDiscontinuousSpaceObjectSet(obj) != NULL;
+ }
+}
+
+inline void HeapBitmap::Clear(const mirror::Object* obj) {
+ SpaceBitmap* bitmap = GetContinuousSpaceBitmap(obj);
+ if (LIKELY(bitmap != nullptr)) {
+ bitmap->Clear(obj);
+ } else {
+ ObjectSet* set = GetDiscontinuousSpaceObjectSet(obj);
+ DCHECK(set != NULL);
+ set->Clear(obj);
+ }
+}
+
+inline void HeapBitmap::Set(const mirror::Object* obj) {
+ SpaceBitmap* bitmap = GetContinuousSpaceBitmap(obj);
+ if (LIKELY(bitmap != NULL)) {
+ bitmap->Set(obj);
+ } else {
+ ObjectSet* set = GetDiscontinuousSpaceObjectSet(obj);
+ DCHECK(set != NULL);
+ set->Set(obj);
+ }
+}
+
+inline SpaceBitmap* HeapBitmap::GetContinuousSpaceBitmap(const mirror::Object* obj) const {
+ for (const auto& bitmap : continuous_space_bitmaps_) {
+ if (bitmap->HasAddress(obj)) {
+ return bitmap;
+ }
+ }
+ return nullptr;
+}
+
+inline ObjectSet* HeapBitmap::GetDiscontinuousSpaceObjectSet(const mirror::Object* obj) const {
+ for (const auto& space_set : discontinuous_space_sets_) {
+ if (space_set->Test(obj)) {
+ return space_set;
+ }
+ }
+ return nullptr;
+}
+
} // namespace accounting
} // namespace gc
} // namespace art
diff --git a/runtime/gc/accounting/heap_bitmap.h b/runtime/gc/accounting/heap_bitmap.h
index b23b12e..f729c0e 100644
--- a/runtime/gc/accounting/heap_bitmap.h
+++ b/runtime/gc/accounting/heap_bitmap.h
@@ -31,54 +31,11 @@ namespace accounting {
class HeapBitmap {
public:
- bool Test(const mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
- SpaceBitmap* bitmap = GetContinuousSpaceBitmap(obj);
- if (LIKELY(bitmap != nullptr)) {
- return bitmap->Test(obj);
- } else {
- return GetDiscontinuousSpaceObjectSet(obj) != NULL;
- }
- }
-
- void Clear(const mirror::Object* obj) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
- SpaceBitmap* bitmap = GetContinuousSpaceBitmap(obj);
- if (LIKELY(bitmap != NULL)) {
- bitmap->Clear(obj);
- } else {
- ObjectSet* set = GetDiscontinuousSpaceObjectSet(obj);
- DCHECK(set != NULL);
- set->Clear(obj);
- }
- }
-
- void Set(const mirror::Object* obj) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
- SpaceBitmap* bitmap = GetContinuousSpaceBitmap(obj);
- if (LIKELY(bitmap != NULL)) {
- bitmap->Set(obj);
- } else {
- ObjectSet* set = GetDiscontinuousSpaceObjectSet(obj);
- DCHECK(set != NULL);
- set->Set(obj);
- }
- }
-
- SpaceBitmap* GetContinuousSpaceBitmap(const mirror::Object* obj) {
- for (const auto& bitmap : continuous_space_bitmaps_) {
- if (bitmap->HasAddress(obj)) {
- return bitmap;
- }
- }
- return nullptr;
- }
-
- ObjectSet* GetDiscontinuousSpaceObjectSet(const mirror::Object* obj) {
- for (const auto& space_set : discontinuous_space_sets_) {
- if (space_set->Test(obj)) {
- return space_set;
- }
- }
- return nullptr;
- }
+ bool Test(const mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+ void Clear(const mirror::Object* obj) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+ void Set(const mirror::Object* obj) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+ SpaceBitmap* GetContinuousSpaceBitmap(const mirror::Object* obj) const;
+ ObjectSet* GetDiscontinuousSpaceObjectSet(const mirror::Object* obj) const;
void Walk(ObjectCallback* callback, void* arg)
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
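
Note (not part of the patch): the inlined helpers moved into heap_bitmap-inl.h above dispatch in two stages, first over the continuous-space bitmaps by address range, then over the discontinuous-space object sets. A minimal standalone sketch of that lookup order, using simplified stand-in types rather than ART's SpaceBitmap and ObjectSet:

#include <cstdint>
#include <unordered_set>
#include <vector>

// Stand-in for a space bitmap that covers one contiguous address range.
struct RangeBitmap {
  uintptr_t begin, end;                  // address range this bitmap covers
  std::unordered_set<const void*> bits;  // stand-in for the real bit vector
  bool HasAddress(const void* obj) const {
    uintptr_t p = reinterpret_cast<uintptr_t>(obj);
    return p >= begin && p < end;
  }
  bool Test(const void* obj) const { return bits.count(obj) != 0; }
};

// Two-level lookup in the spirit of HeapBitmap::Test() in the diff above.
struct SimpleHeapBitmap {
  std::vector<RangeBitmap*> continuous_space_bitmaps;
  std::vector<std::unordered_set<const void*>*> discontinuous_space_sets;

  bool Test(const void* obj) const {
    // Fast path: find the continuous-space bitmap covering the address.
    for (RangeBitmap* bitmap : continuous_space_bitmaps) {
      if (bitmap->HasAddress(obj)) {
        return bitmap->Test(obj);
      }
    }
    // Slow path: fall back to the discontinuous-space object sets.
    for (const auto* set : discontinuous_space_sets) {
      if (set->count(obj) != 0) {
        return true;
      }
    }
    return false;
  }
};
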
diff --git a/runtime/gc/accounting/mod_union_table-inl.h b/runtime/gc/accounting/mod_union_table-inl.h
index 76719b6..c756127 100644
--- a/runtime/gc/accounting/mod_union_table-inl.h
+++ b/runtime/gc/accounting/mod_union_table-inl.h
@@ -32,39 +32,8 @@ class ModUnionTableToZygoteAllocspace : public ModUnionTableReferenceCache {
space::ContinuousSpace* space)
: ModUnionTableReferenceCache(name, heap, space) {}
- bool AddReference(const mirror::Object* /* obj */, const mirror::Object* ref) ALWAYS_INLINE {
- for (space::ContinuousSpace* space : GetHeap()->GetContinuousSpaces()) {
- if (space->HasAddress(ref)) {
- return !space->IsImageSpace();
- }
- }
- // Assume it points to a large object.
- // TODO: Check.
- return true;
- }
-};
-
-// A mod-union table to record Zygote references to the alloc space.
-class ModUnionTableToAllocspace : public ModUnionTableReferenceCache {
- public:
- explicit ModUnionTableToAllocspace(const std::string& name, Heap* heap,
- space::ContinuousSpace* space)
- : ModUnionTableReferenceCache(name, heap, space) {}
-
- bool AddReference(const mirror::Object* /* obj */, const mirror::Object* ref) ALWAYS_INLINE {
- const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
- typedef std::vector<space::ContinuousSpace*>::const_iterator It;
- for (It it = spaces.begin(); it != spaces.end(); ++it) {
- space::ContinuousSpace* space = *it;
- if (space->Contains(ref)) {
- // The allocation space is always considered for collection whereas the Zygote space is
- // only considered for full GC.
- return space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect;
- }
- }
- // Assume it points to a large object.
- // TODO: Check.
- return true;
+ bool ShouldAddReference(const mirror::Object* ref) const OVERRIDE ALWAYS_INLINE {
+ return !space_->HasAddress(ref);
}
};
diff --git a/runtime/gc/accounting/mod_union_table.cc b/runtime/gc/accounting/mod_union_table.cc
index 314f3c5..34ca654 100644
--- a/runtime/gc/accounting/mod_union_table.cc
+++ b/runtime/gc/accounting/mod_union_table.cc
@@ -76,7 +76,7 @@ class ModUnionUpdateObjectReferencesVisitor {
}
// Extra parameters are required since we use this same visitor signature for checking objects.
- void operator()(Object* obj, MemberOffset offset, bool /* static */) const
+ void operator()(Object* obj, MemberOffset offset, bool /*is_static*/) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// Only add the reference if it is non null and fits our criteria.
mirror::HeapReference<Object>* obj_ptr = obj->GetFieldObjectReferenceAddr(offset);
@@ -123,12 +123,12 @@ class AddToReferenceArrayVisitor {
}
// Extra parameters are required since we use this same visitor signature for checking objects.
- void operator()(Object* obj, MemberOffset offset, bool /* static */) const
+ void operator()(Object* obj, MemberOffset offset, bool /*is_static*/) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
mirror::HeapReference<Object>* ref_ptr = obj->GetFieldObjectReferenceAddr(offset);
mirror::Object* ref = ref_ptr->AsMirrorPtr();
// Only add the reference if it is non null and fits our criteria.
- if (ref != nullptr && mod_union_table_->AddReference(obj, ref)) {
+ if (ref != nullptr && mod_union_table_->ShouldAddReference(ref)) {
// Push the adddress of the reference.
references_->push_back(ref_ptr);
}
@@ -168,10 +168,10 @@ class CheckReferenceVisitor {
}
// Extra parameters are required since we use this same visitor signature for checking objects.
- void operator()(Object* obj, MemberOffset offset, bool /* is_static */) const
+ void operator()(Object* obj, MemberOffset offset, bool /*is_static*/) const
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset, false);
- if (ref != nullptr && mod_union_table_->AddReference(obj, ref) &&
+ if (ref != nullptr && mod_union_table_->ShouldAddReference(ref) &&
references_.find(ref) == references_.end()) {
Heap* heap = mod_union_table_->GetHeap();
space::ContinuousSpace* from_space = heap->FindContinuousSpaceFromObject(obj, false);
@@ -260,8 +260,7 @@ void ModUnionTableReferenceCache::Dump(std::ostream& os) {
void ModUnionTableReferenceCache::UpdateAndMarkReferences(MarkHeapReferenceCallback* callback,
void* arg) {
- Heap* heap = GetHeap();
- CardTable* card_table = heap->GetCardTable();
+ CardTable* card_table = heap_->GetCardTable();
std::vector<mirror::HeapReference<Object>*> cards_references;
ModUnionReferenceVisitor add_visitor(this, &cards_references);
@@ -271,7 +270,7 @@ void ModUnionTableReferenceCache::UpdateAndMarkReferences(MarkHeapReferenceCallb
cards_references.clear();
uintptr_t start = reinterpret_cast<uintptr_t>(card_table->AddrFromCard(card));
uintptr_t end = start + CardTable::kCardSize;
- auto* space = heap->FindContinuousSpaceFromObject(reinterpret_cast<Object*>(start), false);
+ auto* space = heap_->FindContinuousSpaceFromObject(reinterpret_cast<Object*>(start), false);
DCHECK(space != nullptr);
SpaceBitmap* live_bitmap = space->GetLiveBitmap();
live_bitmap->VisitMarkedRange(start, end, add_visitor);
diff --git a/runtime/gc/accounting/mod_union_table.h b/runtime/gc/accounting/mod_union_table.h
index c4b020b..c3a90e2 100644
--- a/runtime/gc/accounting/mod_union_table.h
+++ b/runtime/gc/accounting/mod_union_table.h
@@ -117,7 +117,7 @@ class ModUnionTableReferenceCache : public ModUnionTable {
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
// Function that tells whether or not to add a reference to the table.
- virtual bool AddReference(const mirror::Object* obj, const mirror::Object* ref) = 0;
+ virtual bool ShouldAddReference(const mirror::Object* ref) const = 0;
void Dump(std::ostream& os) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
new file mode 100644
index 0000000..079eeba
--- /dev/null
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "concurrent_copying.h"
+
+namespace art {
+namespace gc {
+namespace collector {
+
+} // namespace collector
+} // namespace gc
+} // namespace art
diff --git a/runtime/gc/collector/concurrent_copying.h b/runtime/gc/collector/concurrent_copying.h
new file mode 100644
index 0000000..ab26a9c
--- /dev/null
+++ b/runtime/gc/collector/concurrent_copying.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_GC_COLLECTOR_CONCURRENT_COPYING_H_
+#define ART_RUNTIME_GC_COLLECTOR_CONCURRENT_COPYING_H_
+
+#include "garbage_collector.h"
+
+namespace art {
+namespace gc {
+namespace collector {
+
+class ConcurrentCopying : public GarbageCollector {
+ public:
+ explicit ConcurrentCopying(Heap* heap, bool generational = false,
+ const std::string& name_prefix = "")
+ : GarbageCollector(heap,
+ name_prefix + (name_prefix.empty() ? "" : " ") +
+ "concurrent copying + mark sweep") {}
+
+ ~ConcurrentCopying() {}
+
+ virtual void InitializePhase() OVERRIDE {}
+ virtual void MarkingPhase() OVERRIDE {}
+ virtual void ReclaimPhase() OVERRIDE {}
+ virtual void FinishPhase() OVERRIDE {}
+ virtual GcType GetGcType() const OVERRIDE {
+ return kGcTypePartial;
+ }
+ virtual CollectorType GetCollectorType() const OVERRIDE {
+ return kCollectorTypeCC;
+ }
+ virtual void RevokeAllThreadLocalBuffers() OVERRIDE {}
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(ConcurrentCopying);
+};
+
+} // namespace collector
+} // namespace gc
+} // namespace art
+
+#endif // ART_RUNTIME_GC_COLLECTOR_CONCURRENT_COPYING_H_
diff --git a/runtime/gc/collector/garbage_collector.cc b/runtime/gc/collector/garbage_collector.cc
index 65b5471..07951e0 100644
--- a/runtime/gc/collector/garbage_collector.cc
+++ b/runtime/gc/collector/garbage_collector.cc
@@ -39,16 +39,14 @@ GarbageCollector::GarbageCollector(Heap* heap, const std::string& name)
name_(name),
gc_cause_(kGcCauseForAlloc),
clear_soft_references_(false),
- verbose_(VLOG_IS_ON(heap)),
duration_ns_(0),
- timings_(name_.c_str(), true, verbose_),
+ timings_(name_.c_str(), true, VLOG_IS_ON(heap)),
pause_histogram_((name_ + " paused").c_str(), kPauseBucketSize, kPauseBucketCount),
cumulative_timings_(name) {
ResetCumulativeStatistics();
}
-void GarbageCollector::HandleDirtyObjectsPhase() {
- LOG(FATAL) << "Unreachable";
+void GarbageCollector::PausePhase() {
}
void GarbageCollector::RegisterPause(uint64_t nano_length) {
@@ -87,12 +85,13 @@ void GarbageCollector::Run(GcCause gc_cause, bool clear_soft_references) {
// Pause is the entire length of the GC.
uint64_t pause_start = NanoTime();
ATRACE_BEGIN("Application threads suspended");
- // Mutator lock may be already exclusively held when we do garbage collections for changing the
- // current collector / allocator during process state updates.
+ // Mutator lock may be already exclusively held when we do garbage collections for changing
+ // the current collector / allocator during process state updates.
if (Locks::mutator_lock_->IsExclusiveHeld(self)) {
// PreGcRosAllocVerification() is called in Heap::TransitionCollector().
RevokeAllThreadLocalBuffers();
MarkingPhase();
+ PausePhase();
ReclaimPhase();
// PostGcRosAllocVerification() is called in Heap::TransitionCollector().
} else {
@@ -102,6 +101,7 @@ void GarbageCollector::Run(GcCause gc_cause, bool clear_soft_references) {
GetHeap()->PreGcRosAllocVerification(&timings_);
RevokeAllThreadLocalBuffers();
MarkingPhase();
+ PausePhase();
ReclaimPhase();
GetHeap()->PostGcRosAllocVerification(&timings_);
ATRACE_BEGIN("Resuming mutator threads");
@@ -126,7 +126,7 @@ void GarbageCollector::Run(GcCause gc_cause, bool clear_soft_references) {
ATRACE_END();
ATRACE_BEGIN("All mutator threads suspended");
GetHeap()->PreGcRosAllocVerification(&timings_);
- HandleDirtyObjectsPhase();
+ PausePhase();
RevokeAllThreadLocalBuffers();
GetHeap()->PostGcRosAllocVerification(&timings_);
ATRACE_END();
@@ -142,12 +142,20 @@ void GarbageCollector::Run(GcCause gc_cause, bool clear_soft_references) {
FinishPhase();
break;
}
+ case kCollectorTypeCC: {
+ // To be implemented.
+ break;
+ }
default: {
LOG(FATAL) << "Unreachable collector type=" << static_cast<size_t>(collector_type);
break;
}
}
-
+ // Add the current timings to the cumulative timings.
+ cumulative_timings_.AddLogger(timings_);
+ // Update cumulative statistics with how many bytes the GC iteration freed.
+ total_freed_objects_ += GetFreedObjects() + GetFreedLargeObjects();
+ total_freed_bytes_ += GetFreedBytes() + GetFreedLargeObjectBytes();
uint64_t end_time = NanoTime();
duration_ns_ = end_time - start_time;
total_time_ns_ += GetDurationNs();
@@ -186,6 +194,16 @@ void GarbageCollector::SwapBitmaps() {
}
}
+uint64_t GarbageCollector::GetEstimatedMeanThroughput() const {
+ // Add 1ms to prevent possible division by 0.
+ return (total_freed_bytes_ * 1000) / (NsToMs(GetCumulativeTimings().GetTotalNs()) + 1);
+}
+
+uint64_t GarbageCollector::GetEstimatedLastIterationThroughput() const {
+ // Add 1ms to prevent possible division by 0.
+ return (freed_bytes_ * 1000) / (NsToMs(GetDurationNs()) + 1);
+}
+
} // namespace collector
} // namespace gc
} // namespace art
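
Note (not part of the patch): the two new throughput getters divide freed bytes by milliseconds and scale by 1000, i.e. they report bytes per second, with an extra millisecond in the denominator so a zero-duration GC cannot divide by zero. A small standalone sketch of the same arithmetic (assumed units, not ART code):

#include <cstdint>
#include <iostream>

// bytes * 1000 / (ms + 1) ~= bytes per second; the "+ 1" guards against a
// zero-millisecond duration.
uint64_t EstimatedThroughputBytesPerSec(uint64_t freed_bytes, uint64_t duration_ns) {
  const uint64_t duration_ms = duration_ns / 1000000;  // equivalent of NsToMs()
  return (freed_bytes * 1000) / (duration_ms + 1);
}

int main() {
  // Example: 8 MiB freed in a 40 ms iteration -> roughly 200 MB/s.
  std::cout << EstimatedThroughputBytesPerSec(8ull * 1024 * 1024, 40ull * 1000000)
            << " bytes/s\n";
  return 0;
}
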
diff --git a/runtime/gc/collector/garbage_collector.h b/runtime/gc/collector/garbage_collector.h
index 93fd2ab..ccfa9cf 100644
--- a/runtime/gc/collector/garbage_collector.h
+++ b/runtime/gc/collector/garbage_collector.h
@@ -68,8 +68,7 @@ class GarbageCollector {
TimingLogger& GetTimings() {
return timings_;
}
-
- CumulativeLogger& GetCumulativeTimings() {
+ const CumulativeLogger& GetCumulativeTimings() const {
return cumulative_timings_;
}
@@ -111,6 +110,17 @@ class GarbageCollector {
return pause_histogram_;
}
+ // Returns the estimated throughput in bytes / second.
+ uint64_t GetEstimatedMeanThroughput() const;
+
+ // Returns the estimated throughput of the last GC iteration.
+ uint64_t GetEstimatedLastIterationThroughput() const;
+
+ // Returns how many GC iterations have been run.
+ size_t GetIterations() const {
+ return GetCumulativeTimings().GetIterations();
+ }
+
protected:
// The initial phase. Done without mutators paused.
virtual void InitializePhase() = 0;
@@ -119,7 +129,7 @@ class GarbageCollector {
virtual void MarkingPhase() = 0;
// Only called for concurrent GCs.
- virtual void HandleDirtyObjectsPhase() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ virtual void PausePhase();
// Called with mutators running.
virtual void ReclaimPhase() = 0;
@@ -140,8 +150,6 @@ class GarbageCollector {
GcCause gc_cause_;
bool clear_soft_references_;
- const bool verbose_;
-
uint64_t duration_ns_;
TimingLogger timings_;
diff --git a/runtime/gc/collector/mark_sweep-inl.h b/runtime/gc/collector/mark_sweep-inl.h
index 1cb2adb..974952d 100644
--- a/runtime/gc/collector/mark_sweep-inl.h
+++ b/runtime/gc/collector/mark_sweep-inl.h
@@ -21,7 +21,7 @@
#include "gc/heap.h"
#include "mirror/art_field.h"
-#include "mirror/class.h"
+#include "mirror/class-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/reference.h"
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 8abf5e2..91ccd64 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -27,31 +27,20 @@
#include "base/mutex-inl.h"
#include "base/timing_logger.h"
#include "gc/accounting/card_table-inl.h"
-#include "gc/accounting/heap_bitmap.h"
+#include "gc/accounting/heap_bitmap-inl.h"
#include "gc/accounting/mod_union_table.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
#include "gc/space/image_space.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
-#include "indirect_reference_table.h"
-#include "intern_table.h"
-#include "jni_internal.h"
-#include "monitor.h"
#include "mark_sweep-inl.h"
-#include "mirror/art_field.h"
#include "mirror/art_field-inl.h"
-#include "mirror/class-inl.h"
-#include "mirror/class_loader.h"
-#include "mirror/dex_cache.h"
-#include "mirror/reference-inl.h"
#include "mirror/object-inl.h"
-#include "mirror/object_array.h"
-#include "mirror/object_array-inl.h"
#include "runtime.h"
+#include "scoped_thread_state_change.h"
#include "thread-inl.h"
#include "thread_list.h"
-#include "verifier/method_verifier.h"
using ::art::mirror::ArtField;
using ::art::mirror::Class;
@@ -163,49 +152,38 @@ void MarkSweep::PreProcessReferences() {
}
}
-void MarkSweep::HandleDirtyObjectsPhase() {
- TimingLogger::ScopedSplit split("(Paused)HandleDirtyObjectsPhase", &timings_);
+void MarkSweep::PausePhase() {
+ TimingLogger::ScopedSplit split("(Paused)PausePhase", &timings_);
Thread* self = Thread::Current();
Locks::mutator_lock_->AssertExclusiveHeld(self);
-
- {
+ if (IsConcurrent()) {
+ // Handle the dirty objects if we are a concurrent GC.
WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
-
// Re-mark root set.
ReMarkRoots();
-
// Scan dirty objects, this is only required if we are not doing concurrent GC.
RecursiveMarkDirtyObjects(true, accounting::CardTable::kCardDirty);
}
-
ProcessReferences(self);
-
- // Only need to do this if we have the card mark verification on, and only during concurrent GC.
- if (GetHeap()->verify_missing_card_marks_ || GetHeap()->verify_pre_gc_heap_||
- GetHeap()->verify_post_gc_heap_) {
+ {
+ timings_.NewSplit("SwapStacks");
WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
- // This second sweep makes sure that we don't have any objects in the live stack which point to
- // freed objects. These cause problems since their references may be previously freed objects.
- SweepArray(GetHeap()->allocation_stack_.get(), false);
- // Since SweepArray() above resets the (active) allocation
- // stack. Need to revoke the thread-local allocation stacks that
- // point into it.
+ heap_->SwapStacks(self);
+ live_stack_freeze_size_ = heap_->GetLiveStack()->Size();
+ // Need to revoke all the thread local allocation stacks since we just swapped the allocation
+ // stacks and don't want anybody to allocate into the live stack.
RevokeAllThreadLocalAllocationStacks(self);
}
-
timings_.StartSplit("PreSweepingGcVerification");
heap_->PreSweepingGcVerification(this);
timings_.EndSplit();
-
- // Ensure that nobody inserted items in the live stack after we swapped the stacks.
- ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
- CHECK_GE(live_stack_freeze_size_, GetHeap()->GetLiveStack()->Size());
-
- // Disallow new system weaks to prevent a race which occurs when someone adds a new system
- // weak before we sweep them. Since this new system weak may not be marked, the GC may
- // incorrectly sweep it. This also fixes a race where interning may attempt to return a strong
- // reference to a string that is about to be swept.
- Runtime::Current()->DisallowNewSystemWeaks();
+ if (IsConcurrent()) {
+ // Disallow new system weaks to prevent a race which occurs when someone adds a new system
+ // weak before we sweep them. Since this new system weak may not be marked, the GC may
+ // incorrectly sweep it. This also fixes a race where interning may attempt to return a strong
+ // reference to a string that is about to be swept.
+ Runtime::Current()->DisallowNewSystemWeaks();
+ }
}
void MarkSweep::PreCleanCards() {
@@ -227,8 +205,7 @@ void MarkSweep::PreCleanCards() {
// reference write are visible to the GC before the card is scanned (this is due to locks being
// acquired / released in the checkpoint code).
// The other roots are also marked to help reduce the pause.
- MarkThreadRoots(self);
- // TODO: Only mark the dirty roots.
+ MarkRootsCheckpoint(self, false);
MarkNonThreadRoots();
MarkConcurrentRoots(
static_cast<VisitRootFlags>(kVisitRootFlagClearRootLog | kVisitRootFlagNewRoots));
@@ -241,6 +218,7 @@ void MarkSweep::PreCleanCards() {
void MarkSweep::RevokeAllThreadLocalAllocationStacks(Thread* self) {
if (kUseThreadLocalAllocationStack) {
+ timings_.NewSplit("RevokeAllThreadLocalAllocationStacks");
Locks::mutator_lock_->AssertExclusiveHeld(self);
heap_->RevokeAllThreadLocalAllocationStacks(self);
}
@@ -256,15 +234,8 @@ void MarkSweep::MarkingPhase() {
// Process dirty cards and add dirty cards to mod union tables.
heap_->ProcessCards(timings_, false);
- // Need to do this before the checkpoint since we don't want any threads to add references to
- // the live stack during the recursive mark.
- timings_.NewSplit("SwapStacks");
- heap_->SwapStacks(self);
-
WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
MarkRoots(self);
- live_stack_freeze_size_ = heap_->GetLiveStack()->Size();
- UpdateAndMarkModUnion();
MarkReachableObjects();
// Pre-clean dirtied cards to reduce pauses.
PreCleanCards();
@@ -284,18 +255,8 @@ void MarkSweep::UpdateAndMarkModUnion() {
}
}
-void MarkSweep::MarkThreadRoots(Thread* self) {
- MarkRootsCheckpoint(self);
-}
-
void MarkSweep::MarkReachableObjects() {
- // Mark everything allocated since the last as GC live so that we can sweep concurrently,
- // knowing that new allocations won't be marked as live.
- timings_.StartSplit("MarkStackAsLive");
- accounting::ObjectStack* live_stack = heap_->GetLiveStack();
- heap_->MarkAllocStackAsLive(live_stack);
- live_stack->Reset();
- timings_.EndSplit();
+ UpdateAndMarkModUnion();
// Recursively mark all the non-image bits set in the mark bitmap.
RecursiveMark();
}
@@ -303,44 +264,10 @@ void MarkSweep::MarkReachableObjects() {
void MarkSweep::ReclaimPhase() {
TimingLogger::ScopedSplit split("ReclaimPhase", &timings_);
Thread* self = Thread::Current();
-
- if (!IsConcurrent()) {
- ProcessReferences(self);
- }
-
- {
- WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
- SweepSystemWeaks();
- }
-
+ SweepSystemWeaks(self);
if (IsConcurrent()) {
Runtime::Current()->AllowNewSystemWeaks();
-
- TimingLogger::ScopedSplit split("UnMarkAllocStack", &timings_);
- WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
- accounting::ObjectStack* allocation_stack = GetHeap()->allocation_stack_.get();
- if (!kPreCleanCards) {
- // The allocation stack contains things allocated since the start of the GC. These may have
- // been marked during this GC meaning they won't be eligible for reclaiming in the next
- // sticky GC. Unmark these objects so that they are eligible for reclaiming in the next
- // sticky GC.
- // There is a race here which is safely handled. Another thread such as the hprof could
- // have flushed the alloc stack after we resumed the threads. This is safe however, since
- // reseting the allocation stack zeros it out with madvise. This means that we will either
- // read NULLs or attempt to unmark a newly allocated object which will not be marked in the
- // first place.
- // We can't do this if we pre-clean cards since we will unmark objects which are no longer on
- // a dirty card since we aged cards during the pre-cleaning process.
- mirror::Object** end = allocation_stack->End();
- for (mirror::Object** it = allocation_stack->Begin(); it != end; ++it) {
- const Object* obj = *it;
- if (obj != nullptr) {
- UnMarkObjectNonNull(obj);
- }
- }
- }
}
-
{
WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
@@ -414,28 +341,6 @@ void MarkSweep::MarkHeapReferenceCallback(mirror::HeapReference<mirror::Object>*
reinterpret_cast<MarkSweep*>(arg)->MarkObject(ref->AsMirrorPtr());
}
-inline void MarkSweep::UnMarkObjectNonNull(const Object* obj) {
- DCHECK(!immune_region_.ContainsObject(obj));
- if (kUseBrooksPointer) {
- // Verify all the objects have the correct Brooks pointer installed.
- obj->AssertSelfBrooksPointer();
- }
- // Try to take advantage of locality of references within a space, failing this find the space
- // the hard way.
- accounting::SpaceBitmap* object_bitmap = current_space_bitmap_;
- if (UNLIKELY(!object_bitmap->HasAddress(obj))) {
- accounting::SpaceBitmap* new_bitmap = mark_bitmap_->GetContinuousSpaceBitmap(obj);
- if (LIKELY(new_bitmap != NULL)) {
- object_bitmap = new_bitmap;
- } else {
- MarkLargeObject(obj, false);
- return;
- }
- }
- DCHECK(object_bitmap->HasAddress(obj));
- object_bitmap->Clear(obj);
-}
-
inline void MarkSweep::MarkObjectNonNull(Object* obj) {
DCHECK(obj != nullptr);
if (kUseBrooksPointer) {
@@ -590,7 +495,7 @@ void MarkSweep::MarkRoots(Thread* self) {
timings_.EndSplit();
RevokeAllThreadLocalAllocationStacks(self);
} else {
- MarkThreadRoots(self);
+ MarkRootsCheckpoint(self, kRevokeRosAllocThreadLocalBuffersAtCheckpoint);
// At this point the live stack should no longer have any mutators which push into it.
MarkNonThreadRoots();
MarkConcurrentRoots(
@@ -1020,10 +925,10 @@ void MarkSweep::ReMarkRoots() {
}
}
-void MarkSweep::SweepSystemWeaks() {
- Runtime* runtime = Runtime::Current();
+void MarkSweep::SweepSystemWeaks(Thread* self) {
+ WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
timings_.StartSplit("SweepSystemWeaks");
- runtime->SweepSystemWeaks(IsMarkedCallback, this);
+ Runtime::Current()->SweepSystemWeaks(IsMarkedCallback, this);
timings_.EndSplit();
}
@@ -1034,14 +939,13 @@ mirror::Object* MarkSweep::VerifySystemWeakIsLiveCallback(Object* obj, void* arg
}
void MarkSweep::VerifyIsLive(const Object* obj) {
- Heap* heap = GetHeap();
- if (!heap->GetLiveBitmap()->Test(obj)) {
- space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
+ if (!heap_->GetLiveBitmap()->Test(obj)) {
+ space::LargeObjectSpace* large_object_space = heap_->GetLargeObjectsSpace();
if (!large_object_space->GetLiveObjects()->Test(obj)) {
- if (std::find(heap->allocation_stack_->Begin(), heap->allocation_stack_->End(), obj) ==
- heap->allocation_stack_->End()) {
+ if (std::find(heap_->allocation_stack_->Begin(), heap_->allocation_stack_->End(), obj) ==
+ heap_->allocation_stack_->End()) {
// Object not found!
- heap->DumpSpaces();
+ heap_->DumpSpaces();
LOG(FATAL) << "Found dead object " << obj;
}
}
@@ -1055,9 +959,14 @@ void MarkSweep::VerifySystemWeaks() {
class CheckpointMarkThreadRoots : public Closure {
public:
- explicit CheckpointMarkThreadRoots(MarkSweep* mark_sweep) : mark_sweep_(mark_sweep) {}
+ explicit CheckpointMarkThreadRoots(MarkSweep* mark_sweep,
+ bool revoke_ros_alloc_thread_local_buffers_at_checkpoint)
+ : mark_sweep_(mark_sweep),
+ revoke_ros_alloc_thread_local_buffers_at_checkpoint_(
+ revoke_ros_alloc_thread_local_buffers_at_checkpoint) {
+ }
- virtual void Run(Thread* thread) NO_THREAD_SAFETY_ANALYSIS {
+ virtual void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
ATRACE_BEGIN("Marking thread roots");
// Note: self is not necessarily equal to thread since thread may be suspended.
Thread* self = Thread::Current();
@@ -1065,21 +974,22 @@ class CheckpointMarkThreadRoots : public Closure {
<< thread->GetState() << " thread " << thread << " self " << self;
thread->VisitRoots(MarkSweep::MarkRootParallelCallback, mark_sweep_);
ATRACE_END();
- if (kUseThreadLocalAllocationStack) {
- thread->RevokeThreadLocalAllocationStack();
- }
- if (kRevokeRosAllocThreadLocalBuffersAtCheckpoint) {
+ if (revoke_ros_alloc_thread_local_buffers_at_checkpoint_) {
+ ATRACE_BEGIN("RevokeRosAllocThreadLocalBuffers");
mark_sweep_->GetHeap()->RevokeRosAllocThreadLocalBuffers(thread);
+ ATRACE_END();
}
mark_sweep_->GetBarrier().Pass(self);
}
private:
- MarkSweep* mark_sweep_;
+ MarkSweep* const mark_sweep_;
+ const bool revoke_ros_alloc_thread_local_buffers_at_checkpoint_;
};
-void MarkSweep::MarkRootsCheckpoint(Thread* self) {
- CheckpointMarkThreadRoots check_point(this);
+void MarkSweep::MarkRootsCheckpoint(Thread* self,
+ bool revoke_ros_alloc_thread_local_buffers_at_checkpoint) {
+ CheckpointMarkThreadRoots check_point(this, revoke_ros_alloc_thread_local_buffers_at_checkpoint);
timings_.StartSplit("MarkRootsCheckpoint");
ThreadList* thread_list = Runtime::Current()->GetThreadList();
// Request the check point is run on all threads returning a count of the threads that must
@@ -1089,10 +999,10 @@ void MarkSweep::MarkRootsCheckpoint(Thread* self) {
// TODO: optimize to not release locks when there are no threads to wait for.
Locks::heap_bitmap_lock_->ExclusiveUnlock(self);
Locks::mutator_lock_->SharedUnlock(self);
- ThreadState old_state = self->SetState(kWaitingForCheckPointsToRun);
- CHECK_EQ(old_state, kWaitingPerformingGc);
- gc_barrier_->Increment(self, barrier_count);
- self->SetState(kWaitingPerformingGc);
+ {
+ ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
+ gc_barrier_->Increment(self, barrier_count);
+ }
Locks::mutator_lock_->SharedLock(self);
Locks::heap_bitmap_lock_->ExclusiveLock(self);
timings_.EndSplit();
@@ -1108,7 +1018,7 @@ void MarkSweep::SweepArray(accounting::ObjectStack* allocations, bool swap_bitma
size_t freed_objects = 0;
size_t freed_large_objects = 0;
// How many objects are left in the array, modified after each space is swept.
- Object** objects = const_cast<Object**>(allocations->Begin());
+ Object** objects = allocations->Begin();
size_t count = allocations->Size();
// Change the order to ensure that the non-moving space last swept as an optimization.
std::vector<space::ContinuousSpace*> sweep_spaces;
@@ -1206,6 +1116,16 @@ void MarkSweep::SweepArray(accounting::ObjectStack* allocations, bool swap_bitma
}
void MarkSweep::Sweep(bool swap_bitmaps) {
+ // Ensure that nobody inserted items in the live stack after we swapped the stacks.
+ CHECK_GE(live_stack_freeze_size_, GetHeap()->GetLiveStack()->Size());
+ // Mark everything allocated since the last as GC live so that we can sweep concurrently,
+ // knowing that new allocations won't be marked as live.
+ timings_.StartSplit("MarkStackAsLive");
+ accounting::ObjectStack* live_stack = heap_->GetLiveStack();
+ heap_->MarkAllocStackAsLive(live_stack);
+ live_stack->Reset();
+ timings_.EndSplit();
+
DCHECK(mark_stack_->IsEmpty());
TimingLogger::ScopedSplit("Sweep", &timings_);
for (const auto& space : GetHeap()->GetContinuousSpaces()) {
@@ -1326,7 +1246,7 @@ void MarkSweep::ProcessMarkStack(bool paused) {
}
obj = mark_stack_->PopBack();
}
- DCHECK(obj != NULL);
+ DCHECK(obj != nullptr);
ScanObject(obj);
}
}
@@ -1347,14 +1267,8 @@ inline bool MarkSweep::IsMarked(const Object* object) const
void MarkSweep::FinishPhase() {
TimingLogger::ScopedSplit split("FinishPhase", &timings_);
// Can't enqueue references if we hold the mutator lock.
- Heap* heap = GetHeap();
timings_.NewSplit("PostGcVerification");
- heap->PostGcVerification(this);
- // Update the cumulative statistics.
- total_freed_objects_ += GetFreedObjects() + GetFreedLargeObjects();
- total_freed_bytes_ += GetFreedBytes() + GetFreedLargeObjectBytes();
- // Ensure that the mark stack is empty.
- CHECK(mark_stack_->IsEmpty());
+ heap_->PostGcVerification(this);
if (kCountScannedTypes) {
VLOG(gc) << "MarkSweep scanned classes=" << class_count_ << " arrays=" << array_count_
<< " other=" << other_count_;
@@ -1375,22 +1289,10 @@ void MarkSweep::FinishPhase() {
VLOG(gc) << "Marked: null=" << mark_null_count_ << " immune=" << mark_immune_count_
<< " fastpath=" << mark_fastpath_count_ << " slowpath=" << mark_slowpath_count_;
}
- // Update the cumulative loggers.
- cumulative_timings_.Start();
- cumulative_timings_.AddLogger(timings_);
- cumulative_timings_.End();
- // Clear all of the spaces' mark bitmaps.
- for (const auto& space : GetHeap()->GetContinuousSpaces()) {
- accounting::SpaceBitmap* bitmap = space->GetMarkBitmap();
- if (bitmap != nullptr &&
- space->GetGcRetentionPolicy() != space::kGcRetentionPolicyNeverCollect) {
- bitmap->Clear();
- }
- }
+ CHECK(mark_stack_->IsEmpty()); // Ensure that the mark stack is empty.
mark_stack_->Reset();
- // Reset the marked large objects.
- space::LargeObjectSpace* large_objects = GetHeap()->GetLargeObjectsSpace();
- large_objects->GetMarkObjects()->Clear();
+ WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
+ heap_->ClearMarkedObjects();
}
void MarkSweep::RevokeAllThreadLocalBuffers() {
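
Note (not part of the patch): MarkRootsCheckpoint() now wraps the barrier wait in a ScopedThreadStateChange instead of a manual SetState()/restore pair, so the previous thread state is restored automatically. A hedged standalone sketch of that RAII idiom with made-up minimal types (not ART's actual Thread or ScopedThreadStateChange):

enum class ThreadState { kWaitingPerformingGc, kWaitingForCheckPointsToRun };

struct FakeThread {
  ThreadState state = ThreadState::kWaitingPerformingGc;
  ThreadState SetState(ThreadState new_state) {
    ThreadState old = state;
    state = new_state;
    return old;
  }
};

// Saves the old state on construction and restores it on destruction, even if
// the scope is left early via return or exception.
class ScopedStateChange {
 public:
  ScopedStateChange(FakeThread* self, ThreadState new_state)
      : self_(self), old_state_(self->SetState(new_state)) {}
  ~ScopedStateChange() { self_->SetState(old_state_); }
 private:
  FakeThread* const self_;
  const ThreadState old_state_;
};

void WaitForCheckpoints(FakeThread* self /*, Barrier* barrier, int count */) {
  ScopedStateChange tsc(self, ThreadState::kWaitingForCheckPointsToRun);
  // barrier->Increment(self, count);  // the wait happens in the new state
}  // old state restored here by ~ScopedStateChange()
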
diff --git a/runtime/gc/collector/mark_sweep.h b/runtime/gc/collector/mark_sweep.h
index 84b775a..f1fd546 100644
--- a/runtime/gc/collector/mark_sweep.h
+++ b/runtime/gc/collector/mark_sweep.h
@@ -32,33 +32,22 @@ namespace art {
namespace mirror {
class Class;
class Object;
- template<class T> class ObjectArray;
class Reference;
} // namespace mirror
-class StackVisitor;
class Thread;
enum VisitRootFlags : uint8_t;
namespace gc {
+class Heap;
+
namespace accounting {
- template <typename T> class AtomicStack;
- class MarkIfReachesAllocspaceVisitor;
- class ModUnionClearCardVisitor;
- class ModUnionVisitor;
- class ModUnionTableBitmap;
- class MarkStackChunk;
+ template<typename T> class AtomicStack;
typedef AtomicStack<mirror::Object*> ObjectStack;
class SpaceBitmap;
} // namespace accounting
-namespace space {
- class ContinuousSpace;
-} // namespace space
-
-class Heap;
-
namespace collector {
class MarkSweep : public GarbageCollector {
@@ -69,7 +58,7 @@ class MarkSweep : public GarbageCollector {
virtual void InitializePhase() OVERRIDE;
virtual void MarkingPhase() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- virtual void HandleDirtyObjectsPhase() OVERRIDE EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+ virtual void PausePhase() OVERRIDE EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
virtual void ReclaimPhase() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
virtual void FinishPhase() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
virtual void MarkReachableObjects()
@@ -107,7 +96,7 @@ class MarkSweep : public GarbageCollector {
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void MarkRootsCheckpoint(Thread* self)
+ void MarkRootsCheckpoint(Thread* self, bool revoke_ros_alloc_thread_local_buffers_at_checkpoint)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -137,8 +126,8 @@ class MarkSweep : public GarbageCollector {
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- // Update and mark references from immune spaces. Virtual as overridden by StickyMarkSweep.
- virtual void UpdateAndMarkModUnion()
+ // Update and mark references from immune spaces.
+ void UpdateAndMarkModUnion()
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Pre clean cards to reduce how much work is needed in the pause.
@@ -170,8 +159,8 @@ class MarkSweep : public GarbageCollector {
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
- void SweepSystemWeaks()
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
+ void SweepSystemWeaks(Thread* self)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
static mirror::Object* VerifySystemWeakIsLiveCallback(mirror::Object* obj, void* arg)
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
@@ -235,17 +224,6 @@ class MarkSweep : public GarbageCollector {
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
- // Unmarks an object by clearing the bit inside of the corresponding bitmap, or if it is in a
- // space set, removing the object from the set.
- void UnMarkObjectNonNull(const mirror::Object* obj)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
-
- // Mark the vm thread roots.
- void MarkThreadRoots(Thread* self)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
// Marks an object atomically, safe to use from multiple threads.
void MarkObjectNonNullParallel(mirror::Object* obj);
@@ -311,7 +289,7 @@ class MarkSweep : public GarbageCollector {
accounting::ObjectStack* mark_stack_;
- // Immune range, every object inside the immune range is assumed to be marked.
+ // Immune region, every object inside the immune range is assumed to be marked.
ImmuneRegion immune_region_;
// Parallel finger.
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index d4f47ef..222bd63 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -25,7 +25,7 @@
#include "base/macros.h"
#include "base/mutex-inl.h"
#include "base/timing_logger.h"
-#include "gc/accounting/heap_bitmap.h"
+#include "gc/accounting/heap_bitmap-inl.h"
#include "gc/accounting/mod_union_table.h"
#include "gc/accounting/remembered_set.h"
#include "gc/accounting/space_bitmap-inl.h"
@@ -726,8 +726,8 @@ inline Object* SemiSpace::GetMarkedForwardAddress(mirror::Object* obj) const
return obj;
}
if (from_space_->HasAddress(obj)) {
- mirror::Object* forwarding_address = GetForwardingAddressInFromSpace(const_cast<Object*>(obj));
- return forwarding_address; // Returns either the forwarding address or nullptr.
+ // Returns either the forwarding address or nullptr.
+ return GetForwardingAddressInFromSpace(obj);
} else if (to_space_->HasAddress(obj)) {
// Should be unlikely.
// Already forwarded, must be marked.
@@ -751,38 +751,12 @@ void SemiSpace::FinishPhase() {
Heap* heap = GetHeap();
timings_.NewSplit("PostGcVerification");
heap->PostGcVerification(this);
-
// Null the "to" and "from" spaces since compacting from one to the other isn't valid until
// further action is done by the heap.
to_space_ = nullptr;
from_space_ = nullptr;
-
- // Update the cumulative statistics
- total_freed_objects_ += GetFreedObjects() + GetFreedLargeObjects();
- total_freed_bytes_ += GetFreedBytes() + GetFreedLargeObjectBytes();
-
- // Ensure that the mark stack is empty.
CHECK(mark_stack_->IsEmpty());
-
- // Update the cumulative loggers.
- cumulative_timings_.Start();
- cumulative_timings_.AddLogger(timings_);
- cumulative_timings_.End();
-
- // Clear all of the spaces' mark bitmaps.
- for (const auto& space : GetHeap()->GetContinuousSpaces()) {
- accounting::SpaceBitmap* bitmap = space->GetMarkBitmap();
- if (bitmap != nullptr &&
- space->GetGcRetentionPolicy() != space::kGcRetentionPolicyNeverCollect) {
- bitmap->Clear();
- }
- }
mark_stack_->Reset();
-
- // Reset the marked large objects.
- space::LargeObjectSpace* large_objects = GetHeap()->GetLargeObjectsSpace();
- large_objects->GetMarkObjects()->Clear();
-
if (generational_) {
// Decide whether to do a whole heap collection or a bump pointer
// only space collection at the next collection by updating
@@ -800,6 +774,9 @@ void SemiSpace::FinishPhase() {
whole_heap_collection_ = false;
}
}
+ // Clear all of the spaces' mark bitmaps.
+ WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
+ heap_->ClearMarkedObjects();
}
void SemiSpace::RevokeAllThreadLocalBuffers() {
diff --git a/runtime/gc/collector/semi_space.h b/runtime/gc/collector/semi_space.h
index 52b53aa..f067cb2 100644
--- a/runtime/gc/collector/semi_space.h
+++ b/runtime/gc/collector/semi_space.h
@@ -28,37 +28,28 @@
namespace art {
+class Thread;
+
namespace mirror {
class Class;
class Object;
- template<class T> class ObjectArray;
} // namespace mirror
-class StackVisitor;
-class Thread;
-
namespace gc {
+class Heap;
+
namespace accounting {
template <typename T> class AtomicStack;
- class MarkIfReachesAllocspaceVisitor;
- class ModUnionClearCardVisitor;
- class ModUnionVisitor;
- class ModUnionTableBitmap;
- class MarkStackChunk;
typedef AtomicStack<mirror::Object*> ObjectStack;
class SpaceBitmap;
} // namespace accounting
namespace space {
- class BumpPointerSpace;
class ContinuousMemMapAllocSpace;
class ContinuousSpace;
- class MallocSpace;
} // namespace space
-class Heap;
-
namespace collector {
class SemiSpace : public GarbageCollector {
@@ -189,12 +180,6 @@ class SemiSpace : public GarbageCollector {
void ProcessMarkStack()
EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
- void ProcessReferences(mirror::Object** soft_references, bool clear_soft_references,
- mirror::Object** weak_references,
- mirror::Object** finalizer_references,
- mirror::Object** phantom_references)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
-
inline mirror::Object* GetForwardingAddressInFromSpace(mirror::Object* obj) const;
// Revoke all the thread-local buffers.
diff --git a/runtime/gc/collector/sticky_mark_sweep.h b/runtime/gc/collector/sticky_mark_sweep.h
index 934b1bd..4f9dabf 100644
--- a/runtime/gc/collector/sticky_mark_sweep.h
+++ b/runtime/gc/collector/sticky_mark_sweep.h
@@ -46,10 +46,6 @@ class StickyMarkSweep FINAL : public PartialMarkSweep {
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
- // Don't need to do anything special here since we scan all the cards which may have references
- // to the newly allocated objects.
- void UpdateAndMarkModUnion() OVERRIDE { }
-
private:
DISALLOW_COPY_AND_ASSIGN(StickyMarkSweep);
};
diff --git a/runtime/gc/collector_type.h b/runtime/gc/collector_type.h
index 98c27fb..c0a6b6a 100644
--- a/runtime/gc/collector_type.h
+++ b/runtime/gc/collector_type.h
@@ -36,6 +36,8 @@ enum CollectorType {
kCollectorTypeGSS,
// Heap trimming collector, doesn't do any actual collecting.
kCollectorTypeHeapTrim,
+ // A (mostly) concurrent copying collector.
+ kCollectorTypeCC,
};
std::ostream& operator<<(std::ostream& os, const CollectorType& collector_type);
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 02e7e3f..1a32a9a 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -35,6 +35,7 @@
#include "gc/accounting/mod_union_table-inl.h"
#include "gc/accounting/remembered_set.h"
#include "gc/accounting/space_bitmap-inl.h"
+#include "gc/collector/concurrent_copying.h"
#include "gc/collector/mark_sweep-inl.h"
#include "gc/collector/partial_mark_sweep.h"
#include "gc/collector/semi_space.h"
@@ -328,6 +329,9 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max
bool generational = post_zygote_collector_type_ == kCollectorTypeGSS;
semi_space_collector_ = new collector::SemiSpace(this, generational);
garbage_collectors_.push_back(semi_space_collector_);
+
+ concurrent_copying_collector_ = new collector::ConcurrentCopying(this);
+ garbage_collectors_.push_back(concurrent_copying_collector_);
}
if (running_on_valgrind_) {
@@ -658,9 +662,9 @@ void Heap::DumpGcPerformanceInfo(std::ostream& os) {
// Dump cumulative loggers for each GC type.
uint64_t total_paused_time = 0;
for (const auto& collector : garbage_collectors_) {
- CumulativeLogger& logger = collector->GetCumulativeTimings();
+ const CumulativeLogger& logger = collector->GetCumulativeTimings();
if (logger.GetTotalNs() != 0) {
- os << Dumpable<CumulativeLogger>(logger);
+ os << ConstDumpable<CumulativeLogger>(logger);
const uint64_t total_ns = logger.GetTotalNs();
const uint64_t total_pause_ns = collector->GetTotalPausedTimeNs();
double seconds = NsToMs(logger.GetTotalNs()) / 1000.0;
@@ -1153,13 +1157,29 @@ mirror::Object* Heap::AllocateInternalWithGc(Thread* self, AllocatorType allocat
ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated, usable_size);
}
+ collector::GcType tried_type = next_gc_type_;
+ if (ptr == nullptr) {
+ const bool gc_ran =
+ CollectGarbageInternal(tried_type, kGcCauseForAlloc, false) != collector::kGcTypeNone;
+ if (was_default_allocator && allocator != GetCurrentAllocator()) {
+ *klass = sirt_klass.get();
+ return nullptr;
+ }
+ if (gc_ran) {
+ ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated, usable_size);
+ }
+ }
+
// Loop through our different Gc types and try to Gc until we get enough free memory.
for (collector::GcType gc_type : gc_plan_) {
if (ptr != nullptr) {
break;
}
+ if (gc_type == tried_type) {
+ continue;
+ }
// Attempt to run the collector, if we succeed, re-try the allocation.
- bool gc_ran =
+ const bool gc_ran =
CollectGarbageInternal(gc_type, kGcCauseForAlloc, false) != collector::kGcTypeNone;
if (was_default_allocator && allocator != GetCurrentAllocator()) {
*klass = sirt_klass.get();
@@ -1430,7 +1450,8 @@ void Heap::TransitionCollector(CollectorType collector_type) {
break;
}
default: {
- LOG(FATAL) << "Attempted to transition to invalid collector type";
+ LOG(FATAL) << "Attempted to transition to invalid collector type "
+ << static_cast<size_t>(collector_type);
break;
}
}
@@ -1440,7 +1461,7 @@ void Heap::TransitionCollector(CollectorType collector_type) {
// Can't call into java code with all threads suspended.
EnqueueClearedReferences();
uint64_t duration = NanoTime() - start_time;
- GrowForUtilization(collector::kGcTypeFull, duration);
+ GrowForUtilization(semi_space_collector_);
FinishGC(self, collector::kGcTypeFull);
int32_t after_size = GetTotalMemory();
int32_t delta_size = before_size - after_size;
@@ -1460,6 +1481,7 @@ void Heap::ChangeCollector(CollectorType collector_type) {
collector_type_ = collector_type;
gc_plan_.clear();
switch (collector_type_) {
+ case kCollectorTypeCC: // Fall-through.
case kCollectorTypeSS: // Fall-through.
case kCollectorTypeGSS: {
gc_plan_.push_back(collector::kGcTypeFull);
@@ -1812,22 +1834,23 @@ collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type, GcCaus
if (compacting_gc) {
DCHECK(current_allocator_ == kAllocatorTypeBumpPointer ||
current_allocator_ == kAllocatorTypeTLAB);
- gc_type = semi_space_collector_->GetGcType();
- CHECK(temp_space_->IsEmpty());
- semi_space_collector_->SetFromSpace(bump_pointer_space_);
- semi_space_collector_->SetToSpace(temp_space_);
+ if (collector_type_ == kCollectorTypeSS || collector_type_ == kCollectorTypeGSS) {
+ gc_type = semi_space_collector_->GetGcType();
+ semi_space_collector_->SetFromSpace(bump_pointer_space_);
+ semi_space_collector_->SetToSpace(temp_space_);
+ collector = semi_space_collector_;
+ } else if (collector_type_ == kCollectorTypeCC) {
+ gc_type = concurrent_copying_collector_->GetGcType();
+ collector = concurrent_copying_collector_;
+ } else {
+ LOG(FATAL) << "Unreachable - invalid collector type " << static_cast<size_t>(collector_type_);
+ }
temp_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
- collector = semi_space_collector_;
+ CHECK(temp_space_->IsEmpty());
gc_type = collector::kGcTypeFull;
} else if (current_allocator_ == kAllocatorTypeRosAlloc ||
current_allocator_ == kAllocatorTypeDlMalloc) {
- for (const auto& cur_collector : garbage_collectors_) {
- if (cur_collector->GetCollectorType() == collector_type_ &&
- cur_collector->GetGcType() == gc_type) {
- collector = cur_collector;
- break;
- }
- }
+ collector = FindCollectorByGcType(gc_type);
} else {
LOG(FATAL) << "Invalid current allocator " << current_allocator_;
}
@@ -1838,14 +1861,14 @@ collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type, GcCaus
if (!clear_soft_references) {
clear_soft_references = gc_type != collector::kGcTypeSticky; // TODO: GSS?
}
- collector->Run(gc_cause, clear_soft_references || Runtime::Current()->IsZygote());
+ collector->Run(gc_cause, clear_soft_references || runtime->IsZygote());
total_objects_freed_ever_ += collector->GetFreedObjects();
total_bytes_freed_ever_ += collector->GetFreedBytes();
RequestHeapTrim();
// Enqueue cleared references.
EnqueueClearedReferences();
// Grow the heap so that we know when to perform the next GC.
- GrowForUtilization(gc_type, collector->GetDurationNs());
+ GrowForUtilization(collector);
if (CareAboutPauseTimes()) {
const size_t duration = collector->GetDurationNs();
std::vector<uint64_t> pauses = collector->GetPauseTimes();
@@ -1874,9 +1897,7 @@ collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type, GcCaus
<< percent_free << "% free, " << PrettySize(current_heap_size) << "/"
<< PrettySize(total_memory) << ", " << "paused " << pause_string.str()
<< " total " << PrettyDuration((duration / 1000) * 1000);
- if (VLOG_IS_ON(heap)) {
- LOG(INFO) << Dumpable<TimingLogger>(collector->GetTimings());
- }
+ VLOG(heap) << ConstDumpable<TimingLogger>(collector->GetTimings());
}
}
FinishGC(self, gc_type);
@@ -2479,13 +2500,24 @@ void Heap::UpdateMaxNativeFootprint() {
native_footprint_limit_ = 2 * target_size - native_size;
}
-void Heap::GrowForUtilization(collector::GcType gc_type, uint64_t gc_duration) {
+collector::GarbageCollector* Heap::FindCollectorByGcType(collector::GcType gc_type) {
+ for (const auto& collector : garbage_collectors_) {
+ if (collector->GetCollectorType() == collector_type_ &&
+ collector->GetGcType() == gc_type) {
+ return collector;
+ }
+ }
+ return nullptr;
+}
+
+void Heap::GrowForUtilization(collector::GarbageCollector* collector_ran) {
// We know what our utilization is at this moment.
// This doesn't actually resize any memory. It just lets the heap grow more when necessary.
const size_t bytes_allocated = GetBytesAllocated();
last_gc_size_ = bytes_allocated;
last_gc_time_ns_ = NanoTime();
size_t target_size;
+ collector::GcType gc_type = collector_ran->GetGcType();
if (gc_type != collector::kGcTypeSticky) {
// Grow the heap for non sticky GC.
target_size = bytes_allocated / GetTargetHeapUtilization();
@@ -2497,12 +2529,22 @@ void Heap::GrowForUtilization(collector::GcType gc_type, uint64_t gc_duration) {
native_need_to_run_finalization_ = true;
next_gc_type_ = collector::kGcTypeSticky;
} else {
- // Based on how close the current heap size is to the target size, decide
- // whether or not to do a partial or sticky GC next.
- if (bytes_allocated + min_free_ <= max_allowed_footprint_) {
+ collector::GcType non_sticky_gc_type =
+ have_zygote_space_ ? collector::kGcTypePartial : collector::kGcTypeFull;
+ // Find what the next non sticky collector will be.
+ collector::GarbageCollector* non_sticky_collector = FindCollectorByGcType(non_sticky_gc_type);
+ // If the throughput of the current sticky GC >= throughput of the non sticky collector, then
+ // do another sticky collection next.
+ // We also check that the bytes allocated aren't over the footprint limit in order to prevent a
+ // pathological case where dead objects which aren't reclaimed by sticky could get accumulated
+ // if the sticky GC throughput always remained >= the full/partial throughput.
+ if (collector_ran->GetEstimatedLastIterationThroughput() >=
+ non_sticky_collector->GetEstimatedMeanThroughput() &&
+ non_sticky_collector->GetIterations() > 0 &&
+ bytes_allocated <= max_allowed_footprint_) {
next_gc_type_ = collector::kGcTypeSticky;
} else {
- next_gc_type_ = have_zygote_space_ ? collector::kGcTypePartial : collector::kGcTypeFull;
+ next_gc_type_ = non_sticky_gc_type;
}
// If we have freed enough memory, shrink the heap back down.
if (bytes_allocated + max_free_ < max_allowed_footprint_) {
@@ -2516,7 +2558,7 @@ void Heap::GrowForUtilization(collector::GcType gc_type, uint64_t gc_duration) {
if (IsGcConcurrent()) {
// Calculate when to perform the next ConcurrentGC.
// Calculate the estimated GC duration.
- const double gc_duration_seconds = NsToMs(gc_duration) / 1000.0;
+ const double gc_duration_seconds = NsToMs(collector_ran->GetDurationNs()) / 1000.0;
// Estimate how many remaining bytes we will have when we need to start the next GC.
size_t remaining_bytes = allocation_rate_ * gc_duration_seconds;
remaining_bytes = std::min(remaining_bytes, kMaxConcurrentRemainingBytes);
@@ -2808,5 +2850,19 @@ void Heap::RemoveRememberedSet(space::Space* space) {
CHECK(remembered_sets_.find(space) == remembered_sets_.end());
}
+void Heap::ClearMarkedObjects() {
+ // Clear all of the spaces' mark bitmaps.
+ for (const auto& space : GetContinuousSpaces()) {
+ accounting::SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
+ if (space->GetLiveBitmap() != mark_bitmap) {
+ mark_bitmap->Clear();
+ }
+ }
+ // Clear the marked objects in the discontinous space object sets.
+ for (const auto& space : GetDiscontinuousSpaces()) {
+ space->GetMarkObjects()->Clear();
+ }
+}
+
} // namespace gc
} // namespace art
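
Note (not part of the patch): the rewritten GrowForUtilization() keeps scheduling sticky GCs only while the last sticky iteration's throughput is at least the mean throughput of the full/partial collector, that collector has run at least once, and allocations are still under the footprint limit; otherwise it falls back to a partial (or full, pre-zygote) collection. A standalone sketch of that decision with simplified parameters rather than ART's Heap state:

#include <cstddef>
#include <cstdint>

enum class GcType { kSticky, kPartial, kFull };

GcType NextGcType(uint64_t sticky_last_throughput,      // bytes/s of the sticky GC that just ran
                  uint64_t non_sticky_mean_throughput,  // mean bytes/s of the full/partial collector
                  size_t non_sticky_iterations,         // times the full/partial collector has run
                  size_t bytes_allocated,
                  size_t max_allowed_footprint,
                  bool have_zygote_space) {
  const GcType non_sticky = have_zygote_space ? GcType::kPartial : GcType::kFull;
  // Sticky GC stays worthwhile only while it reclaims memory at least as fast
  // as the non-sticky collector does on average, and the heap is not already
  // over its allowed footprint.
  if (sticky_last_throughput >= non_sticky_mean_throughput &&
      non_sticky_iterations > 0 &&
      bytes_allocated <= max_allowed_footprint) {
    return GcType::kSticky;
  }
  return non_sticky;
}
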
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 60b8450..5879757 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -60,6 +60,7 @@ namespace accounting {
} // namespace accounting
namespace collector {
+ class ConcurrentCopying;
class GarbageCollector;
class MarkSweep;
class SemiSpace;
@@ -254,6 +255,9 @@ class Heap {
void IncrementDisableMovingGC(Thread* self);
void DecrementDisableMovingGC(Thread* self);
+ // Clear all of the mark bits, doesn't clear bitmaps which have the same live bits as mark bits.
+ void ClearMarkedObjects() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
// Initiates an explicit garbage collection.
void CollectGarbage(bool clear_soft_references);
@@ -575,7 +579,8 @@ class Heap {
return AllocatorHasAllocationStack(allocator_type);
}
static bool IsCompactingGC(CollectorType collector_type) {
- return collector_type == kCollectorTypeSS || collector_type == kCollectorTypeGSS;
+ return collector_type == kCollectorTypeSS || collector_type == kCollectorTypeGSS ||
+ collector_type == kCollectorTypeCC;
}
bool ShouldAllocLargeObject(mirror::Class* c, size_t byte_count) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -658,10 +663,13 @@ class Heap {
// bytes allocated and the target utilization ratio.
void UpdateMaxNativeFootprint();
+ // Find a collector based on GC type.
+ collector::GarbageCollector* FindCollectorByGcType(collector::GcType gc_type);
+
// Given the current contents of the alloc space, increase the allowed heap footprint to match
// the target utilization ratio. This should only be called immediately after a full garbage
// collection.
- void GrowForUtilization(collector::GcType gc_type, uint64_t gc_duration);
+ void GrowForUtilization(collector::GarbageCollector* collector_ran);
size_t GetPercentFree();
@@ -688,7 +696,7 @@ class Heap {
// What kind of concurrency behavior is the runtime after? Currently true for concurrent mark
// sweep GC, false for other GC types.
bool IsGcConcurrent() const ALWAYS_INLINE {
- return collector_type_ == kCollectorTypeCMS;
+ return collector_type_ == kCollectorTypeCMS || collector_type_ == kCollectorTypeCC;
}
// All-known continuous spaces, where objects lie within fixed bounds.
@@ -932,6 +940,7 @@ class Heap {
std::vector<collector::GarbageCollector*> garbage_collectors_;
collector::SemiSpace* semi_space_collector_;
+ collector::ConcurrentCopying* concurrent_copying_collector_;
const bool running_on_valgrind_;
const bool use_tlab_;