-rw-r--r--  runtime/gc/collector/garbage_collector.cc | 99
-rw-r--r--  runtime/gc/collector/garbage_collector.h  | 10
-rw-r--r--  runtime/gc/collector/mark_sweep.cc        |  7
-rw-r--r--  runtime/gc/collector/mark_sweep.h         | 10
-rw-r--r--  runtime/gc/collector/semi_space.h         |  6
-rw-r--r--  runtime/gc/heap-inl.h                     |  6
-rw-r--r--  runtime/gc/heap.cc                        | 19
-rw-r--r--  runtime/gc/heap.h                         | 10
8 files changed, 88 insertions(+), 79 deletions(-)
diff --git a/runtime/gc/collector/garbage_collector.cc b/runtime/gc/collector/garbage_collector.cc
index 1e1e447..b190dab 100644
--- a/runtime/gc/collector/garbage_collector.cc
+++ b/runtime/gc/collector/garbage_collector.cc
@@ -47,9 +47,8 @@ GarbageCollector::GarbageCollector(Heap* heap, const std::string& name)
   ResetCumulativeStatistics();
 }
 
-bool GarbageCollector::HandleDirtyObjectsPhase() {
-  DCHECK(IsConcurrent());
-  return true;
+void GarbageCollector::HandleDirtyObjectsPhase() {
+  LOG(FATAL) << "Unreachable";
 }
 
 void GarbageCollector::RegisterPause(uint64_t nano_length) {
@@ -85,50 +84,56 @@ void GarbageCollector::Run(GcCause gc_cause, bool clear_soft_references) {
   freed_objects_ = 0;
   freed_large_objects_ = 0;
-  InitializePhase();
-
-  if (!IsConcurrent()) {
-    // Pause is the entire length of the GC.
-    uint64_t pause_start = NanoTime();
-    ATRACE_BEGIN("Application threads suspended");
-    // Mutator lock may be already exclusively held when we do garbage collections for changing the
-    // current collector / allocator during process state updates.
-    if (Locks::mutator_lock_->IsExclusiveHeld(self)) {
-      // PreGcRosAllocVerification() is called in Heap::TransitionCollector().
-      RevokeAllThreadLocalBuffers();
-      MarkingPhase();
-      ReclaimPhase();
-      // PostGcRosAllocVerification() is called in Heap::TransitionCollector().
-    } else {
-      thread_list->SuspendAll();
-      GetHeap()->PreGcRosAllocVerification(&timings_);
-      RevokeAllThreadLocalBuffers();
-      MarkingPhase();
-      ReclaimPhase();
-      GetHeap()->PostGcRosAllocVerification(&timings_);
-      thread_list->ResumeAll();
-    }
-    ATRACE_END();
-    RegisterPause(NanoTime() - pause_start);
-  } else {
-    CHECK(!Locks::mutator_lock_->IsExclusiveHeld(self));
-    Thread* self = Thread::Current();
-    {
-      ReaderMutexLock mu(self, *Locks::mutator_lock_);
-      MarkingPhase();
+  CollectorType collector_type = GetCollectorType();
+  switch (collector_type) {
+    case kCollectorTypeMS:   // Fall through.
+    case kCollectorTypeSS:   // Fall through.
+    case kCollectorTypeGSS: {
+      InitializePhase();
+      // Pause is the entire length of the GC.
+      uint64_t pause_start = NanoTime();
+      ATRACE_BEGIN("Application threads suspended");
+      // Mutator lock may be already exclusively held when we do garbage collections for changing
+      // the current collector / allocator during process state updates.
+      if (Locks::mutator_lock_->IsExclusiveHeld(self)) {
+        // PreGcRosAllocVerification() is called in Heap::TransitionCollector().
+        RevokeAllThreadLocalBuffers();
+        MarkingPhase();
+        ReclaimPhase();
+        // PostGcRosAllocVerification() is called in Heap::TransitionCollector().
+      } else {
+        ATRACE_BEGIN("Suspending mutator threads");
+        thread_list->SuspendAll();
+        ATRACE_END();
+        GetHeap()->PreGcRosAllocVerification(&timings_);
+        RevokeAllThreadLocalBuffers();
+        MarkingPhase();
+        ReclaimPhase();
+        GetHeap()->PostGcRosAllocVerification(&timings_);
+        ATRACE_BEGIN("Resuming mutator threads");
+        thread_list->ResumeAll();
+        ATRACE_END();
+      }
+      ATRACE_END();
+      RegisterPause(NanoTime() - pause_start);
+      FinishPhase();
+      break;
     }
-    bool done = false;
-    while (!done) {
+    case kCollectorTypeCMS: {
+      InitializePhase();
+      CHECK(!Locks::mutator_lock_->IsExclusiveHeld(self));
+      {
+        ReaderMutexLock mu(self, *Locks::mutator_lock_);
+        MarkingPhase();
+      }
       uint64_t pause_start = NanoTime();
       ATRACE_BEGIN("Suspending mutator threads");
       thread_list->SuspendAll();
       ATRACE_END();
       ATRACE_BEGIN("All mutator threads suspended");
       GetHeap()->PreGcRosAllocVerification(&timings_);
-      done = HandleDirtyObjectsPhase();
-      if (done) {
-        RevokeAllThreadLocalBuffers();
-      }
+      HandleDirtyObjectsPhase();
+      RevokeAllThreadLocalBuffers();
       GetHeap()->PostGcRosAllocVerification(&timings_);
       ATRACE_END();
       uint64_t pause_end = NanoTime();
@@ -136,13 +141,19 @@ void GarbageCollector::Run(GcCause gc_cause, bool clear_soft_references) {
       thread_list->ResumeAll();
       ATRACE_END();
       RegisterPause(pause_end - pause_start);
+      {
+        ReaderMutexLock mu(self, *Locks::mutator_lock_);
+        ReclaimPhase();
+      }
+      FinishPhase();
+      break;
     }
-    {
-      ReaderMutexLock mu(self, *Locks::mutator_lock_);
-      ReclaimPhase();
+    default: {
+      LOG(FATAL) << "Unreachable collector type=" << static_cast<size_t>(collector_type);
+      break;
     }
   }
-  FinishPhase();
+
   uint64_t end_time = NanoTime();
   duration_ns_ = end_time - start_time;
   total_time_ns_ += GetDurationNs();
diff --git a/runtime/gc/collector/garbage_collector.h b/runtime/gc/collector/garbage_collector.h
index 8259cf0..2182430 100644
--- a/runtime/gc/collector/garbage_collector.h
+++ b/runtime/gc/collector/garbage_collector.h
@@ -20,6 +20,7 @@
 #include "base/histogram.h"
 #include "base/mutex.h"
 #include "base/timing_logger.h"
+#include "gc/collector_type.h"
 #include "gc/gc_cause.h"
 #include "gc_type.h"
 #include <stdint.h>
@@ -34,9 +35,6 @@ namespace collector {
 
 class GarbageCollector {
  public:
-  // Returns true iff the garbage collector is concurrent.
-  virtual bool IsConcurrent() const = 0;
-
   GarbageCollector(Heap* heap, const std::string& name);
   virtual ~GarbageCollector() { }
 
@@ -46,6 +44,8 @@
 
   virtual GcType GetGcType() const = 0;
 
+  virtual CollectorType GetCollectorType() const = 0;
+
   // Run the garbage collector.
   void Run(GcCause gc_cause, bool clear_soft_references);
 
@@ -118,8 +118,8 @@
   // Mark all reachable objects, done concurrently.
   virtual void MarkingPhase() = 0;
 
-  // Only called for concurrent GCs. Gets called repeatedly until it succeeds.
-  virtual bool HandleDirtyObjectsPhase() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  // Only called for concurrent GCs.
+  virtual void HandleDirtyObjectsPhase() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Called with mutators running.
   virtual void ReclaimPhase() = 0;
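
Taken together, the two files above replace the virtual IsConcurrent() bit with a CollectorType query: Run() now dispatches through a single switch, the stop-the-world collectors get one straight-line pause, and the bool-returning HandleDirtyObjectsPhase() retry loop is gone (the phase runs exactly once, and thread-local buffers are always revoked during the pause). The following standalone sketch is not part of the patch; it models only the shape of the new control flow, with the phases, thread suspension, and tracing reduced to prints:

    // Standalone model of the new Run() dispatch. Enum and function names
    // follow the patch; everything else is simplified.
    #include <cstdio>
    #include <cstdlib>

    enum CollectorType {
      kCollectorTypeNone, kCollectorTypeMS, kCollectorTypeCMS,
      kCollectorTypeSS, kCollectorTypeGSS
    };

    void Run(CollectorType collector_type) {
      switch (collector_type) {
        case kCollectorTypeMS:   // Fall through.
        case kCollectorTypeSS:   // Fall through.
        case kCollectorTypeGSS:
          // Stop-the-world collectors: the pause spans the whole collection.
          std::puts("suspend -> revoke TLABs -> mark -> reclaim -> resume");
          break;
        case kCollectorTypeCMS:
          // Concurrent mark sweep: marking and reclaiming run with mutators
          // resumed; the single pause only rescans dirty objects. The old
          // while (!done) retry around HandleDirtyObjectsPhase() is gone.
          std::puts("mark -> pause {rescan dirty, revoke TLABs} -> reclaim");
          break;
        default:
          // Mirrors the patch's LOG(FATAL) << "Unreachable collector type=...".
          std::fprintf(stderr, "Unreachable collector type=%d\n",
                       static_cast<int>(collector_type));
          std::abort();
      }
    }

    int main() {
      Run(kCollectorTypeCMS);
      Run(kCollectorTypeGSS);
      return 0;
    }

Keeping the default arm fatal matches the patch: every concrete collector must report a type, so reaching it indicates a programming error rather than a state to retry.
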
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 8372734..3e5fbcd 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -151,7 +151,7 @@ void MarkSweep::PreProcessReferences(Thread* self) {
                                &MarkObjectCallback, &ProcessMarkStackPausedCallback, this);
 }
 
-bool MarkSweep::HandleDirtyObjectsPhase() {
+void MarkSweep::HandleDirtyObjectsPhase() {
   TimingLogger::ScopedSplit split("(Paused)HandleDirtyObjectsPhase", &timings_);
   Thread* self = Thread::Current();
   Locks::mutator_lock_->AssertExclusiveHeld(self);
@@ -194,11 +194,6 @@ bool MarkSweep::HandleDirtyObjectsPhase() {
   // incorrectly sweep it. This also fixes a race where interning may attempt to return a strong
   // reference to a string that is about to be swept.
   Runtime::Current()->DisallowNewSystemWeaks();
-  return true;
-}
-
-bool MarkSweep::IsConcurrent() const {
-  return is_concurrent_;
 }
 
 void MarkSweep::PreCleanCards() {
diff --git a/runtime/gc/collector/mark_sweep.h b/runtime/gc/collector/mark_sweep.h
index b117b20..937d726 100644
--- a/runtime/gc/collector/mark_sweep.h
+++ b/runtime/gc/collector/mark_sweep.h
@@ -68,19 +68,25 @@
 
   virtual void InitializePhase() OVERRIDE;
   virtual void MarkingPhase() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  virtual bool HandleDirtyObjectsPhase() OVERRIDE EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+  virtual void HandleDirtyObjectsPhase() OVERRIDE EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
   virtual void ReclaimPhase() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   virtual void FinishPhase() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   virtual void MarkReachableObjects()
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
 
-  virtual bool IsConcurrent() const OVERRIDE;
+  bool IsConcurrent() const {
+    return is_concurrent_;
+  }
 
   virtual GcType GetGcType() const OVERRIDE {
     return kGcTypeFull;
   }
 
+  virtual CollectorType GetCollectorType() const OVERRIDE {
+    return is_concurrent_ ? kCollectorTypeCMS : kCollectorTypeMS;
+  }
+
   // Initializes internal structures.
   void Init();
diff --git a/runtime/gc/collector/semi_space.h b/runtime/gc/collector/semi_space.h
index 08bfbc4..34cc1d3 100644
--- a/runtime/gc/collector/semi_space.h
+++ b/runtime/gc/collector/semi_space.h
@@ -72,9 +72,6 @@
   ~SemiSpace() {}
 
   virtual void InitializePhase();
-  virtual bool IsConcurrent() const {
-    return false;
-  }
   virtual void MarkingPhase() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
   virtual void ReclaimPhase() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
   virtual void FinishPhase() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -83,6 +80,9 @@
   virtual GcType GetGcType() const {
     return kGcTypePartial;
   }
+  virtual CollectorType GetCollectorType() const OVERRIDE {
+    return generational_ ? kCollectorTypeGSS : kCollectorTypeSS;
+  }
 
   // Sets which space we will be copying objects to.
   void SetToSpace(space::ContinuousMemMapAllocSpace* to_space);
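
Each concrete collector now derives its type from a flag it already stores: is_concurrent_ in MarkSweep, generational_ in SemiSpace. A stubbed-down model of those overrides, not from the patch (assumed simplifications: no OVERRIDE macro, no GC phases, plain asserts):

    #include <cassert>

    enum CollectorType { kCollectorTypeMS, kCollectorTypeCMS, kCollectorTypeSS, kCollectorTypeGSS };

    class GarbageCollector {
     public:
      virtual ~GarbageCollector() {}
      virtual CollectorType GetCollectorType() const = 0;
    };

    class MarkSweep : public GarbageCollector {
     public:
      explicit MarkSweep(bool is_concurrent) : is_concurrent_(is_concurrent) {}
      // Concurrency is folded into the reported type, as in the patch.
      CollectorType GetCollectorType() const override {
        return is_concurrent_ ? kCollectorTypeCMS : kCollectorTypeMS;
      }
     private:
      const bool is_concurrent_;
    };

    class SemiSpace : public GarbageCollector {
     public:
      explicit SemiSpace(bool generational) : generational_(generational) {}
      CollectorType GetCollectorType() const override {
        return generational_ ? kCollectorTypeGSS : kCollectorTypeSS;
      }
     private:
      const bool generational_;
    };

    int main() {
      MarkSweep cms(/*is_concurrent=*/true);
      SemiSpace gss(/*generational=*/true);
      assert(cms.GetCollectorType() == kCollectorTypeCMS);
      assert(gss.GetCollectorType() == kCollectorTypeGSS);
      return 0;
    }
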
diff --git a/runtime/gc/heap-inl.h b/runtime/gc/heap-inl.h
index 6cc44c9..8bfe793 100644
--- a/runtime/gc/heap-inl.h
+++ b/runtime/gc/heap-inl.h
@@ -118,11 +118,11 @@ inline mirror::Object* Heap::AllocObjectWithAllocator(Thread* self, mirror::Clas
   } else {
     DCHECK(!Dbg::IsAllocTrackingEnabled());
   }
-  // concurrent_gc_ isn't known at compile time so we can optimize by not checking it for
+  // IsGcConcurrent() isn't known at compile time so we can optimize by not checking it for
   // the BumpPointer or TLAB allocators. This is nice since it allows the entire if statement to be
   // optimized out. And for the other allocators, AllocatorMayHaveConcurrentGC is a constant since
   // the allocator_type should be constant propagated.
-  if (AllocatorMayHaveConcurrentGC(allocator) && concurrent_gc_) {
+  if (AllocatorMayHaveConcurrentGC(allocator) && IsGcConcurrent()) {
     CheckConcurrentGC(self, new_num_bytes_allocated, &obj);
   }
   VerifyObject(obj);
@@ -276,7 +276,7 @@ inline bool Heap::IsOutOfMemoryOnAllocation(AllocatorType allocator_type, size_t
   if (UNLIKELY(new_footprint > growth_limit_)) {
     return true;
   }
-  if (!AllocatorMayHaveConcurrentGC(allocator_type) || !concurrent_gc_) {
+  if (!AllocatorMayHaveConcurrentGC(allocator_type) || !IsGcConcurrent()) {
     if (!kGrow) {
       return true;
     }
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index a763e37..20416ec 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -88,7 +88,6 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max
       rosalloc_space_(nullptr),
       dlmalloc_space_(nullptr),
       main_space_(nullptr),
-      concurrent_gc_(false),
       collector_type_(kCollectorTypeNone),
       post_zygote_collector_type_(post_zygote_collector_type),
       background_collector_type_(background_collector_type),
@@ -1443,10 +1442,8 @@ void Heap::ChangeCollector(CollectorType collector_type) {
     collector_type_ = collector_type;
     gc_plan_.clear();
     switch (collector_type_) {
-      case kCollectorTypeSS:
-        // Fall-through.
+      case kCollectorTypeSS:  // Fall-through.
       case kCollectorTypeGSS: {
-        concurrent_gc_ = false;
         gc_plan_.push_back(collector::kGcTypeFull);
         if (use_tlab_) {
           ChangeAllocator(kAllocatorTypeTLAB);
@@ -1456,7 +1453,6 @@ void Heap::ChangeCollector(CollectorType collector_type) {
         break;
       }
       case kCollectorTypeMS: {
-        concurrent_gc_ = false;
         gc_plan_.push_back(collector::kGcTypeSticky);
         gc_plan_.push_back(collector::kGcTypePartial);
         gc_plan_.push_back(collector::kGcTypeFull);
@@ -1464,7 +1460,6 @@
         break;
       }
       case kCollectorTypeCMS: {
-        concurrent_gc_ = true;
         gc_plan_.push_back(collector::kGcTypeSticky);
         gc_plan_.push_back(collector::kGcTypePartial);
         gc_plan_.push_back(collector::kGcTypeFull);
@@ -1475,7 +1470,7 @@
         LOG(FATAL) << "Unimplemented";
       }
     }
-    if (concurrent_gc_) {
+    if (IsGcConcurrent()) {
       concurrent_start_bytes_ = std::max(max_allowed_footprint_, kMinConcurrentRemainingBytes) -
           kMinConcurrentRemainingBytes;
     } else {
@@ -1809,7 +1804,7 @@ collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type, GcCaus
   } else if (current_allocator_ == kAllocatorTypeRosAlloc ||
              current_allocator_ == kAllocatorTypeDlMalloc) {
     for (const auto& cur_collector : garbage_collectors_) {
-      if (cur_collector->IsConcurrent() == concurrent_gc_ &&
+      if (cur_collector->GetCollectorType() == collector_type_ &&
           cur_collector->GetGcType() == gc_type) {
         collector = cur_collector;
         break;
@@ -1819,8 +1814,8 @@ collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type, GcCaus
     LOG(FATAL) << "Invalid current allocator " << current_allocator_;
   }
   CHECK(collector != nullptr)
-      << "Could not find garbage collector with concurrent=" << concurrent_gc_
-      << " and type=" << gc_type;
+      << "Could not find garbage collector with collector_type="
+      << static_cast<size_t>(collector_type_) << " and gc_type=" << gc_type;
   ATRACE_BEGIN(StringPrintf("%s %s GC", PrettyCause(gc_cause), collector->GetName()).c_str());
   if (!clear_soft_references) {
     clear_soft_references = gc_type != collector::kGcTypeSticky;  // TODO: GSS?
@@ -2488,7 +2483,7 @@ void Heap::GrowForUtilization(collector::GcType gc_type, uint64_t gc_duration) {
   }
   if (!ignore_max_footprint_) {
     SetIdealFootprint(target_size);
-    if (concurrent_gc_) {
+    if (IsGcConcurrent()) {
       // Calculate when to perform the next ConcurrentGC.
       // Calculate the estimated GC duration.
       const double gc_duration_seconds = NsToMs(gc_duration) / 1000.0;
@@ -2708,7 +2703,7 @@ void Heap::RegisterNativeAllocation(JNIEnv* env, int bytes) {
       // finalizers released native managed allocations.
      UpdateMaxNativeFootprint();
     } else if (!IsGCRequestPending()) {
-      if (concurrent_gc_) {
+      if (IsGcConcurrent()) {
        RequestConcurrentGC(self);
      } else {
        CollectGarbageInternal(gc_type, kGcCauseForAlloc, false);
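
With the heap.cc hunks applied, CollectGarbageInternal() selects a collector only when both its CollectorType and its GcType match the heap's current configuration. Unlike the old IsConcurrent() == concurrent_gc_ test, the same comparison also distinguishes semi-space from generational semi-space, since kCollectorTypeSS and kCollectorTypeGSS are distinct values where the old boolean was simply false for both. A hypothetical, self-contained distillation of that lookup (names and the collector list are stand-ins, not the real heap members):

    #include <cstdio>
    #include <vector>

    enum CollectorType { kCollectorTypeMS, kCollectorTypeCMS, kCollectorTypeSS, kCollectorTypeGSS };
    enum GcType { kGcTypeSticky, kGcTypePartial, kGcTypeFull };

    struct Collector {
      CollectorType collector_type;
      GcType gc_type;
      const char* name;
    };

    // Returns the matching collector, or nullptr (the real code CHECK-fails).
    const Collector* FindCollector(const std::vector<Collector>& collectors,
                                   CollectorType current_type, GcType wanted_gc) {
      for (const Collector& c : collectors) {
        if (c.collector_type == current_type && c.gc_type == wanted_gc) {
          return &c;
        }
      }
      return nullptr;
    }

    int main() {
      std::vector<Collector> collectors = {
          {kCollectorTypeMS, kGcTypeFull, "mark sweep"},
          {kCollectorTypeCMS, kGcTypeFull, "concurrent mark sweep"},
          {kCollectorTypeCMS, kGcTypeSticky, "sticky concurrent mark sweep"},
      };
      const Collector* c = FindCollector(collectors, kCollectorTypeCMS, kGcTypeSticky);
      std::puts(c != nullptr ? c->name : "not found");
      return 0;
    }
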
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index eb53ba9..5bf405d 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -680,6 +680,12 @@
   // Push an object onto the allocation stack.
   void PushOnAllocationStack(Thread* self, mirror::Object* obj);
 
+  // What kind of concurrency behavior is the runtime after? Currently true for concurrent mark
+  // sweep GC, false for other GC types.
+  bool IsGcConcurrent() const ALWAYS_INLINE {
+    return collector_type_ == kCollectorTypeCMS;
+  }
+
   // All-known continuous spaces, where objects lie within fixed bounds.
   std::vector<space::ContinuousSpace*> continuous_spaces_;
 
@@ -722,10 +728,6 @@
   // The mem-map which we will use for the non-moving space after the zygote is done forking:
   UniquePtr<MemMap> post_zygote_non_moving_space_mem_map_;
 
-  // What kind of concurrency behavior is the runtime after? Currently true for concurrent mark
-  // sweep GC, false for other GC types.
-  bool concurrent_gc_;
-
   // The current collector type.
   CollectorType collector_type_;
 
   // Which collector we will switch to after zygote fork.
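
The heap.h hunks complete the cleanup: the cached concurrent_gc_ field is deleted and IsGcConcurrent() derives the answer from collector_type_ on demand, so a collector transition can never leave a stale concurrency flag behind. A stub-level sketch of the idea, not from the patch (the real method is ALWAYS_INLINE and lives among the heap's private helpers):

    #include <cassert>

    enum CollectorType {
      kCollectorTypeNone, kCollectorTypeMS, kCollectorTypeCMS,
      kCollectorTypeSS, kCollectorTypeGSS
    };

    class Heap {
     public:
      // As in the patch: currently true only for concurrent mark sweep.
      bool IsGcConcurrent() const { return collector_type_ == kCollectorTypeCMS; }

      void ChangeCollector(CollectorType collector_type) {
        collector_type_ = collector_type;
        // Nothing else to update: IsGcConcurrent() follows automatically.
      }

     private:
      CollectorType collector_type_ = kCollectorTypeNone;
    };

    int main() {
      Heap heap;
      heap.ChangeCollector(kCollectorTypeCMS);
      assert(heap.IsGcConcurrent());
      heap.ChangeCollector(kCollectorTypeGSS);
      assert(!heap.IsGcConcurrent());
      return 0;
    }
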