author     Mathieu Chartier <mathieuc@google.com>    2014-03-27 10:55:04 -0700
committer  Mathieu Chartier <mathieuc@google.com>    2014-03-27 15:24:44 -0700
commit     afe4998fc15b8de093d6b282c9782d7182829e36 (patch)
tree       bcb64fa7db43ece36a3e492deab5f9c162d39aa5 /runtime/gc
parent     67ad224199b9902b3e6d85737e03b679d1ab44f9 (diff)
Change sticky GC ergonomics to use GC throughput.
The old sticky ergonomics did a partial/full GC when the bytes until the
footprint limit were < min free. This was suboptimal. The new sticky GC
ergonomics do a partial/full GC when the throughput of the current sticky
GC iteration is <= the mean throughput of the partial/full GC.

Total GC time on FormulaEvaluationActions.EvaluateAndApplyChanges:
Before: 26.4s
After: 24.8s
No benchmark score change measured.

Bug: 8788501
Change-Id: I90000305e93fd492a8ef5a06ec9620d830eaf90d
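A minimal sketch of the new decision rule, written as a hypothetical
standalone function (the real logic lands in Heap::GrowForUtilization in
the diff below; names and types here are simplified):

// Sketch only: throughputs are in bytes/second, matching the accessors
// added to GarbageCollector in this change.
#include <cstddef>
#include <cstdint>

enum class GcType { kSticky, kPartial, kFull };

GcType NextGcType(uint64_t sticky_iteration_throughput,
                  uint64_t non_sticky_mean_throughput,
                  size_t non_sticky_iterations,
                  size_t bytes_allocated,
                  size_t footprint_limit,
                  bool have_zygote_space) {
  // Stay sticky while the last sticky iteration reclaimed at least as
  // fast as the partial/full mean, at least one non-sticky sample
  // exists, and allocation is still under the footprint limit.
  if (sticky_iteration_throughput >= non_sticky_mean_throughput &&
      non_sticky_iterations > 0 &&
      bytes_allocated <= footprint_limit) {
    return GcType::kSticky;
  }
  return have_zygote_space ? GcType::kPartial : GcType::kFull;
}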
Diffstat (limited to 'runtime/gc')
-rw-r--r--  runtime/gc/collector/garbage_collector.cc | 13
-rw-r--r--  runtime/gc/collector/garbage_collector.h  | 16
-rw-r--r--  runtime/gc/heap.cc                         | 55
-rw-r--r--  runtime/gc/heap.h                          |  5
4 files changed, 61 insertions(+), 28 deletions(-)
diff --git a/runtime/gc/collector/garbage_collector.cc b/runtime/gc/collector/garbage_collector.cc
index 65b5471..0c7565c 100644
--- a/runtime/gc/collector/garbage_collector.cc
+++ b/runtime/gc/collector/garbage_collector.cc
@@ -39,9 +39,8 @@ GarbageCollector::GarbageCollector(Heap* heap, const std::string& name)
name_(name),
gc_cause_(kGcCauseForAlloc),
clear_soft_references_(false),
- verbose_(VLOG_IS_ON(heap)),
duration_ns_(0),
- timings_(name_.c_str(), true, verbose_),
+ timings_(name_.c_str(), true, VLOG_IS_ON(heap)),
pause_histogram_((name_ + " paused").c_str(), kPauseBucketSize, kPauseBucketCount),
cumulative_timings_(name) {
ResetCumulativeStatistics();
@@ -186,6 +185,16 @@ void GarbageCollector::SwapBitmaps() {
}
}
+uint64_t GarbageCollector::GetEstimatedMeanThroughput() const {
+ // Add 1ms to prevent possible division by 0.
+ return (total_freed_bytes_ * 1000) / (NsToMs(GetCumulativeTimings().GetTotalNs()) + 1);
+}
+
+uint64_t GarbageCollector::GetEstimatedLastIterationThroughput() const {
+ // Add 1ms to prevent possible division by 0.
+ return (freed_bytes_ * 1000) / (NsToMs(GetDurationNs()) + 1);
+}
+
} // namespace collector
} // namespace gc
} // namespace art
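To make the units concrete, a self-contained check of the formula above
(illustrative numbers, not part of the change). The +1 in the denominator
is 1 ms, so a zero-duration iteration reports freed_bytes * 1000 rather
than dividing by zero:

// Hypothetical example of the bytes/second computation.
#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t freed_bytes = 64ull << 20;  // assume 64 MiB freed...
  const uint64_t duration_ms = 40;           // ...in a 40 ms iteration
  // Same shape as GetEstimatedLastIterationThroughput().
  const uint64_t bytes_per_sec = freed_bytes * 1000 / (duration_ms + 1);
  std::printf("~%llu bytes/s\n",
              static_cast<unsigned long long>(bytes_per_sec));
  return 0;
}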
diff --git a/runtime/gc/collector/garbage_collector.h b/runtime/gc/collector/garbage_collector.h
index 93fd2ab..f8c4579 100644
--- a/runtime/gc/collector/garbage_collector.h
+++ b/runtime/gc/collector/garbage_collector.h
@@ -68,8 +68,7 @@ class GarbageCollector {
TimingLogger& GetTimings() {
return timings_;
}
-
- CumulativeLogger& GetCumulativeTimings() {
+ const CumulativeLogger& GetCumulativeTimings() const {
return cumulative_timings_;
}
@@ -111,6 +110,17 @@ class GarbageCollector {
return pause_histogram_;
}
+ // Returns the estimated throughput in bytes / second.
+ uint64_t GetEstimatedMeanThroughput() const;
+
+ // Returns the estimated throughput of the last GC iteration.
+ uint64_t GetEstimatedLastIterationThroughput() const;
+
+ // Returns how many GC iterations have been run.
+ size_t GetIterations() const {
+ return GetCumulativeTimings().GetIterations();
+ }
+
protected:
// The initial phase. Done without mutators paused.
virtual void InitializePhase() = 0;
@@ -140,8 +150,6 @@ class GarbageCollector {
GcCause gc_cause_;
bool clear_soft_references_;
- const bool verbose_;
-
uint64_t duration_ns_;
TimingLogger timings_;
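The GetCumulativeTimings() change above is a const-correctness fix. A
stripped-down sketch (hypothetical minimal types, not the real classes)
of what it enables:

#include <cstdint>

class CumulativeLogger {
 public:
  uint64_t GetTotalNs() const { return total_ns_; }
 private:
  uint64_t total_ns_ = 0;
};

class GarbageCollector {
 public:
  // After this change: const member returning a const reference.
  const CumulativeLogger& GetCumulativeTimings() const {
    return cumulative_timings_;
  }
 private:
  CumulativeLogger cumulative_timings_;
};

// Read-only reporting code (e.g. Heap::DumpGcPerformanceInfo) can now
// take the collector by const reference; the old non-const accessor
// would have rejected this.
uint64_t TotalGcNs(const GarbageCollector& collector) {
  return collector.GetCumulativeTimings().GetTotalNs();
}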
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 02e7e3f..ff4b4ce 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -658,9 +658,9 @@ void Heap::DumpGcPerformanceInfo(std::ostream& os) {
// Dump cumulative loggers for each GC type.
uint64_t total_paused_time = 0;
for (const auto& collector : garbage_collectors_) {
- CumulativeLogger& logger = collector->GetCumulativeTimings();
+ const CumulativeLogger& logger = collector->GetCumulativeTimings();
if (logger.GetTotalNs() != 0) {
- os << Dumpable<CumulativeLogger>(logger);
+ os << ConstDumpable<CumulativeLogger>(logger);
const uint64_t total_ns = logger.GetTotalNs();
const uint64_t total_pause_ns = collector->GetTotalPausedTimeNs();
double seconds = NsToMs(logger.GetTotalNs()) / 1000.0;
@@ -1440,7 +1440,7 @@ void Heap::TransitionCollector(CollectorType collector_type) {
// Can't call into java code with all threads suspended.
EnqueueClearedReferences();
uint64_t duration = NanoTime() - start_time;
- GrowForUtilization(collector::kGcTypeFull, duration);
+ GrowForUtilization(semi_space_collector_);
FinishGC(self, collector::kGcTypeFull);
int32_t after_size = GetTotalMemory();
int32_t delta_size = before_size - after_size;
@@ -1821,13 +1821,7 @@ collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type, GcCaus
gc_type = collector::kGcTypeFull;
} else if (current_allocator_ == kAllocatorTypeRosAlloc ||
current_allocator_ == kAllocatorTypeDlMalloc) {
- for (const auto& cur_collector : garbage_collectors_) {
- if (cur_collector->GetCollectorType() == collector_type_ &&
- cur_collector->GetGcType() == gc_type) {
- collector = cur_collector;
- break;
- }
- }
+ collector = FindCollectorByGcType(gc_type);
} else {
LOG(FATAL) << "Invalid current allocator " << current_allocator_;
}
@@ -1838,14 +1832,14 @@ collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type, GcCaus
if (!clear_soft_references) {
clear_soft_references = gc_type != collector::kGcTypeSticky; // TODO: GSS?
}
- collector->Run(gc_cause, clear_soft_references || Runtime::Current()->IsZygote());
+ collector->Run(gc_cause, clear_soft_references || runtime->IsZygote());
total_objects_freed_ever_ += collector->GetFreedObjects();
total_bytes_freed_ever_ += collector->GetFreedBytes();
RequestHeapTrim();
// Enqueue cleared references.
EnqueueClearedReferences();
// Grow the heap so that we know when to perform the next GC.
- GrowForUtilization(gc_type, collector->GetDurationNs());
+ GrowForUtilization(collector);
if (CareAboutPauseTimes()) {
const size_t duration = collector->GetDurationNs();
std::vector<uint64_t> pauses = collector->GetPauseTimes();
@@ -1874,9 +1868,7 @@ collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type, GcCaus
<< percent_free << "% free, " << PrettySize(current_heap_size) << "/"
<< PrettySize(total_memory) << ", " << "paused " << pause_string.str()
<< " total " << PrettyDuration((duration / 1000) * 1000);
- if (VLOG_IS_ON(heap)) {
- LOG(INFO) << Dumpable<TimingLogger>(collector->GetTimings());
- }
+ VLOG(heap) << ConstDumpable<TimingLogger>(collector->GetTimings());
}
}
FinishGC(self, gc_type);
@@ -2479,13 +2471,24 @@ void Heap::UpdateMaxNativeFootprint() {
native_footprint_limit_ = 2 * target_size - native_size;
}
-void Heap::GrowForUtilization(collector::GcType gc_type, uint64_t gc_duration) {
+collector::GarbageCollector* Heap::FindCollectorByGcType(collector::GcType gc_type) {
+ for (const auto& collector : garbage_collectors_) {
+ if (collector->GetCollectorType() == collector_type_ &&
+ collector->GetGcType() == gc_type) {
+ return collector;
+ }
+ }
+ return nullptr;
+}
+
+void Heap::GrowForUtilization(collector::GarbageCollector* collector_ran) {
// We know what our utilization is at this moment.
// This doesn't actually resize any memory. It just lets the heap grow more when necessary.
const size_t bytes_allocated = GetBytesAllocated();
last_gc_size_ = bytes_allocated;
last_gc_time_ns_ = NanoTime();
size_t target_size;
+ collector::GcType gc_type = collector_ran->GetGcType();
if (gc_type != collector::kGcTypeSticky) {
// Grow the heap for non sticky GC.
target_size = bytes_allocated / GetTargetHeapUtilization();
@@ -2497,12 +2500,22 @@ void Heap::GrowForUtilization(collector::GcType gc_type, uint64_t gc_duration) {
native_need_to_run_finalization_ = true;
next_gc_type_ = collector::kGcTypeSticky;
} else {
- // Based on how close the current heap size is to the target size, decide
- // whether or not to do a partial or sticky GC next.
- if (bytes_allocated + min_free_ <= max_allowed_footprint_) {
+ collector::GcType non_sticky_gc_type =
+ have_zygote_space_ ? collector::kGcTypePartial : collector::kGcTypeFull;
+ // Find what the next non sticky collector will be.
+ collector::GarbageCollector* non_sticky_collector = FindCollectorByGcType(non_sticky_gc_type);
+ // If the throughput of the current sticky GC >= throughput of the non sticky collector, then
+ // do another sticky collection next.
+ // We also check that the bytes allocated aren't over the footprint limit in order to prevent a
+ // pathological case where dead objects which aren't reclaimed by sticky could get accumulated
+ // if the sticky GC throughput always remained >= the full/partial throughput.
+ if (collector_ran->GetEstimatedLastIterationThroughput() >=
+ non_sticky_collector->GetEstimatedMeanThroughput() &&
+ non_sticky_collector->GetIterations() > 0 &&
+ bytes_allocated <= max_allowed_footprint_) {
next_gc_type_ = collector::kGcTypeSticky;
} else {
- next_gc_type_ = have_zygote_space_ ? collector::kGcTypePartial : collector::kGcTypeFull;
+ next_gc_type_ = non_sticky_gc_type;
}
// If we have freed enough memory, shrink the heap back down.
if (bytes_allocated + max_free_ < max_allowed_footprint_) {
@@ -2516,7 +2529,7 @@ void Heap::GrowForUtilization(collector::GcType gc_type, uint64_t gc_duration) {
if (IsGcConcurrent()) {
// Calculate when to perform the next ConcurrentGC.
// Calculate the estimated GC duration.
- const double gc_duration_seconds = NsToMs(gc_duration) / 1000.0;
+ const double gc_duration_seconds = NsToMs(collector_ran->GetDurationNs()) / 1000.0;
// Estimate how many remaining bytes we will have when we need to start the next GC.
size_t remaining_bytes = allocation_rate_ * gc_duration_seconds;
remaining_bytes = std::min(remaining_bytes, kMaxConcurrentRemainingBytes);
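The final hunk above sizes the headroom for kicking off the next
concurrent GC. A worked sketch with illustrative numbers; the allocation
rate and the kMaxConcurrentRemainingBytes value are assumed here, not
taken from the source:

// Start the next concurrent GC early enough that, at the current
// allocation rate, it can finish before the footprint limit is hit.
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <cstdio>

int main() {
  const double allocation_rate = 200.0 * (1 << 20);       // assume 200 MiB/s
  const uint64_t gc_duration_ns = 25000000;               // assume 25 ms GC
  const size_t kMaxConcurrentRemainingBytes = 512 << 10;  // assumed cap
  const double gc_duration_seconds = (gc_duration_ns / 1000000) / 1000.0;
  size_t remaining_bytes =
      static_cast<size_t>(allocation_rate * gc_duration_seconds);
  remaining_bytes = std::min(remaining_bytes, kMaxConcurrentRemainingBytes);
  // 200 MiB/s * 0.025 s = 5 MiB, clamped to the 512 KiB cap here.
  std::printf("request concurrent GC %zu bytes before the limit\n",
              remaining_bytes);
  return 0;
}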
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 60b8450..a522750 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -658,10 +658,13 @@ class Heap {
// bytes allocated and the target utilization ratio.
void UpdateMaxNativeFootprint();
+ // Find a collector based on GC type.
+ collector::GarbageCollector* FindCollectorByGcType(collector::GcType gc_type);
+
// Given the current contents of the alloc space, increase the allowed heap footprint to match
// the target utilization ratio. This should only be called immediately after a full garbage
// collection.
- void GrowForUtilization(collector::GcType gc_type, uint64_t gc_duration);
+ void GrowForUtilization(collector::GarbageCollector* collector_ran);
size_t GetPercentFree();