author    Mathieu Chartier <mathieuc@google.com>    2014-03-13 14:46:09 -0700
committer Mathieu Chartier <mathieuc@google.com>    2014-03-13 15:32:51 -0700
commit 7bf52d28978a6a747795cc0c78b04a3e83f0cf16 (patch)
tree   3cbd64f45438a47d4caef9c32f0096307b1e2cf9 /runtime/gc
parent 135ce2ea28e98df624aa071688bd01e0dadf2f62 (diff)
Fix heap trimming logic.
The new heap trimming logic introduced in c/84301 was flawed: the heap trim would be skipped because the daemon thread reached the heap trimming code before the scheduled time. The new logic is to do the heap trim if the last heap trim occurred more than kHeapTrimWait ns ago.

Change-Id: I9d0e6766bf0c68e5f7fb15fb059140e1f1264216
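As a minimal standalone sketch of the new guard (this is not the ART source; std::chrono and std::mutex stand in for ART's NanoTime() and Mutex, and the 5 s constant mirrors kHeapTrimWait):

#include <chrono>
#include <mutex>

// Allow at most one trim per kHeapTrimWait, measured from the last trim that
// actually ran, so a daemon thread waking early no longer skips the trim.
class TrimRateLimiter {
 public:
  static constexpr std::chrono::nanoseconds kHeapTrimWait =
      std::chrono::seconds(5);  // mirrors MsToNs(5000)

  // Returns true if a trim should proceed now and records it as performed.
  bool TryBeginTrim() {
    std::lock_guard<std::mutex> lock(lock_);
    const auto now = std::chrono::steady_clock::now();
    if (last_trim_time_ + kHeapTrimWait >= now) {
      return false;  // A trim ran within the last kHeapTrimWait; skip it.
    }
    last_trim_time_ = now;
    return true;
  }

 private:
  std::mutex lock_;
  std::chrono::steady_clock::time_point last_trim_time_{};
};

Keying the check to the last completed trim rather than to a scheduled target time makes it insensitive to when the daemon happens to wake up, which is exactly the failure mode described above.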
Diffstat (limited to 'runtime/gc')
-rw-r--r--  runtime/gc/heap.cc  25
-rw-r--r--  runtime/gc/heap.h   11
2 files changed, 22 insertions, 14 deletions
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index d962f3c..13dd90e 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -92,7 +92,7 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max
background_collector_type_(background_collector_type),
desired_collector_type_(collector_type_),
heap_trim_request_lock_(nullptr),
- heap_trim_target_time_(0),
+ last_trim_time_(0),
heap_transition_target_time_(0),
heap_trim_request_pending_(false),
parallel_gc_threads_(parallel_gc_threads),
@@ -484,10 +484,11 @@ void Heap::UpdateProcessState(ProcessState process_state) {
process_state_ = process_state;
if (process_state_ == kProcessStateJankPerceptible) {
// Transition back to foreground right away to prevent jank.
- RequestHeapTransition(post_zygote_collector_type_, 0);
+ RequestCollectorTransition(post_zygote_collector_type_, 0);
} else {
// Don't delay for debug builds since we may want to stress test the GC.
- RequestHeapTransition(background_collector_type_, kIsDebugBuild ? 0 : kHeapTransitionWait);
+ RequestCollectorTransition(background_collector_type_, kIsDebugBuild ? 0 :
+ kCollectorTransitionWait);
}
}
}
@@ -903,7 +904,8 @@ void Heap::DoPendingTransitionOrTrim() {
ScopedThreadStateChange tsc(self, kSleeping);
usleep(wait_time / 1000); // Usleep takes microseconds.
}
- // Transition the heap if the desired collector type is nto the same as the current collector type.
+ // Transition the collector if the desired collector type is not the same as the current
+ // collector type.
TransitionCollector(desired_collector_type);
// Do a heap trim if it is needed.
Trim();
@@ -913,9 +915,10 @@ void Heap::Trim() {
Thread* self = Thread::Current();
{
MutexLock mu(self, *heap_trim_request_lock_);
- if (!heap_trim_request_pending_ || NanoTime() < heap_trim_target_time_) {
+ if (!heap_trim_request_pending_ || last_trim_time_ + kHeapTrimWait >= NanoTime()) {
return;
}
+ last_trim_time_ = NanoTime();
heap_trim_request_pending_ = false;
}
{
@@ -1804,7 +1807,7 @@ collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type, GcCaus
collector->Run(gc_cause, clear_soft_references);
total_objects_freed_ever_ += collector->GetFreedObjects();
total_bytes_freed_ever_ += collector->GetFreedBytes();
- RequestHeapTrim(Heap::kHeapTrimWait);
+ RequestHeapTrim();
// Enqueue cleared references.
EnqueueClearedReferences();
// Grow the heap so that we know when to perform the next GC.
@@ -2567,7 +2570,7 @@ void Heap::ConcurrentGC(Thread* self) {
}
}
-void Heap::RequestHeapTransition(CollectorType desired_collector_type, uint64_t delta_time) {
+void Heap::RequestCollectorTransition(CollectorType desired_collector_type, uint64_t delta_time) {
Thread* self = Thread::Current();
{
MutexLock mu(self, *heap_trim_request_lock_);
@@ -2580,7 +2583,7 @@ void Heap::RequestHeapTransition(CollectorType desired_collector_type, uint64_t
SignalHeapTrimDaemon(self);
}
-void Heap::RequestHeapTrim(uint64_t delta_time) {
+void Heap::RequestHeapTrim() {
// GC completed and now we must decide whether to request a heap trim (advising pages back to the
// kernel) or not. Issuing a request will also cause trimming of the libc heap. As a trim scans
// a space it will hold its lock and can become a cause of jank.
@@ -2607,7 +2610,11 @@ void Heap::RequestHeapTrim(uint64_t delta_time) {
if (!CareAboutPauseTimes()) {
{
MutexLock mu(self, *heap_trim_request_lock_);
- heap_trim_target_time_ = std::max(heap_trim_target_time_, NanoTime() + delta_time);
+ if (last_trim_time_ + kHeapTrimWait >= NanoTime()) {
+ // We have done a heap trim in the last kHeapTrimWait nanosecs, don't request another one
+ // just yet.
+ return;
+ }
heap_trim_request_pending_ = true;
}
// Notify the daemon thread which will actually do the heap trim.
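Read together, the heap.cc hunks give the following shape; this is a hedged paraphrase in plain C++ (the condition variable stands in for SignalHeapTrimDaemon(), and the two bool parameters abstract CareAboutPauseTimes() and the kHeapTrimWait check):

#include <condition_variable>
#include <mutex>

// Post-patch flow: the GC thread files a trim request, the daemon serves it.
struct HeapTrimFlow {
  std::mutex lock;                    // plays the role of heap_trim_request_lock_
  std::condition_variable daemon_cv;  // plays the role of SignalHeapTrimDaemon()
  bool heap_trim_request_pending = false;

  // GC thread, end of CollectGarbageInternal(): note there is no delay
  // parameter any more.
  void RequestHeapTrim(bool care_about_pause_times, bool trimmed_recently) {
    if (care_about_pause_times) {
      return;  // Jank-sensitive process: trimming would hold space locks.
    }
    {
      std::lock_guard<std::mutex> mu(lock);
      if (trimmed_recently) {
        return;  // Last trim was less than kHeapTrimWait ago; don't re-request.
      }
      heap_trim_request_pending = true;
    }
    daemon_cv.notify_one();  // Wake the daemon that performs the trim.
  }

  // Daemon thread, inside Trim(): consume the pending request, then trim.
  void DaemonTrim() {
    std::unique_lock<std::mutex> mu(lock);
    daemon_cv.wait(mu, [this] { return heap_trim_request_pending; });
    heap_trim_request_pending = false;
    mu.unlock();
    // ... madvise unused pages back to the kernel and trim the libc heap ...
  }
};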
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 797f44c..12c55c4 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -135,9 +135,10 @@ class Heap {
// Used so that we don't overflow the allocation time atomic integer.
static constexpr size_t kTimeAdjust = 1024;
- // How long we wait after a GC to perform a heap trim (nanoseconds).
+ // How often we allow heap trimming to happen (nanoseconds).
static constexpr uint64_t kHeapTrimWait = MsToNs(5000);
- static constexpr uint64_t kHeapTransitionWait = MsToNs(5000);
+ // How long we wait after a transition request to perform a collector transition (nanoseconds).
+ static constexpr uint64_t kCollectorTransitionWait = MsToNs(5000);
// Create a heap with the requested sizes. The possible empty
// image_file_names names specify Spaces to load based on
@@ -648,9 +649,9 @@ class Heap {
collector::GcType WaitForGcToCompleteLocked(Thread* self)
EXCLUSIVE_LOCKS_REQUIRED(gc_complete_lock_);
- void RequestHeapTransition(CollectorType desired_collector_type, uint64_t delta_time)
+ void RequestCollectorTransition(CollectorType desired_collector_type, uint64_t delta_time)
LOCKS_EXCLUDED(heap_trim_request_lock_);
- void RequestHeapTrim(uint64_t delta_time) LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_);
+ void RequestHeapTrim() LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_);
void RequestConcurrentGC(Thread* self) LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_);
bool IsGCRequestPending() const;
@@ -754,7 +755,7 @@ class Heap {
// Lock which guards heap trim requests.
Mutex* heap_trim_request_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
// When we want to perform the next heap trim (nano seconds).
- uint64_t heap_trim_target_time_ GUARDED_BY(heap_trim_request_lock_);
+ uint64_t last_trim_time_ GUARDED_BY(heap_trim_request_lock_);
// When we want to perform the next heap transition (nano seconds).
uint64_t heap_transition_target_time_ GUARDED_BY(heap_trim_request_lock_);
// If we have a heap trim request pending.
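Collector transitions, by contrast, keep the target-time scheme (only the names change in the hunks above). A rough sketch of that remaining path, with the daemon's sleep from DoPendingTransitionOrTrim, assuming a monotonic clock (the loop structure around it is an assumption, not the ART code):

#include <chrono>
#include <thread>

// Transitions are still scheduled for a target time: the request records
// now + delta_time, and the daemon sleeps out any remaining wait before
// transitioning (cf. the usleep(wait_time / 1000) in the hunk above).
class TransitionScheduler {
  using Clock = std::chrono::steady_clock;

 public:
  void RequestCollectorTransition(Clock::duration delta_time) {
    target_time_ = Clock::now() + delta_time;
  }

  void DoPendingTransition() {
    const auto now = Clock::now();
    if (now < target_time_) {
      std::this_thread::sleep_for(target_time_ - now);  // the wait_time sleep
    }
    // TransitionCollector(desired_collector_type_) would run here.
  }

 private:
  Clock::time_point target_time_{};
};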