author     Mathieu Chartier <mathieuc@google.com>   2014-05-02 10:27:26 -0700
committer  Mathieu Chartier <mathieuc@google.com>   2014-05-02 11:20:24 -0700
commit     89a201e599da95d88590c7b4d069b76735e20fa8 (patch)
tree       c01361799ceb663110ed8fb87f9f3d2d35ea22b5 /runtime/gc
parent     6a6a5304e75a1af484975cedc832d0a80c3e5e61 (diff)
Add GC cause to WaitForGcToComplete message.
Change-Id: I8fe107d90a84de065c407b8d29fd106267ac440d
Diffstat (limited to 'runtime/gc')
-rw-r--r--  runtime/gc/gc_cause.cc |  4
-rw-r--r--  runtime/gc/gc_cause.h  |  4
-rw-r--r--  runtime/gc/heap.cc     | 23
-rw-r--r--  runtime/gc/heap.h      |  5
4 files changed, 22 insertions, 14 deletions
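
In short, the commit threads the triggering GcCause through WaitForGcToComplete and WaitForGcToCompleteLocked so the long-pause warning in heap.cc can say why a thread was blocked on the collector, and it adds two wait-only causes (heap trim and disabling moving GC). A minimal sketch of the resulting call shape (hypothetical caller; the real call sites are in the heap.cc hunks below):

  // Hypothetical caller: wait for any in-progress GC before trimming, naming the cause.
  collector::GcType last_gc = heap->WaitForGcToComplete(kGcCauseTrim, Thread::Current());
  if (last_gc != collector::kGcTypeNone) {
    // A collection finished while we waited.
  }
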
diff --git a/runtime/gc/gc_cause.cc b/runtime/gc/gc_cause.cc
index b25f7ff..9e73f14 100644
--- a/runtime/gc/gc_cause.cc
+++ b/runtime/gc/gc_cause.cc
@@ -29,7 +29,9 @@ const char* PrettyCause(GcCause cause) {
case kGcCauseBackground: return "Background";
case kGcCauseExplicit: return "Explicit";
case kGcCauseForNativeAlloc: return "NativeAlloc";
- case kGcCauseCollectorTransition: return" CollectorTransition";
+ case kGcCauseCollectorTransition: return "CollectorTransition";
+ case kGcCauseDisableMovingGc: return "DisableMovingGc";
+ case kGcCauseTrim: return "HeapTrim";
default:
LOG(FATAL) << "Unreachable";
}
diff --git a/runtime/gc/gc_cause.h b/runtime/gc/gc_cause.h
index 7499b9e..10e6667 100644
--- a/runtime/gc/gc_cause.h
+++ b/runtime/gc/gc_cause.h
@@ -35,6 +35,10 @@ enum GcCause {
kGcCauseForNativeAlloc,
// GC triggered for a collector transition.
kGcCauseCollectorTransition,
+ // Not a real GC cause, used when we disable moving GC (currently for GetPrimitiveArrayCritical).
+ kGcCauseDisableMovingGc,
+ // Not a real GC cause, used when we trim the heap.
+ kGcCauseTrim,
};
const char* PrettyCause(GcCause cause);
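
The two new enumerators are not real collection causes; they only label why a thread is waiting (heap trimming, or disabling moving GC), and PrettyCause maps them to the strings shown in the gc_cause.cc hunk above. Streaming a cause straight into LOG(INFO), as the heap.cc hunk below does, presumes the cause prints as its pretty name rather than as an integer. A standalone, simplified sketch of that enum-to-string pattern (not ART's actual code):

  #include <iostream>

  // Simplified stand-in for art::gc::GcCause.
  enum GcCause {
    kGcCauseForAlloc,
    kGcCauseBackground,
    kGcCauseTrim,
  };

  // Maps each cause to a short human-readable name, as gc_cause.cc does.
  const char* PrettyCause(GcCause cause) {
    switch (cause) {
      case kGcCauseForAlloc:   return "Alloc";
      case kGcCauseBackground: return "Background";
      case kGcCauseTrim:       return "HeapTrim";
    }
    return "Unknown";
  }

  // Lets a GcCause be streamed directly into a log line.
  std::ostream& operator<<(std::ostream& os, GcCause cause) {
    return os << PrettyCause(cause);
  }

  int main() {
    std::cout << "WaitForGcToComplete blocked for 52ms for cause " << kGcCauseTrim << "\n";
  }
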
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 5d517bb..33026d1 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -498,7 +498,7 @@ void Heap::IncrementDisableMovingGC(Thread* self) {
MutexLock mu(self, *gc_complete_lock_);
++disable_moving_gc_count_;
if (IsMovingGc(collector_type_running_)) {
- WaitForGcToCompleteLocked(self);
+ WaitForGcToCompleteLocked(kGcCauseDisableMovingGc, self);
}
}
@@ -962,7 +962,7 @@ void Heap::Trim() {
// trimming.
MutexLock mu(self, *gc_complete_lock_);
// Ensure there is only one GC at a time.
- WaitForGcToCompleteLocked(self);
+ WaitForGcToCompleteLocked(kGcCauseTrim, self);
collector_type_running_ = kCollectorTypeHeapTrim;
}
uint64_t start_ns = NanoTime();
@@ -1171,7 +1171,7 @@ mirror::Object* Heap::AllocateInternalWithGc(Thread* self, AllocatorType allocat
SirtRef<mirror::Class> sirt_klass(self, *klass);
// The allocation failed. If the GC is running, block until it completes, and then retry the
// allocation.
- collector::GcType last_gc = WaitForGcToComplete(self);
+ collector::GcType last_gc = WaitForGcToComplete(kGcCauseForAlloc, self);
if (last_gc != collector::kGcTypeNone) {
// If we were the default allocator but the allocator changed while we were suspended,
// abort the allocation.
@@ -1418,7 +1418,7 @@ void Heap::TransitionCollector(CollectorType collector_type) {
ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
MutexLock mu(self, *gc_complete_lock_);
// Ensure there is only one GC at a time.
- WaitForGcToCompleteLocked(self);
+ WaitForGcToCompleteLocked(kGcCauseCollectorTransition, self);
// If someone else beat us to it and changed the collector before we could, exit.
// This is safe to do before the suspend all since we set the collector_type_running_ before
// we exit the loop. If another thread attempts to do the heap transition before we exit,
@@ -1819,7 +1819,7 @@ collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type, GcCaus
ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
MutexLock mu(self, *gc_complete_lock_);
// Ensure there is only one GC at a time.
- WaitForGcToCompleteLocked(self);
+ WaitForGcToCompleteLocked(gc_cause, self);
compacting_gc = IsMovingGc(collector_type_);
// GC can be disabled if someone has used GetPrimitiveArrayCritical.
if (compacting_gc && disable_moving_gc_count_ != 0) {
@@ -2448,13 +2448,13 @@ void Heap::RosAllocVerification(TimingLogger* timings, const char* name) {
}
}
-collector::GcType Heap::WaitForGcToComplete(Thread* self) {
+collector::GcType Heap::WaitForGcToComplete(GcCause cause, Thread* self) {
ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
MutexLock mu(self, *gc_complete_lock_);
- return WaitForGcToCompleteLocked(self);
+ return WaitForGcToCompleteLocked(cause, self);
}
-collector::GcType Heap::WaitForGcToCompleteLocked(Thread* self) {
+collector::GcType Heap::WaitForGcToCompleteLocked(GcCause cause, Thread* self) {
collector::GcType last_gc_type = collector::kGcTypeNone;
uint64_t wait_start = NanoTime();
while (collector_type_running_ != kCollectorTypeNone) {
@@ -2467,7 +2467,8 @@ collector::GcType Heap::WaitForGcToCompleteLocked(Thread* self) {
uint64_t wait_time = NanoTime() - wait_start;
total_wait_time_ += wait_time;
if (wait_time > long_pause_log_threshold_) {
- LOG(INFO) << "WaitForGcToComplete blocked for " << PrettyDuration(wait_time);
+ LOG(INFO) << "WaitForGcToComplete blocked for " << PrettyDuration(wait_time)
+ << " for cause " << cause;
}
return last_gc_type;
}
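
With the cause threaded through, a wait that exceeds long_pause_log_threshold_ now reports why the thread was blocked as well as for how long; an illustrative message (duration invented, and assuming the cause streams as its pretty name):

  WaitForGcToComplete blocked for 52.341ms for cause HeapTrim
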
@@ -2659,7 +2660,7 @@ void Heap::ConcurrentGC(Thread* self) {
return;
}
// Wait for any GCs currently running to finish.
- if (WaitForGcToComplete(self) == collector::kGcTypeNone) {
+ if (WaitForGcToComplete(kGcCauseBackground, self) == collector::kGcTypeNone) {
// If we can't run the GC type we wanted to run, find the next appropriate one and try that
// instead. E.g. can't do partial, so do full instead.
if (CollectGarbageInternal(next_gc_type_, kGcCauseBackground, false) ==
@@ -2792,7 +2793,7 @@ void Heap::RegisterNativeAllocation(JNIEnv* env, int bytes) {
// The second watermark is higher than the gc watermark. If you hit this it means you are
// allocating native objects faster than the GC can keep up with.
if (static_cast<size_t>(native_bytes_allocated_) > native_footprint_limit_) {
- if (WaitForGcToComplete(self) != collector::kGcTypeNone) {
+ if (WaitForGcToComplete(kGcCauseForNativeAlloc, self) != collector::kGcTypeNone) {
// Just finished a GC, attempt to run finalizers.
RunFinalization(env);
CHECK(!env->ExceptionCheck());
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index d3b5cdc..d770024 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -300,7 +300,8 @@ class Heap {
// Blocks the caller until the garbage collector becomes idle and returns the type of GC we
// waited for.
- collector::GcType WaitForGcToComplete(Thread* self) LOCKS_EXCLUDED(gc_complete_lock_);
+ collector::GcType WaitForGcToComplete(GcCause cause, Thread* self)
+ LOCKS_EXCLUDED(gc_complete_lock_);
// Update the heap's process state to a new value, may cause compaction to occur.
void UpdateProcessState(ProcessState process_state);
@@ -641,7 +642,7 @@ class Heap {
// Blocks the caller until the garbage collector becomes idle and returns the type of GC we
// waited for.
- collector::GcType WaitForGcToCompleteLocked(Thread* self)
+ collector::GcType WaitForGcToCompleteLocked(GcCause cause, Thread* self)
EXCLUSIVE_LOCKS_REQUIRED(gc_complete_lock_);
void RequestCollectorTransition(CollectorType desired_collector_type, uint64_t delta_time)
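
Both declarations now take the cause alongside the waiting thread while keeping their existing lock annotations: the unlocked variant acquires gc_complete_lock_ itself, while the Locked variant requires the caller to already hold it. A hedged sketch of a hypothetical internal call site for the locked variant (illustration only, not part of this commit):

  // Hypothetical snippet inside a Heap member function; gc_complete_lock_ must be
  // held, per EXCLUSIVE_LOCKS_REQUIRED on WaitForGcToCompleteLocked.
  MutexLock mu(self, *gc_complete_lock_);
  collector::GcType waited_for = WaitForGcToCompleteLocked(kGcCauseBackground, self);
  if (waited_for == collector::kGcTypeNone) {
    // No collection was in progress; nothing was waited on.
  }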