author		Mathieu Chartier <mathieuc@google.com>	2014-03-04 11:07:42 -0800
committer	Mathieu Chartier <mathieuc@google.com>	2014-03-04 14:21:58 -0800
commit		893263b7d5bc2ca43a91ecb8071867f5134fc60a (patch)
tree		a99238843a9caad00122c8f7d13e031f5d81bc38 /runtime/gc/collector/mark_sweep.cc
parent		2fece5941f12395a94e742313e7059a9e419994d (diff)
download	art-893263b7d5bc2ca43a91ecb8071867f5134fc60a.zip
		art-893263b7d5bc2ca43a91ecb8071867f5134fc60a.tar.gz
		art-893263b7d5bc2ca43a91ecb8071867f5134fc60a.tar.bz2
Avoid marking old class linker and intern table roots during pause.
The new root visiting logic has a concept of a root log which holds new roots added since the start of the GC. This is an optimization since it lets us mark only these newly added roots during the pause (or pre-cleaning); the other roots (intern table and class linker roots) were already marked concurrently at the start of the GC.

Before (EvaluateAndApplyChanges):
MarkConcurrentRoots: Sum: 605.193ms

After:
MarkConcurrentRoots: Sum: 271.858ms

This should also reduce the pathological GC pauses which used to be possible when the intern table or class linker became "dirty" during the concurrent GC.

Change-Id: I433fab021f2c339d50c35aaae7161a50a0901dec
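For readers unfamiliar with the root log described above, the sketch below illustrates the general idea under simplified assumptions. The VisitRootFlags values mirror the names used in the diff, but the RootTable class, its members, and the callback signature are hypothetical stand-ins for illustration, not ART's actual intern table, class linker, or Runtime::VisitRoots API.

    // Minimal sketch of the root-log idea, not ART's real implementation.
    #include <cstdint>
    #include <vector>

    typedef void RootCallback(const void* root);

    enum VisitRootFlags : uint32_t {
      kVisitRootFlagAllRoots = 0x1,
      kVisitRootFlagNewRoots = 0x2,
      kVisitRootFlagStartLoggingNewRoots = 0x4,
      kVisitRootFlagStopLoggingNewRoots = 0x8,
      kVisitRootFlagClearRootLog = 0x10,
    };

    // Hypothetical root container; ART's intern table and class linker keep
    // their own root sets and "new roots" logs.
    class RootTable {
     public:
      // Mutators call this. While logging is enabled, the root is also
      // recorded in new_roots_, so a pause only needs to revisit that set.
      void Add(const void* root) {
        roots_.push_back(root);
        if (logging_new_roots_) {
          new_roots_.push_back(root);
        }
      }

      void VisitRoots(RootCallback* callback, VisitRootFlags flags) {
        if (flags & kVisitRootFlagAllRoots) {
          for (const void* root : roots_) {
            callback(root);
          }
        } else if (flags & kVisitRootFlagNewRoots) {
          // Only roots added since logging started, i.e. since the
          // concurrent mark began, are visited during the pause.
          for (const void* root : new_roots_) {
            callback(root);
          }
        }
        if (flags & kVisitRootFlagClearRootLog) {
          new_roots_.clear();
        }
        if (flags & kVisitRootFlagStartLoggingNewRoots) {
          logging_new_roots_ = true;
        } else if (flags & kVisitRootFlagStopLoggingNewRoots) {
          logging_new_roots_ = false;
        }
      }

     private:
      std::vector<const void*> roots_;
      std::vector<const void*> new_roots_;
      bool logging_new_roots_ = false;
    };

    int main() {
      RootTable table;
      int a = 0, b = 0;
      table.Add(&a);
      // Concurrent phase: visit everything and start logging additions.
      table.VisitRoots([](const void*) {}, static_cast<VisitRootFlags>(
          kVisitRootFlagAllRoots | kVisitRootFlagStartLoggingNewRoots));
      // A mutator registers a new root while the GC runs concurrently.
      table.Add(&b);
      // Pause: only &b needs revisiting; stop logging and clear the log.
      table.VisitRoots([](const void*) {}, static_cast<VisitRootFlags>(
          kVisitRootFlagNewRoots | kVisitRootFlagStopLoggingNewRoots |
          kVisitRootFlagClearRootLog));
      return 0;
    }

In MarkSweep terms, the concurrent MarkRoots path below visits with kVisitRootFlagAllRoots | kVisitRootFlagStartLoggingNewRoots, while ReMarkRoots runs during the pause with kVisitRootFlagNewRoots | kVisitRootFlagStopLoggingNewRoots | kVisitRootFlagClearRootLog, matching the flag combinations visible in the diff.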
Diffstat (limited to 'runtime/gc/collector/mark_sweep.cc')
-rw-r--r--	runtime/gc/collector/mark_sweep.cc	60
1 file changed, 35 insertions, 25 deletions
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index c39e56f..4aff68a 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -86,6 +86,7 @@ static constexpr bool kCountJavaLangRefs = false;
// Turn off kCheckLocks when profiling the GC since it slows the GC down by up to 40%.
static constexpr bool kCheckLocks = kDebugLocking;
+static constexpr bool kVerifyRoots = kIsDebugBuild;
void MarkSweep::ImmuneSpace(space::ContinuousSpace* space) {
// Bind live to mark bitmap if necessary.
@@ -255,7 +256,8 @@ void MarkSweep::PreCleanCards() {
MarkThreadRoots(self);
// TODO: Only mark the dirty roots.
MarkNonThreadRoots();
- MarkConcurrentRoots();
+ MarkConcurrentRoots(
+ static_cast<VisitRootFlags>(kVisitRootFlagClearRootLog | kVisitRootFlagNewRoots));
// Process the newly aged cards.
RecursiveMarkDirtyObjects(false, accounting::CardTable::kCardDirty - 1);
// TODO: Empty allocation stack to reduce the number of objects we need to test / mark as live
@@ -286,17 +288,8 @@ void MarkSweep::MarkingPhase() {
heap_->SwapStacks(self);
WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
- if (Locks::mutator_lock_->IsExclusiveHeld(self)) {
- // If we exclusively hold the mutator lock, all threads must be suspended.
- MarkRoots();
- RevokeAllThreadLocalAllocationStacks(self);
- } else {
- MarkThreadRoots(self);
- // At this point the live stack should no longer have any mutators which push into it.
- MarkNonThreadRoots();
- }
+ MarkRoots(self);
live_stack_freeze_size_ = heap_->GetLiveStack()->Size();
- MarkConcurrentRoots();
UpdateAndMarkModUnion();
MarkReachableObjects();
// Pre-clean dirtied cards to reduce pauses.
@@ -583,17 +576,16 @@ inline void MarkSweep::MarkObject(const Object* obj) {
}
}
-void MarkSweep::MarkRoot(const Object* obj) {
- if (obj != NULL) {
- MarkObjectNonNull(obj);
- }
-}
-
void MarkSweep::MarkRootParallelCallback(mirror::Object** root, void* arg, uint32_t /*thread_id*/,
RootType /*root_type*/) {
reinterpret_cast<MarkSweep*>(arg)->MarkObjectNonNullParallel(*root);
}
+void MarkSweep::VerifyRootMarked(Object** root, void* arg, uint32_t /*thread_id*/,
+ RootType /*root_type*/) {
+ CHECK(reinterpret_cast<MarkSweep*>(arg)->IsMarked(*root));
+}
+
void MarkSweep::MarkRootCallback(Object** root, void* arg, uint32_t /*thread_id*/,
RootType /*root_type*/) {
reinterpret_cast<MarkSweep*>(arg)->MarkObjectNonNull(*root);
@@ -621,11 +613,20 @@ void MarkSweep::VerifyRoots() {
Runtime::Current()->GetThreadList()->VerifyRoots(VerifyRootCallback, this);
}
-// Marks all objects in the root set.
-void MarkSweep::MarkRoots() {
- timings_.StartSplit("MarkRoots");
- Runtime::Current()->VisitNonConcurrentRoots(MarkRootCallback, this);
- timings_.EndSplit();
+void MarkSweep::MarkRoots(Thread* self) {
+ if (Locks::mutator_lock_->IsExclusiveHeld(self)) {
+ // If we exclusively hold the mutator lock, all threads must be suspended.
+ timings_.StartSplit("MarkRoots");
+ Runtime::Current()->VisitRoots(MarkRootCallback, this);
+ timings_.EndSplit();
+ RevokeAllThreadLocalAllocationStacks(self);
+ } else {
+ MarkThreadRoots(self);
+ // At this point the live stack should no longer have any mutators which push into it.
+ MarkNonThreadRoots();
+ MarkConcurrentRoots(
+ static_cast<VisitRootFlags>(kVisitRootFlagAllRoots | kVisitRootFlagStartLoggingNewRoots));
+ }
}
void MarkSweep::MarkNonThreadRoots() {
@@ -634,10 +635,10 @@ void MarkSweep::MarkNonThreadRoots() {
timings_.EndSplit();
}
-void MarkSweep::MarkConcurrentRoots() {
+void MarkSweep::MarkConcurrentRoots(VisitRootFlags flags) {
timings_.StartSplit("MarkConcurrentRoots");
// Visit all runtime roots and clear dirty flags.
- Runtime::Current()->VisitConcurrentRoots(MarkRootCallback, this, false, true);
+ Runtime::Current()->VisitConcurrentRoots(MarkRootCallback, this, flags);
timings_.EndSplit();
}
@@ -1003,9 +1004,18 @@ void MarkSweep::RecursiveMarkDirtyObjects(bool paused, byte minimum_age) {
}
void MarkSweep::ReMarkRoots() {
+ Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
timings_.StartSplit("(Paused)ReMarkRoots");
- Runtime::Current()->VisitRoots(MarkRootCallback, this, true, true);
+ Runtime::Current()->VisitRoots(
+ MarkRootCallback, this, static_cast<VisitRootFlags>(kVisitRootFlagNewRoots |
+ kVisitRootFlagStopLoggingNewRoots |
+ kVisitRootFlagClearRootLog));
timings_.EndSplit();
+ if (kVerifyRoots) {
+ timings_.StartSplit("(Paused)VerifyRoots");
+ Runtime::Current()->VisitRoots(VerifyRootMarked, this);
+ timings_.EndSplit();
+ }
}
void MarkSweep::SweepSystemWeaks() {