path: root/runtime/gc/collector
author     Mathieu Chartier <mathieuc@google.com>  2014-06-20 10:37:54 -0700
committer  Mathieu Chartier <mathieuc@google.com>  2014-06-24 13:45:05 -0700
commit     f5997b4d3f889569d5a2b724d83d764bfbb8d106
tree       f0d3b3890a604a8b3c4c87c0f408f86df041690e /runtime/gc/collector
parent     9658d24deaa9bd07781bfac860d2a6dd89066d55
More advanced timing loggers.
The new timing loggers have lower overhead since they only push into a vector. The new format has two types, a start timing and a stop timing. You can think of these as brackets associated with a timestamp. The logger uses these to construct various statistics when needed, such as total time, exclusive time, and nesting depth.

Changed PrettyDuration to have a default of 3 digits after the decimal point.

Example of a GC dump with exclusive / total times and indenting:
I/art (23546): GC iteration timing logger [Exclusive time] [Total time]
I/art (23546):   0ms InitializePhase
I/art (23546):   0.305ms/167.746ms MarkingPhase
I/art (23546):     0ms BindBitmaps
I/art (23546):     0ms FindDefaultSpaceBitmap
I/art (23546):     0ms/1.709ms ProcessCards
I/art (23546):       0.183ms ImageModUnionClearCards
I/art (23546):       0.916ms ZygoteModUnionClearCards
I/art (23546):       0.610ms AllocSpaceClearCards
I/art (23546):       1.373ms AllocSpaceClearCards
I/art (23546):     0.305ms/6.318ms MarkRoots
I/art (23546):       2.106ms MarkRootsCheckpoint
I/art (23546):       0.153ms MarkNonThreadRoots
I/art (23546):       4.287ms MarkConcurrentRoots
I/art (23546):     43.461ms UpdateAndMarkImageModUnionTable
I/art (23546):     0ms/112.712ms RecursiveMark
I/art (23546):       112.712ms ProcessMarkStack
I/art (23546):     0.610ms/2.777ms PreCleanCards
I/art (23546):       0.305ms/0.855ms ProcessCards
I/art (23546):         0.153ms ImageModUnionClearCards
I/art (23546):         0.610ms ZygoteModUnionClearCards
I/art (23546):         0.610ms AllocSpaceClearCards
I/art (23546):         0.549ms AllocSpaceClearCards
I/art (23546):       0.549ms MarkRootsCheckpoint
I/art (23546):       0.610ms MarkNonThreadRoots
I/art (23546):       0ms MarkConcurrentRoots
I/art (23546):       0.610ms ScanGrayImageSpaceObjects
I/art (23546):       0.305ms ScanGrayZygoteSpaceObjects
I/art (23546):       0.305ms ScanGrayAllocSpaceObjects
I/art (23546):       1.129ms ScanGrayAllocSpaceObjects
I/art (23546):       0ms ProcessMarkStack
I/art (23546):   0ms/0.977ms (Paused)PausePhase
I/art (23546):     0.244ms ReMarkRoots
I/art (23546):     0.672ms (Paused)ScanGrayObjects
I/art (23546):     0ms (Paused)ProcessMarkStack
I/art (23546):   0ms/0.610ms SwapStacks
I/art (23546):     0.610ms RevokeAllThreadLocalAllocationStacks
I/art (23546):   0ms PreSweepingGcVerification
I/art (23546):   0ms/10.621ms ReclaimPhase
I/art (23546):     0.610ms/0.702ms ProcessReferences
I/art (23546):       0.214ms/0.641ms EnqueueFinalizerReferences
I/art (23546):         0.427ms ProcessMarkStack
I/art (23546):     0.488ms SweepSystemWeaks
I/art (23546):     0.824ms/9.400ms Sweep
I/art (23546):       0ms SweepMallocSpace
I/art (23546):       0.214ms SweepZygoteSpace
I/art (23546):       0.122ms SweepMallocSpace
I/art (23546):       6.226ms SweepMallocSpace
I/art (23546):       0ms SweepMallocSpace
I/art (23546):       2.144ms SweepLargeObjects
I/art (23546):     0.305ms SwapBitmaps
I/art (23546):     0ms UnBindBitmaps
I/art (23546):   0.275ms FinishPhase
I/art (23546): GC iteration timing logger: end, 178.971ms

Change-Id: Ia55b65609468f212b3cd65cda66b843da42be645
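As a rough illustration of the bracket scheme described above (start and stop events pushed into a flat vector, folded into statistics only when a dump is requested), here is a minimal C++ sketch. The struct layout, names, and print-on-close behavior are assumptions for illustration, not ART's actual TimingLogger:

#include <cassert>
#include <cstdint>
#include <cstdio>
#include <string>
#include <vector>

// One recorded event: a start or stop "bracket" plus a timestamp.
// (Hypothetical layout; ART's real timing data differs.)
struct Timing {
  bool is_start;
  uint64_t ns;       // timestamp in nanoseconds
  std::string name;  // label; only set on start events
};

// Fold the event stream once, keeping a stack of open brackets.
// Total time = stop - start; exclusive time = total minus the time
// spent inside nested child brackets; depth = stack size at the start.
void DumpStats(const std::vector<Timing>& timings) {
  struct Open {
    uint64_t start_ns;
    uint64_t child_ns;
    std::string name;
    size_t depth;
  };
  std::vector<Open> stack;
  for (const Timing& event : timings) {
    if (event.is_start) {
      stack.push_back(Open{event.ns, 0, event.name, stack.size()});
    } else {
      assert(!stack.empty());
      Open top = stack.back();
      stack.pop_back();
      const uint64_t total = event.ns - top.start_ns;
      const uint64_t exclusive = total - top.child_ns;
      if (!stack.empty()) {
        stack.back().child_ns += total;  // charge this split to its parent
      }
      std::printf("%*s%.3fms/%.3fms %s\n", static_cast<int>(2 * top.depth), "",
                  exclusive / 1e6, total / 1e6, top.name.c_str());
    }
  }
}

Unlike the dump above, this sketch prints each split as its stop event is consumed (post-order) rather than in start order; the point is only that total time, exclusive time, and nesting depth are all recoverable after the fact from the flat event vector, keeping the recording path cheap.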
Diffstat (limited to 'runtime/gc/collector')
-rw-r--r--  runtime/gc/collector/garbage_collector.cc    1
-rw-r--r--  runtime/gc/collector/mark_compact.cc         63
-rw-r--r--  runtime/gc/collector/mark_sweep.cc          144
-rw-r--r--  runtime/gc/collector/semi_space.cc           83
4 files changed, 127 insertions, 164 deletions
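The bulk of the diff below is a mechanical conversion from paired GetTimings()->StartSplit(...) / EndSplit() calls to a stack-allocated TimingLogger::ScopedTiming whose destructor emits the stop event, so a split can no longer be left open on an early return. A sketch of that RAII shape, using a hypothetical SimpleTimingLogger rather than ART's real class:

#include <chrono>
#include <cstdint>
#include <string>
#include <vector>

// Hypothetical stand-in for ART's TimingLogger; details differ.
class SimpleTimingLogger {
 public:
  class ScopedTiming {
   public:
    ScopedTiming(const char* name, SimpleTimingLogger* logger) : logger_(logger) {
      logger_->Push(name, /*is_start=*/true);  // open bracket on construction
    }
    // Close the current split and immediately open a new one,
    // mirroring the t.NewTiming("...") calls in this change.
    void NewTiming(const char* name) {
      logger_->Push("", /*is_start=*/false);
      logger_->Push(name, /*is_start=*/true);
    }
    ~ScopedTiming() { logger_->Push("", /*is_start=*/false); }  // close bracket

   private:
    SimpleTimingLogger* const logger_;
  };

 private:
  struct Event {
    std::string name;
    bool is_start;
    uint64_t ns;
  };
  // The only work on the hot path: one timestamp plus one vector push,
  // which is where the lower overhead comes from.
  void Push(const char* name, bool is_start) {
    const uint64_t ns = static_cast<uint64_t>(
        std::chrono::duration_cast<std::chrono::nanoseconds>(
            std::chrono::steady_clock::now().time_since_epoch()).count());
    events_.push_back(Event{name, is_start, ns});
  }
  std::vector<Event> events_;
};

// Usage mirrors the diff: the split opens on construction and closes
// when the scope ends, e.g.
//   SimpleTimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());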
diff --git a/runtime/gc/collector/garbage_collector.cc b/runtime/gc/collector/garbage_collector.cc
index 8622fd6..46d79bf 100644
--- a/runtime/gc/collector/garbage_collector.cc
+++ b/runtime/gc/collector/garbage_collector.cc
@@ -101,6 +101,7 @@ void GarbageCollector::Run(GcCause gc_cause, bool clear_soft_references) {
}
void GarbageCollector::SwapBitmaps() {
+ TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
// Swap the live and mark bitmaps for each alloc space. This is needed since sweep re-swaps
// these bitmaps. The bitmap swapping is an optimization so that we do not need to clear the live
// bits of dead objects in the live bitmap.
diff --git a/runtime/gc/collector/mark_compact.cc b/runtime/gc/collector/mark_compact.cc
index ebd1738..4044852 100644
--- a/runtime/gc/collector/mark_compact.cc
+++ b/runtime/gc/collector/mark_compact.cc
@@ -49,7 +49,6 @@
#include "thread-inl.h"
#include "thread_list.h"
-using ::art::mirror::Class;
using ::art::mirror::Object;
namespace art {
@@ -57,7 +56,7 @@ namespace gc {
namespace collector {
void MarkCompact::BindBitmaps() {
- GetTimings()->StartSplit("BindBitmaps");
+ TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
// Mark all of the spaces we never collect as immune.
for (const auto& space : GetHeap()->GetContinuousSpaces()) {
@@ -66,7 +65,6 @@ void MarkCompact::BindBitmaps() {
CHECK(immune_region_.AddContinuousSpace(space)) << "Failed to add space " << *space;
}
}
- GetTimings()->EndSplit();
}
MarkCompact::MarkCompact(Heap* heap, const std::string& name_prefix)
@@ -120,7 +118,7 @@ class CalculateObjectForwardingAddressVisitor {
};
void MarkCompact::CalculateObjectForwardingAddresses() {
- GetTimings()->NewSplit(__FUNCTION__);
+ TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
// The bump pointer in the space where the next forwarding address will be.
bump_pointer_ = reinterpret_cast<byte*>(space_->Begin());
// Visit all the marked objects in the bitmap.
@@ -131,7 +129,7 @@ void MarkCompact::CalculateObjectForwardingAddresses() {
}
void MarkCompact::InitializePhase() {
- TimingLogger::ScopedSplit split("InitializePhase", GetTimings());
+ TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
mark_stack_ = heap_->GetMarkStack();
DCHECK(mark_stack_ != nullptr);
immune_region_.Reset();
@@ -143,7 +141,6 @@ void MarkCompact::InitializePhase() {
}
void MarkCompact::ProcessReferences(Thread* self) {
- TimingLogger::ScopedSplit split("ProcessReferences", GetTimings());
WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
heap_->GetReferenceProcessor()->ProcessReferences(
false, GetTimings(), GetCurrentIteration()->GetClearSoftReferences(),
@@ -187,6 +184,7 @@ inline void MarkCompact::MarkObject(mirror::Object* obj) {
}
void MarkCompact::MarkingPhase() {
+ TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
Thread* self = Thread::Current();
// Bitmap which describes which objects we have to move.
objects_before_forwarding_.reset(accounting::ContinuousSpaceBitmap::Create(
@@ -195,21 +193,22 @@ void MarkCompact::MarkingPhase() {
objects_with_lockword_.reset(accounting::ContinuousSpaceBitmap::Create(
"objects with lock words", space_->Begin(), space_->Size()));
CHECK(Locks::mutator_lock_->IsExclusiveHeld(self));
- TimingLogger::ScopedSplit split("MarkingPhase", GetTimings());
// Assume the cleared space is already empty.
BindBitmaps();
+ t.NewTiming("ProcessCards");
// Process dirty cards and add dirty cards to mod-union tables.
heap_->ProcessCards(GetTimings(), false);
// Clear the whole card table since we can not Get any additional dirty cards during the
// paused GC. This saves memory but only works for pause the world collectors.
- GetTimings()->NewSplit("ClearCardTable");
+ t.NewTiming("ClearCardTable");
heap_->GetCardTable()->ClearCardTable();
// Need to do this before the checkpoint since we don't want any threads to add references to
// the live stack during the recursive mark.
- GetTimings()->NewSplit("SwapStacks");
if (kUseThreadLocalAllocationStack) {
+ t.NewTiming("RevokeAllThreadLocalAllocationStacks");
heap_->RevokeAllThreadLocalAllocationStacks(self);
}
+ t.NewTiming("SwapStacks");
heap_->SwapStacks(self);
{
WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
@@ -227,21 +226,20 @@ void MarkCompact::MarkingPhase() {
// Revoke buffers before measuring how many objects were moved since the TLABs need to be revoked
// before they are properly counted.
RevokeAllThreadLocalBuffers();
- GetTimings()->StartSplit("PreSweepingGcVerification");
// Disabled due to an issue where we have objects in the bump pointer space which reference dead
// objects.
// heap_->PreSweepingGcVerification(this);
- GetTimings()->EndSplit();
}
void MarkCompact::UpdateAndMarkModUnion() {
+ TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
for (auto& space : heap_->GetContinuousSpaces()) {
// If the space is immune then we need to mark the references to other spaces.
if (immune_region_.ContainsSpace(space)) {
accounting::ModUnionTable* table = heap_->FindModUnionTableFromSpace(space);
if (table != nullptr) {
// TODO: Improve naming.
- TimingLogger::ScopedSplit split(
+ TimingLogger::ScopedTiming t(
space->IsZygoteSpace() ? "UpdateAndMarkZygoteModUnionTable" :
"UpdateAndMarkImageModUnionTable", GetTimings());
table->UpdateAndMarkReferences(MarkHeapReferenceCallback, this);
@@ -251,28 +249,28 @@ void MarkCompact::UpdateAndMarkModUnion() {
}
void MarkCompact::MarkReachableObjects() {
- GetTimings()->StartSplit("MarkStackAsLive");
+ TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
accounting::ObjectStack* live_stack = heap_->GetLiveStack();
- heap_->MarkAllocStackAsLive(live_stack);
+ {
+ TimingLogger::ScopedTiming t2("MarkAllocStackAsLive", GetTimings());
+ heap_->MarkAllocStackAsLive(live_stack);
+ }
live_stack->Reset();
// Recursively process the mark stack.
ProcessMarkStack();
- GetTimings()->EndSplit();
}
void MarkCompact::ReclaimPhase() {
- TimingLogger::ScopedSplit split("ReclaimPhase", GetTimings());
+ TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
// Reclaim unmarked objects.
Sweep(false);
// Swap the live and mark bitmaps for each space which we modified space. This is an
// optimization that enables us to not clear live bits inside of the sweep. Only swaps unbound
// bitmaps.
- GetTimings()->StartSplit("SwapBitmapsAndUnBindBitmaps");
SwapBitmaps();
GetHeap()->UnBindBitmaps(); // Unbind the live and mark bitmaps.
Compact();
- GetTimings()->EndSplit();
}
void MarkCompact::ResizeMarkStack(size_t new_size) {
@@ -340,7 +338,7 @@ class UpdateObjectReferencesVisitor {
};
void MarkCompact::UpdateReferences() {
- GetTimings()->NewSplit(__FUNCTION__);
+ TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
Runtime* runtime = Runtime::Current();
// Update roots.
runtime->VisitRoots(UpdateRootCallback, this);
@@ -350,7 +348,7 @@ void MarkCompact::UpdateReferences() {
accounting::ModUnionTable* table = heap_->FindModUnionTableFromSpace(space);
if (table != nullptr) {
// TODO: Improve naming.
- TimingLogger::ScopedSplit split(
+ TimingLogger::ScopedTiming t(
space->IsZygoteSpace() ? "UpdateZygoteModUnionTableReferences" :
"UpdateImageModUnionTableReferences",
GetTimings());
@@ -381,7 +379,7 @@ void MarkCompact::UpdateReferences() {
}
void MarkCompact::Compact() {
- GetTimings()->NewSplit(__FUNCTION__);
+ TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
CalculateObjectForwardingAddresses();
UpdateReferences();
MoveObjects();
@@ -389,7 +387,7 @@ void MarkCompact::Compact() {
int64_t objects_freed = space_->GetObjectsAllocated() - live_objects_in_space_;
int64_t bytes_freed = reinterpret_cast<int64_t>(space_->End()) -
reinterpret_cast<int64_t>(bump_pointer_);
- GetTimings()->NewSplit("RecordFree");
+ t.NewTiming("RecordFree");
space_->RecordFree(objects_freed, bytes_freed);
RecordFree(ObjectBytePair(objects_freed, bytes_freed));
space_->SetEnd(bump_pointer_);
@@ -399,7 +397,7 @@ void MarkCompact::Compact() {
// Marks all objects in the root set.
void MarkCompact::MarkRoots() {
- GetTimings()->NewSplit("MarkRoots");
+ TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
Runtime::Current()->VisitRoots(MarkRootCallback, this);
}
@@ -483,9 +481,8 @@ bool MarkCompact::HeapReferenceMarkedCallback(mirror::HeapReference<mirror::Obje
}
void MarkCompact::SweepSystemWeaks() {
- GetTimings()->StartSplit("SweepSystemWeaks");
+ TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
Runtime::Current()->SweepSystemWeaks(IsMarkedCallback, this);
- GetTimings()->EndSplit();
}
bool MarkCompact::ShouldSweepSpace(space::ContinuousSpace* space) const {
@@ -523,7 +520,7 @@ void MarkCompact::MoveObject(mirror::Object* obj, size_t len) {
}
void MarkCompact::MoveObjects() {
- GetTimings()->NewSplit(__FUNCTION__);
+ TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
// Move the objects in the before forwarding bitmap.
MoveObjectVisitor visitor(this);
objects_before_forwarding_->VisitMarkedRange(reinterpret_cast<uintptr_t>(space_->Begin()),
@@ -533,15 +530,15 @@ void MarkCompact::MoveObjects() {
}
void MarkCompact::Sweep(bool swap_bitmaps) {
+ TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
DCHECK(mark_stack_->IsEmpty());
- TimingLogger::ScopedSplit split("Sweep", GetTimings());
for (const auto& space : GetHeap()->GetContinuousSpaces()) {
if (space->IsContinuousMemMapAllocSpace()) {
space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
if (!ShouldSweepSpace(alloc_space)) {
continue;
}
- TimingLogger::ScopedSplit split(
+ TimingLogger::ScopedTiming t(
alloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepAllocSpace", GetTimings());
RecordFree(alloc_space->Sweep(swap_bitmaps));
}
@@ -550,7 +547,7 @@ void MarkCompact::Sweep(bool swap_bitmaps) {
}
void MarkCompact::SweepLargeObjects(bool swap_bitmaps) {
- TimingLogger::ScopedSplit split("SweepLargeObjects", GetTimings());
+ TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
RecordFreeLOS(heap_->GetLargeObjectsSpace()->Sweep(swap_bitmaps));
}
@@ -590,13 +587,12 @@ void MarkCompact::ScanObject(Object* obj) {
// Scan anything that's on the mark stack.
void MarkCompact::ProcessMarkStack() {
- GetTimings()->StartSplit("ProcessMarkStack");
+ TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
while (!mark_stack_->IsEmpty()) {
Object* obj = mark_stack_->PopBack();
DCHECK(obj != nullptr);
ScanObject(obj);
}
- GetTimings()->EndSplit();
}
void MarkCompact::SetSpace(space::BumpPointerSpace* space) {
@@ -605,7 +601,7 @@ void MarkCompact::SetSpace(space::BumpPointerSpace* space) {
}
void MarkCompact::FinishPhase() {
- TimingLogger::ScopedSplit split("FinishPhase", GetTimings());
+ TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
space_ = nullptr;
CHECK(mark_stack_->IsEmpty());
mark_stack_->Reset();
@@ -618,9 +614,8 @@ void MarkCompact::FinishPhase() {
}
void MarkCompact::RevokeAllThreadLocalBuffers() {
- GetTimings()->StartSplit("(Paused)RevokeAllThreadLocalBuffers");
+ TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
GetHeap()->RevokeAllThreadLocalBuffers();
- GetTimings()->EndSplit();
}
} // namespace collector
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index d08796b..7e97b3b 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -81,7 +81,7 @@ static constexpr bool kVerifyRootsMarked = kIsDebugBuild;
static constexpr bool kRevokeRosAllocThreadLocalBuffersAtCheckpoint = true;
void MarkSweep::BindBitmaps() {
- GetTimings()->StartSplit("BindBitmaps");
+ TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
// Mark all of the spaces we never collect as immune.
for (const auto& space : GetHeap()->GetContinuousSpaces()) {
@@ -89,7 +89,6 @@ void MarkSweep::BindBitmaps() {
CHECK(immune_region_.AddContinuousSpace(space)) << "Failed to add space " << *space;
}
}
- GetTimings()->EndSplit();
}
MarkSweep::MarkSweep(Heap* heap, bool is_concurrent, const std::string& name_prefix)
@@ -110,7 +109,7 @@ MarkSweep::MarkSweep(Heap* heap, bool is_concurrent, const std::string& name_pre
}
void MarkSweep::InitializePhase() {
- TimingLogger::ScopedSplit split("InitializePhase", GetTimings());
+ TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
mark_stack_ = heap_->GetMarkStack();
DCHECK(mark_stack_ != nullptr);
immune_region_.Reset();
@@ -170,7 +169,6 @@ void MarkSweep::RunPhases() {
}
void MarkSweep::ProcessReferences(Thread* self) {
- TimingLogger::ScopedSplit split("ProcessReferences", GetTimings());
WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
GetHeap()->GetReferenceProcessor()->ProcessReferences(
true, GetTimings(), GetCurrentIteration()->GetClearSoftReferences(),
@@ -178,7 +176,7 @@ void MarkSweep::ProcessReferences(Thread* self) {
}
void MarkSweep::PausePhase() {
- TimingLogger::ScopedSplit split("(Paused)PausePhase", GetTimings());
+ TimingLogger::ScopedTiming t("(Paused)PausePhase", GetTimings());
Thread* self = Thread::Current();
Locks::mutator_lock_->AssertExclusiveHeld(self);
if (IsConcurrent()) {
@@ -190,7 +188,7 @@ void MarkSweep::PausePhase() {
RecursiveMarkDirtyObjects(true, accounting::CardTable::kCardDirty);
}
{
- TimingLogger::ScopedSplit split("SwapStacks", GetTimings());
+ TimingLogger::ScopedTiming t2("SwapStacks", GetTimings());
WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
heap_->SwapStacks(self);
live_stack_freeze_size_ = heap_->GetLiveStack()->Size();
@@ -198,9 +196,7 @@ void MarkSweep::PausePhase() {
// stacks and don't want anybody to allocate into the live stack.
RevokeAllThreadLocalAllocationStacks(self);
}
- GetTimings()->StartSplit("PreSweepingGcVerification");
heap_->PreSweepingGcVerification(this);
- GetTimings()->EndSplit();
// Disallow new system weaks to prevent a race which occurs when someone adds a new system
// weak before we sweep them. Since this new system weak may not be marked, the GC may
// incorrectly sweep it. This also fixes a race where interning may attempt to return a strong
@@ -214,6 +210,7 @@ void MarkSweep::PausePhase() {
void MarkSweep::PreCleanCards() {
// Don't do this for non concurrent GCs since they don't have any dirty cards.
if (kPreCleanCards && IsConcurrent()) {
+ TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
Thread* self = Thread::Current();
CHECK(!Locks::mutator_lock_->IsExclusiveHeld(self));
// Process dirty cards and add dirty cards to mod union tables, also ages cards.
@@ -243,14 +240,14 @@ void MarkSweep::PreCleanCards() {
void MarkSweep::RevokeAllThreadLocalAllocationStacks(Thread* self) {
if (kUseThreadLocalAllocationStack) {
- GetTimings()->NewSplit("RevokeAllThreadLocalAllocationStacks");
+ TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
Locks::mutator_lock_->AssertExclusiveHeld(self);
heap_->RevokeAllThreadLocalAllocationStacks(self);
}
}
void MarkSweep::MarkingPhase() {
- TimingLogger::ScopedSplit split("MarkingPhase", GetTimings());
+ TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
Thread* self = Thread::Current();
BindBitmaps();
FindDefaultSpaceBitmap();
@@ -268,7 +265,7 @@ void MarkSweep::UpdateAndMarkModUnion() {
if (immune_region_.ContainsSpace(space)) {
const char* name = space->IsZygoteSpace() ? "UpdateAndMarkZygoteModUnionTable" :
"UpdateAndMarkImageModUnionTable";
- TimingLogger::ScopedSplit split(name, GetTimings());
+ TimingLogger::ScopedTiming t(name, GetTimings());
accounting::ModUnionTable* mod_union_table = heap_->FindModUnionTableFromSpace(space);
CHECK(mod_union_table != nullptr);
mod_union_table->UpdateAndMarkReferences(MarkHeapReferenceCallback, this);
@@ -283,7 +280,7 @@ void MarkSweep::MarkReachableObjects() {
}
void MarkSweep::ReclaimPhase() {
- TimingLogger::ScopedSplit split("ReclaimPhase", GetTimings());
+ TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
Thread* self = Thread::Current();
// Process the references concurrently.
ProcessReferences(self);
@@ -291,25 +288,19 @@ void MarkSweep::ReclaimPhase() {
Runtime::Current()->AllowNewSystemWeaks();
{
WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
-
// Reclaim unmarked objects.
Sweep(false);
-
// Swap the live and mark bitmaps for each space which we modified space. This is an
// optimization that enables us to not clear live bits inside of the sweep. Only swaps unbound
// bitmaps.
- GetTimings()->StartSplit("SwapBitmaps");
SwapBitmaps();
- GetTimings()->EndSplit();
-
// Unbind the live and mark bitmaps.
- TimingLogger::ScopedSplit split("UnBindBitmaps", GetTimings());
GetHeap()->UnBindBitmaps();
}
}
void MarkSweep::FindDefaultSpaceBitmap() {
- TimingLogger::ScopedSplit split("FindDefaultMarkBitmap", GetTimings());
+ TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
for (const auto& space : GetHeap()->GetContinuousSpaces()) {
accounting::ContinuousSpaceBitmap* bitmap = space->GetMarkBitmap();
// We want to have the main space instead of non moving if possible.
@@ -506,11 +497,10 @@ void MarkSweep::VerifyRoots() {
}
void MarkSweep::MarkRoots(Thread* self) {
+ TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
if (Locks::mutator_lock_->IsExclusiveHeld(self)) {
// If we exclusively hold the mutator lock, all threads must be suspended.
- GetTimings()->StartSplit("MarkRoots");
Runtime::Current()->VisitRoots(MarkRootCallback, this);
- GetTimings()->EndSplit();
RevokeAllThreadLocalAllocationStacks(self);
} else {
MarkRootsCheckpoint(self, kRevokeRosAllocThreadLocalBuffersAtCheckpoint);
@@ -522,16 +512,14 @@ void MarkSweep::MarkRoots(Thread* self) {
}
void MarkSweep::MarkNonThreadRoots() {
- GetTimings()->StartSplit("MarkNonThreadRoots");
+ TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
Runtime::Current()->VisitNonThreadRoots(MarkRootCallback, this);
- GetTimings()->EndSplit();
}
void MarkSweep::MarkConcurrentRoots(VisitRootFlags flags) {
- GetTimings()->StartSplit("MarkConcurrentRoots");
+ TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
// Visit all runtime roots and clear dirty flags.
Runtime::Current()->VisitConcurrentRoots(MarkRootCallback, this, flags);
- GetTimings()->EndSplit();
}
class ScanObjectVisitor {
@@ -752,7 +740,8 @@ void MarkSweep::ScanGrayObjects(bool paused, byte minimum_age) {
Thread* self = Thread::Current();
// Can't have a different split for each space since multiple spaces can have their cards being
// scanned at the same time.
- GetTimings()->StartSplit(paused ? "(Paused)ScanGrayObjects" : "ScanGrayObjects");
+ TimingLogger::ScopedTiming t(paused ? "(Paused)ScanGrayObjects" : __FUNCTION__,
+ GetTimings());
// Try to take some of the mark stack since we can pass this off to the worker tasks.
Object** mark_stack_begin = mark_stack_->Begin();
Object** mark_stack_end = mark_stack_->End();
@@ -805,28 +794,28 @@ void MarkSweep::ScanGrayObjects(bool paused, byte minimum_age) {
thread_pool->StartWorkers(self);
thread_pool->Wait(self, true, true);
thread_pool->StopWorkers(self);
- GetTimings()->EndSplit();
} else {
for (const auto& space : GetHeap()->GetContinuousSpaces()) {
if (space->GetMarkBitmap() != nullptr) {
// Image spaces are handled properly since live == marked for them.
+ const char* name = nullptr;
switch (space->GetGcRetentionPolicy()) {
- case space::kGcRetentionPolicyNeverCollect:
- GetTimings()->StartSplit(paused ? "(Paused)ScanGrayImageSpaceObjects" :
- "ScanGrayImageSpaceObjects");
- break;
- case space::kGcRetentionPolicyFullCollect:
- GetTimings()->StartSplit(paused ? "(Paused)ScanGrayZygoteSpaceObjects" :
- "ScanGrayZygoteSpaceObjects");
- break;
- case space::kGcRetentionPolicyAlwaysCollect:
- GetTimings()->StartSplit(paused ? "(Paused)ScanGrayAllocSpaceObjects" :
- "ScanGrayAllocSpaceObjects");
- break;
- }
+ case space::kGcRetentionPolicyNeverCollect:
+ name = paused ? "(Paused)ScanGrayImageSpaceObjects" : "ScanGrayImageSpaceObjects";
+ break;
+ case space::kGcRetentionPolicyFullCollect:
+ name = paused ? "(Paused)ScanGrayZygoteSpaceObjects" : "ScanGrayZygoteSpaceObjects";
+ break;
+ case space::kGcRetentionPolicyAlwaysCollect:
+ name = paused ? "(Paused)ScanGrayAllocSpaceObjects" : "ScanGrayAllocSpaceObjects";
+ break;
+ default:
+ LOG(FATAL) << "Unreachable";
+ }
+ TimingLogger::ScopedTiming t(name, GetTimings());
ScanObjectVisitor visitor(this);
- card_table->Scan(space->GetMarkBitmap(), space->Begin(), space->End(), visitor, minimum_age);
- GetTimings()->EndSplit();
+ card_table->Scan(space->GetMarkBitmap(), space->Begin(), space->End(), visitor,
+ minimum_age);
}
}
}
@@ -836,9 +825,7 @@ class RecursiveMarkTask : public MarkStackTask<false> {
public:
RecursiveMarkTask(ThreadPool* thread_pool, MarkSweep* mark_sweep,
accounting::ContinuousSpaceBitmap* bitmap, uintptr_t begin, uintptr_t end)
- : MarkStackTask<false>(thread_pool, mark_sweep, 0, NULL),
- bitmap_(bitmap),
- begin_(begin),
+ : MarkStackTask<false>(thread_pool, mark_sweep, 0, NULL), bitmap_(bitmap), begin_(begin),
end_(end) {
}
@@ -863,7 +850,7 @@ class RecursiveMarkTask : public MarkStackTask<false> {
// Populates the mark stack based on the set of marked objects and
// recursively marks until the mark stack is emptied.
void MarkSweep::RecursiveMark() {
- TimingLogger::ScopedSplit split("RecursiveMark", GetTimings());
+ TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
// RecursiveMark will build the lists of known instances of the Reference classes. See
// DelayReferenceReferent for details.
if (kUseRecursiveMark) {
@@ -930,25 +917,22 @@ void MarkSweep::RecursiveMarkDirtyObjects(bool paused, byte minimum_age) {
}
void MarkSweep::ReMarkRoots() {
+ TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
- GetTimings()->StartSplit("(Paused)ReMarkRoots");
Runtime::Current()->VisitRoots(
MarkRootCallback, this, static_cast<VisitRootFlags>(kVisitRootFlagNewRoots |
kVisitRootFlagStopLoggingNewRoots |
kVisitRootFlagClearRootLog));
- GetTimings()->EndSplit();
if (kVerifyRootsMarked) {
- GetTimings()->StartSplit("(Paused)VerifyRoots");
+ TimingLogger::ScopedTiming t("(Paused)VerifyRoots", GetTimings());
Runtime::Current()->VisitRoots(VerifyRootMarked, this);
- GetTimings()->EndSplit();
}
}
void MarkSweep::SweepSystemWeaks(Thread* self) {
+ TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
- GetTimings()->StartSplit("SweepSystemWeaks");
Runtime::Current()->SweepSystemWeaks(IsMarkedCallback, this);
- GetTimings()->EndSplit();
}
mirror::Object* MarkSweep::VerifySystemWeakIsLiveCallback(Object* obj, void* arg) {
@@ -969,6 +953,7 @@ void MarkSweep::VerifyIsLive(const Object* obj) {
}
void MarkSweep::VerifySystemWeaks() {
+ TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
// Verify system weaks, uses a special object visitor which returns the input object.
Runtime::Current()->SweepSystemWeaks(VerifySystemWeakIsLiveCallback, this);
}
@@ -1005,8 +990,8 @@ class CheckpointMarkThreadRoots : public Closure {
void MarkSweep::MarkRootsCheckpoint(Thread* self,
bool revoke_ros_alloc_thread_local_buffers_at_checkpoint) {
+ TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
CheckpointMarkThreadRoots check_point(this, revoke_ros_alloc_thread_local_buffers_at_checkpoint);
- GetTimings()->StartSplit("MarkRootsCheckpoint");
ThreadList* thread_list = Runtime::Current()->GetThreadList();
// Request the check point is run on all threads returning a count of the threads that must
// run through the barrier including self.
@@ -1021,11 +1006,10 @@ void MarkSweep::MarkRootsCheckpoint(Thread* self,
}
Locks::mutator_lock_->SharedLock(self);
Locks::heap_bitmap_lock_->ExclusiveLock(self);
- GetTimings()->EndSplit();
}
void MarkSweep::SweepArray(accounting::ObjectStack* allocations, bool swap_bitmaps) {
- GetTimings()->StartSplit("SweepArray");
+ TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
Thread* self = Thread::Current();
mirror::Object** chunk_free_buffer = reinterpret_cast<mirror::Object**>(
sweep_array_free_buffer_mem_map_->BaseBegin());
@@ -1072,10 +1056,9 @@ void MarkSweep::SweepArray(accounting::ObjectStack* allocations, bool swap_bitma
// if needed.
if (!mark_bitmap->Test(obj)) {
if (chunk_free_pos >= kSweepArrayChunkFreeSize) {
- GetTimings()->StartSplit("FreeList");
+ TimingLogger::ScopedTiming t("FreeList", GetTimings());
freed.objects += chunk_free_pos;
freed.bytes += alloc_space->FreeList(self, chunk_free_pos, chunk_free_buffer);
- GetTimings()->EndSplit();
chunk_free_pos = 0;
}
chunk_free_buffer[chunk_free_pos++] = obj;
@@ -1085,10 +1068,9 @@ void MarkSweep::SweepArray(accounting::ObjectStack* allocations, bool swap_bitma
}
}
if (chunk_free_pos > 0) {
- GetTimings()->StartSplit("FreeList");
+ TimingLogger::ScopedTiming t("FreeList", GetTimings());
freed.objects += chunk_free_pos;
freed.bytes += alloc_space->FreeList(self, chunk_free_pos, chunk_free_buffer);
- GetTimings()->EndSplit();
chunk_free_pos = 0;
}
// All of the references which space contained are no longer in the allocation stack, update
@@ -1113,31 +1095,33 @@ void MarkSweep::SweepArray(accounting::ObjectStack* allocations, bool swap_bitma
freed_los.bytes += large_object_space->Free(self, obj);
}
}
- GetTimings()->NewSplit("RecordFree");
- RecordFree(freed);
- RecordFreeLOS(freed_los);
- GetTimings()->NewSplit("ResetStack");
- allocations->Reset();
- GetTimings()->EndSplit();
+ {
+ TimingLogger::ScopedTiming t("RecordFree", GetTimings());
+ RecordFree(freed);
+ RecordFreeLOS(freed_los);
+ t.NewTiming("ResetStack");
+ allocations->Reset();
+ }
sweep_array_free_buffer_mem_map_->MadviseDontNeedAndZero();
}
void MarkSweep::Sweep(bool swap_bitmaps) {
+ TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
// Ensure that nobody inserted items in the live stack after we swapped the stacks.
CHECK_GE(live_stack_freeze_size_, GetHeap()->GetLiveStack()->Size());
- // Mark everything allocated since the last as GC live so that we can sweep concurrently,
- // knowing that new allocations won't be marked as live.
- GetTimings()->StartSplit("MarkStackAsLive");
- accounting::ObjectStack* live_stack = heap_->GetLiveStack();
- heap_->MarkAllocStackAsLive(live_stack);
- live_stack->Reset();
- GetTimings()->EndSplit();
-
- DCHECK(mark_stack_->IsEmpty());
+ {
+ TimingLogger::ScopedTiming t2("MarkAllocStackAsLive", GetTimings());
+ // Mark everything allocated since the last as GC live so that we can sweep concurrently,
+ // knowing that new allocations won't be marked as live.
+ accounting::ObjectStack* live_stack = heap_->GetLiveStack();
+ heap_->MarkAllocStackAsLive(live_stack);
+ live_stack->Reset();
+ DCHECK(mark_stack_->IsEmpty());
+ }
for (const auto& space : GetHeap()->GetContinuousSpaces()) {
if (space->IsContinuousMemMapAllocSpace()) {
space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
- TimingLogger::ScopedSplit split(
+ TimingLogger::ScopedTiming split(
alloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepMallocSpace", GetTimings());
RecordFree(alloc_space->Sweep(swap_bitmaps));
}
@@ -1146,7 +1130,7 @@ void MarkSweep::Sweep(bool swap_bitmaps) {
}
void MarkSweep::SweepLargeObjects(bool swap_bitmaps) {
- TimingLogger::ScopedSplit split("SweepLargeObjects", GetTimings());
+ TimingLogger::ScopedTiming split(__FUNCTION__, GetTimings());
RecordFreeLOS(heap_->GetLargeObjectsSpace()->Sweep(swap_bitmaps));
}
@@ -1215,7 +1199,7 @@ void MarkSweep::ProcessMarkStackParallel(size_t thread_count) {
// Scan anything that's on the mark stack.
void MarkSweep::ProcessMarkStack(bool paused) {
- GetTimings()->StartSplit(paused ? "(Paused)ProcessMarkStack" : "ProcessMarkStack");
+ TimingLogger::ScopedTiming t(paused ? "(Paused)ProcessMarkStack" : __FUNCTION__, GetTimings());
size_t thread_count = GetThreadCount(paused);
if (kParallelProcessMarkStack && thread_count > 1 &&
mark_stack_->Size() >= kMinimumParallelMarkStackSize) {
@@ -1248,7 +1232,6 @@ void MarkSweep::ProcessMarkStack(bool paused) {
ScanObject(obj);
}
}
- GetTimings()->EndSplit();
}
inline bool MarkSweep::IsMarked(const Object* object) const {
@@ -1262,7 +1245,7 @@ inline bool MarkSweep::IsMarked(const Object* object) const {
}
void MarkSweep::FinishPhase() {
- TimingLogger::ScopedSplit split("FinishPhase", GetTimings());
+ TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
if (kCountScannedTypes) {
VLOG(gc) << "MarkSweep scanned classes=" << class_count_.LoadRelaxed()
<< " arrays=" << array_count_.LoadRelaxed() << " other=" << other_count_.LoadRelaxed();
@@ -1299,9 +1282,8 @@ void MarkSweep::RevokeAllThreadLocalBuffers() {
// not be in use.
GetHeap()->AssertAllBumpPointerSpaceThreadLocalBuffersAreRevoked();
} else {
- GetTimings()->StartSplit("(Paused)RevokeAllThreadLocalBuffers");
+ TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
GetHeap()->RevokeAllThreadLocalBuffers();
- GetTimings()->EndSplit();
}
}
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index 8a3ac9d..cabfe21 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -59,7 +59,7 @@ static constexpr size_t kBytesPromotedThreshold = 4 * MB;
static constexpr size_t kLargeObjectBytesAllocatedThreshold = 16 * MB;
void SemiSpace::BindBitmaps() {
- GetTimings()->StartSplit("BindBitmaps");
+ TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
WriterMutexLock mu(self_, *Locks::heap_bitmap_lock_);
// Mark all of the spaces we never collect as immune.
for (const auto& space : GetHeap()->GetContinuousSpaces()) {
@@ -83,7 +83,6 @@ void SemiSpace::BindBitmaps() {
// We won't collect the large object space if a bump pointer space only collection.
is_large_object_space_immune_ = true;
}
- GetTimings()->EndSplit();
}
SemiSpace::SemiSpace(Heap* heap, bool generational, const std::string& name_prefix)
@@ -131,7 +130,7 @@ void SemiSpace::RunPhases() {
}
void SemiSpace::InitializePhase() {
- TimingLogger::ScopedSplit split("InitializePhase", GetTimings());
+ TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
mark_stack_ = heap_->GetMarkStack();
DCHECK(mark_stack_ != nullptr);
immune_region_.Reset();
@@ -151,7 +150,6 @@ void SemiSpace::InitializePhase() {
}
void SemiSpace::ProcessReferences(Thread* self) {
- TimingLogger::ScopedSplit split("ProcessReferences", GetTimings());
WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
GetHeap()->GetReferenceProcessor()->ProcessReferences(
false, GetTimings(), GetCurrentIteration()->GetClearSoftReferences(),
@@ -159,6 +157,7 @@ void SemiSpace::ProcessReferences(Thread* self) {
}
void SemiSpace::MarkingPhase() {
+ TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
CHECK(Locks::mutator_lock_->IsExclusiveHeld(self_));
if (kStoreStackTraces) {
Locks::mutator_lock_->AssertExclusiveHeld(self_);
@@ -197,10 +196,7 @@ void SemiSpace::MarkingPhase() {
// If generational, clear soft references if a whole heap collection.
GetCurrentIteration()->SetClearSoftReferences(true);
}
-
Locks::mutator_lock_->AssertExclusiveHeld(self_);
-
- TimingLogger::ScopedSplit split("MarkingPhase", GetTimings());
if (generational_) {
// If last_gc_to_space_end_ is out of the bounds of the from-space
// (the to-space from last GC), then point it to the beginning of
@@ -218,12 +214,13 @@ void SemiSpace::MarkingPhase() {
heap_->ProcessCards(GetTimings(), kUseRememberedSet && generational_);
// Clear the whole card table since we can not Get any additional dirty cards during the
// paused GC. This saves memory but only works for pause the world collectors.
- GetTimings()->NewSplit("ClearCardTable");
+ t.NewTiming("ClearCardTable");
heap_->GetCardTable()->ClearCardTable();
// Need to do this before the checkpoint since we don't want any threads to add references to
// the live stack during the recursive mark.
- GetTimings()->NewSplit("SwapStacks");
+ t.NewTiming("SwapStacks");
if (kUseThreadLocalAllocationStack) {
+ TimingLogger::ScopedTiming t("RevokeAllThreadLocalAllocationStacks", GetTimings());
heap_->RevokeAllThreadLocalAllocationStacks(self_);
}
heap_->SwapStacks(self_);
@@ -240,7 +237,6 @@ void SemiSpace::MarkingPhase() {
ReaderMutexLock mu(self_, *Locks::heap_bitmap_lock_);
SweepSystemWeaks();
}
- GetTimings()->NewSplit("RecordFree");
// Revoke buffers before measuring how many objects were moved since the TLABs need to be revoked
// before they are properly counted.
RevokeAllThreadLocalBuffers();
@@ -257,9 +253,7 @@ void SemiSpace::MarkingPhase() {
from_space_->Clear();
VLOG(heap) << "Protecting from_space_: " << *from_space_;
from_space_->GetMemMap()->Protect(kProtectFromSpace ? PROT_NONE : PROT_READ);
- GetTimings()->StartSplit("PreSweepingGcVerification");
heap_->PreSweepingGcVerification(this);
- GetTimings()->EndSplit();
if (swap_semi_spaces_) {
heap_->SwapSemiSpaces();
}
@@ -272,7 +266,7 @@ void SemiSpace::UpdateAndMarkModUnion() {
accounting::ModUnionTable* table = heap_->FindModUnionTableFromSpace(space);
if (table != nullptr) {
// TODO: Improve naming.
- TimingLogger::ScopedSplit split(
+ TimingLogger::ScopedTiming t(
space->IsZygoteSpace() ? "UpdateAndMarkZygoteModUnionTable" :
"UpdateAndMarkImageModUnionTable",
GetTimings());
@@ -354,12 +348,14 @@ class SemiSpaceVerifyNoFromSpaceReferencesObjectVisitor {
};
void SemiSpace::MarkReachableObjects() {
- GetTimings()->StartSplit("MarkStackAsLive");
- accounting::ObjectStack* live_stack = heap_->GetLiveStack();
- heap_->MarkAllocStackAsLive(live_stack);
- live_stack->Reset();
-
- GetTimings()->NewSplit("UpdateAndMarkRememberedSets");
+ TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
+ {
+ TimingLogger::ScopedTiming t2("MarkStackAsLive", GetTimings());
+ accounting::ObjectStack* live_stack = heap_->GetLiveStack();
+ heap_->MarkAllocStackAsLive(live_stack);
+ live_stack->Reset();
+ }
+ t.NewTiming("UpdateAndMarkRememberedSets");
for (auto& space : heap_->GetContinuousSpaces()) {
// If the space is immune and has no mod union table (the
// non-moving space when the bump pointer space only collection is
@@ -398,7 +394,7 @@ void SemiSpace::MarkReachableObjects() {
}
if (is_large_object_space_immune_) {
- GetTimings()->NewSplit("VisitLargeObjects");
+ TimingLogger::ScopedTiming t("VisitLargeObjects", GetTimings());
DCHECK(generational_ && !whole_heap_collection_);
// Delay copying the live set to the marked set until here from
// BindBitmaps() as the large objects on the allocation stack may
@@ -416,31 +412,24 @@ void SemiSpace::MarkReachableObjects() {
reinterpret_cast<uintptr_t>(large_object_space->End()),
visitor);
}
- GetTimings()->EndSplit();
// Recursively process the mark stack.
ProcessMarkStack();
}
void SemiSpace::ReclaimPhase() {
- TimingLogger::ScopedSplit split("ReclaimPhase", GetTimings());
- {
- WriterMutexLock mu(self_, *Locks::heap_bitmap_lock_);
- // Reclaim unmarked objects.
- Sweep(false);
- // Swap the live and mark bitmaps for each space which we modified space. This is an
- // optimization that enables us to not clear live bits inside of the sweep. Only swaps unbound
- // bitmaps.
- GetTimings()->StartSplit("SwapBitmaps");
- SwapBitmaps();
- GetTimings()->EndSplit();
- // Unbind the live and mark bitmaps.
- TimingLogger::ScopedSplit split("UnBindBitmaps", GetTimings());
- GetHeap()->UnBindBitmaps();
- }
+ TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
+ WriterMutexLock mu(self_, *Locks::heap_bitmap_lock_);
+ // Reclaim unmarked objects.
+ Sweep(false);
+ // Swap the live and mark bitmaps for each space which we modified space. This is an
+ // optimization that enables us to not clear live bits inside of the sweep. Only swaps unbound
+ // bitmaps.
+ SwapBitmaps();
+ // Unbind the live and mark bitmaps.
+ GetHeap()->UnBindBitmaps();
if (saved_bytes_ > 0) {
VLOG(heap) << "Avoided dirtying " << PrettySize(saved_bytes_);
}
-
if (generational_) {
// Record the end (top) of the to space so we can distinguish
// between objects that were allocated since the last GC and the
@@ -629,8 +618,7 @@ void SemiSpace::MarkRootCallback(Object** root, void* arg, uint32_t /*thread_id*
// Marks all objects in the root set.
void SemiSpace::MarkRoots() {
- GetTimings()->NewSplit("MarkRoots");
- // TODO: Visit up image roots as well?
+ TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
Runtime::Current()->VisitRoots(MarkRootCallback, this);
}
@@ -655,9 +643,8 @@ mirror::Object* SemiSpace::MarkedForwardingAddressCallback(mirror::Object* objec
}
void SemiSpace::SweepSystemWeaks() {
- GetTimings()->StartSplit("SweepSystemWeaks");
+ TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
Runtime::Current()->SweepSystemWeaks(MarkedForwardingAddressCallback, this);
- GetTimings()->EndSplit();
}
bool SemiSpace::ShouldSweepSpace(space::ContinuousSpace* space) const {
@@ -665,15 +652,15 @@ bool SemiSpace::ShouldSweepSpace(space::ContinuousSpace* space) const {
}
void SemiSpace::Sweep(bool swap_bitmaps) {
+ TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
DCHECK(mark_stack_->IsEmpty());
- TimingLogger::ScopedSplit split("Sweep", GetTimings());
for (const auto& space : GetHeap()->GetContinuousSpaces()) {
if (space->IsContinuousMemMapAllocSpace()) {
space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
if (!ShouldSweepSpace(alloc_space)) {
continue;
}
- TimingLogger::ScopedSplit split(
+ TimingLogger::ScopedTiming split(
alloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepAllocSpace", GetTimings());
RecordFree(alloc_space->Sweep(swap_bitmaps));
}
@@ -685,7 +672,7 @@ void SemiSpace::Sweep(bool swap_bitmaps) {
void SemiSpace::SweepLargeObjects(bool swap_bitmaps) {
DCHECK(!is_large_object_space_immune_);
- TimingLogger::ScopedSplit split("SweepLargeObjects", GetTimings());
+ TimingLogger::ScopedTiming split("SweepLargeObjects", GetTimings());
RecordFreeLOS(heap_->GetLargeObjectsSpace()->Sweep(swap_bitmaps));
}
@@ -726,6 +713,7 @@ void SemiSpace::ScanObject(Object* obj) {
// Scan anything that's on the mark stack.
void SemiSpace::ProcessMarkStack() {
+ TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
space::MallocSpace* promo_dest_space = nullptr;
accounting::ContinuousSpaceBitmap* live_bitmap = nullptr;
if (generational_ && !whole_heap_collection_) {
@@ -739,7 +727,6 @@ void SemiSpace::ProcessMarkStack() {
DCHECK(mark_bitmap != nullptr);
DCHECK_EQ(live_bitmap, mark_bitmap);
}
- GetTimings()->StartSplit("ProcessMarkStack");
while (!mark_stack_->IsEmpty()) {
Object* obj = mark_stack_->PopBack();
if (generational_ && !whole_heap_collection_ && promo_dest_space->HasAddress(obj)) {
@@ -750,7 +737,6 @@ void SemiSpace::ProcessMarkStack() {
}
ScanObject(obj);
}
- GetTimings()->EndSplit();
}
inline Object* SemiSpace::GetMarkedForwardAddress(mirror::Object* obj) const
@@ -781,7 +767,7 @@ void SemiSpace::SetFromSpace(space::ContinuousMemMapAllocSpace* from_space) {
}
void SemiSpace::FinishPhase() {
- TimingLogger::ScopedSplit split("FinishPhase", GetTimings());
+ TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
// Null the "to" and "from" spaces since compacting from one to the other isn't valid until
// further action is done by the heap.
to_space_ = nullptr;
@@ -822,9 +808,8 @@ void SemiSpace::FinishPhase() {
}
void SemiSpace::RevokeAllThreadLocalBuffers() {
- GetTimings()->StartSplit("(Paused)RevokeAllThreadLocalBuffers");
+ TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
GetHeap()->RevokeAllThreadLocalBuffers();
- GetTimings()->EndSplit();
}
} // namespace collector