path: root/runtime/gc/collector/mark_sweep.h
author    Mathieu Chartier <mathieuc@google.com>  2014-03-28 10:05:39 -0700
committer Mathieu Chartier <mathieuc@google.com>  2014-03-28 13:53:58 -0700
commit    0f7bf6a3ad1798fde328a2bff48a4bf2d750a36b (patch)
tree      da16cfe399a3619671a90b502e8ddfd15c11ee3e /runtime/gc/collector/mark_sweep.h
parent    26162488da23d1cbd04e6112ea458847818f1dae (diff)
Swap allocation stacks in pause.
This enables us to collect objects allocated during the GC for sticky, partial, and full collections, and it significantly simplifies the GC code. No measured performance impact on benchmarks, but this should slightly increase sticky GC throughput.

Changed RevokeRosAllocThreadLocalBuffers to happen at most once per GC; previously it occurred twice when pre-cleaning was enabled.

Renamed HandleDirtyObjectsPhase to PausePhase and enabled it for non-concurrent GC. This removes code that was duplicated between HandleDirtyObjectsPhase (concurrent GC) and ReclaimPhase (non-concurrent GC).

Change-Id: I533414b5c2cd2800f00724418e0ff90e7fdb0252
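To illustrate the stack-swap idea (a minimal sketch with simplified stand-in types, not ART's actual Heap or ObjectStack): the heap keeps an allocation stack and a live stack, and the pause simply exchanges the two pointers while mutators are suspended, so objects allocated while the GC runs land on a fresh stack and the GC can consume the old one.

  #include <memory>
  #include <vector>

  namespace sketch {

  class Object {};  // Stand-in for mirror::Object.

  // Stand-in for ART's atomic ObjectStack.
  using ObjectStack = std::vector<Object*>;

  class Heap {
   public:
    Heap() : allocation_stack_(new ObjectStack), live_stack_(new ObjectStack) {}

    // Mutators record every new allocation here; a sticky GC later scans
    // the swapped-out stack to find objects allocated since the last GC.
    void RecordAllocation(Object* obj) { allocation_stack_->push_back(obj); }

    // Called during the pause while all mutator threads are suspended, so
    // no locking is needed. After the swap, allocations made while the GC
    // runs land on a fresh stack; the same mechanism serves sticky,
    // partial, and full collections.
    void SwapStacks() { allocation_stack_.swap(live_stack_); }

   private:
    std::unique_ptr<ObjectStack> allocation_stack_;
    std::unique_ptr<ObjectStack> live_stack_;
  };

  }  // namespace sketch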
Diffstat (limited to 'runtime/gc/collector/mark_sweep.h')
-rw-r--r--  runtime/gc/collector/mark_sweep.h | 19 ++++---------------
 1 file changed, 4 insertions(+), 15 deletions(-)
diff --git a/runtime/gc/collector/mark_sweep.h b/runtime/gc/collector/mark_sweep.h
index 9bf4cd1..f1fd546 100644
--- a/runtime/gc/collector/mark_sweep.h
+++ b/runtime/gc/collector/mark_sweep.h
@@ -58,7 +58,7 @@ class MarkSweep : public GarbageCollector {
   virtual void InitializePhase() OVERRIDE;
   virtual void MarkingPhase() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  virtual void HandleDirtyObjectsPhase() OVERRIDE EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+  virtual void PausePhase() OVERRIDE EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
   virtual void ReclaimPhase() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   virtual void FinishPhase() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   virtual void MarkReachableObjects()
@@ -96,7 +96,7 @@ class MarkSweep : public GarbageCollector {
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  void MarkRootsCheckpoint(Thread* self)
+  void MarkRootsCheckpoint(Thread* self, bool revoke_ros_alloc_thread_local_buffers_at_checkpoint)
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -159,8 +159,8 @@ class MarkSweep : public GarbageCollector {
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
-  void SweepSystemWeaks()
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
+  void SweepSystemWeaks(Thread* self)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
   static mirror::Object* VerifySystemWeakIsLiveCallback(mirror::Object* obj, void* arg)
       SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
@@ -224,17 +224,6 @@ class MarkSweep : public GarbageCollector {
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
-  // Unmarks an object by clearing the bit inside of the corresponding bitmap, or if it is in a
-  // space set, removing the object from the set.
-  void UnMarkObjectNonNull(const mirror::Object* obj)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
-
-  // Mark the vm thread roots.
-  void MarkThreadRoots(Thread* self)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
   // Marks an object atomically, safe to use from multiple threads.
   void MarkObjectNonNullParallel(mirror::Object* obj);
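For context on the MarkRootsCheckpoint change above, here is a hedged sketch (GcThread, CheckpointMarkThreadRoots, and the free function below are simplified stand-ins, not ART's real Thread or checkpoint machinery) of how a per-thread checkpoint can fold rosalloc buffer revocation into the root-marking pass so it runs at most once per GC:

  #include <vector>

  namespace sketch {

  // Stand-in for art::Thread with only the operations this sketch needs.
  class GcThread {
   public:
    void VisitRoots() { /* mark this thread's stack and register roots */ }
    void RevokeRosAllocThreadLocalBuffers() { /* return per-thread rosalloc runs */ }
  };

  // Closure run for each thread while it is stopped at the checkpoint.
  class CheckpointMarkThreadRoots {
   public:
    explicit CheckpointMarkThreadRoots(bool revoke_ros_alloc_buffers)
        : revoke_ros_alloc_buffers_(revoke_ros_alloc_buffers) {}

    void Run(GcThread* thread) {
      thread->VisitRoots();
      // Folding the revoke into a pass that already visits every thread
      // means it happens at most once per GC, instead of in a separate
      // walk (previously it could run twice when pre-cleaning was enabled).
      if (revoke_ros_alloc_buffers_) {
        thread->RevokeRosAllocThreadLocalBuffers();
      }
    }

   private:
    const bool revoke_ros_alloc_buffers_;
  };

  void MarkRootsCheckpoint(std::vector<GcThread*>& threads,
                           bool revoke_ros_alloc_thread_local_buffers_at_checkpoint) {
    CheckpointMarkThreadRoots closure(revoke_ros_alloc_thread_local_buffers_at_checkpoint);
    for (GcThread* thread : threads) {
      closure.Run(thread);
    }
  }

  }  // namespace sketch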