diff options
author | Zuo Wang <zuo.wang@intel.com> | 2014-07-10 04:26:41 -0700 |
---|---|---|
committer | Mathieu Chartier <mathieuc@google.com> | 2014-07-11 18:32:07 -0700 |
commit | f37a88b8e6db6c587fa449a12e40cb46be1689fc (patch) | |
tree | 2e1d8b20e87796e1ad5d682dcce2a52a37f20123 /runtime/gc/heap.h | |
parent | 9531f62ef260cbd0d0512e4c96f5d5dd2f4fdbb2 (diff) | |
download | art-f37a88b8e6db6c587fa449a12e40cb46be1689fc.zip art-f37a88b8e6db6c587fa449a12e40cb46be1689fc.tar.gz art-f37a88b8e6db6c587fa449a12e40cb46be1689fc.tar.bz2 |
ART: Compacting ROS/DlMalloc spaces with semispace copy GC
Current semispace copy GC is mainly associated with bump pointer
spaces. Though it squeezes fragmentation most aggressively, an extra
copy is required to re-establish the data in the ROS/DlMalloc space to allow
CMS GCs to happen afterwards. As semispace copy GC is still stop-the-world,
this not only introduces unnecessary overhead but also lengthens response time.
Here, response time means the duration between the start of a transition
request and the start of the transition animation; a long response time may
impact the user experience.
Using semispace copy GC to compact the data from one ROS space to another ROS
space (or from one DlMalloc space to another DlMalloc space) solves this
problem. Although it squeezes out less fragmentation, CMS GCs can run
immediately after the compaction.
We apply this algorithm in two cases:
1) Right before throwing an OOM if -XX:EnableHSpaceCompactForOOM is passed in
as true.
2) When app is switched to background if the -XX:BackgroundGC option has value
HSpaceCompact.
For case 1), OOMs are significantly delayed in the harmony GC stress test,
with compaction ratio up to 0.87. For case 2), compaction ratio around 0.5 is
observed in both built-in SMS and browser. Similar results have been obtained
on other apps as well.
Change-Id: Iad9eabc6d046659fda3535ae20f21bc31f89ded3
Signed-off-by: Wang, Zuo <zuo.wang@intel.com>
Signed-off-by: Chang, Yang <yang.chang@intel.com>
Signed-off-by: Lei Li <lei.l.li@intel.com>
Signed-off-by: Lin Zang <lin.zang@intel.com>
Diffstat (limited to 'runtime/gc/heap.h')
-rw-r--r-- | runtime/gc/heap.h | 65 |
1 files changed, 61 insertions, 4 deletions
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h index 86dab21..b207953 100644 --- a/runtime/gc/heap.h +++ b/runtime/gc/heap.h @@ -72,6 +72,10 @@ namespace collector { class SemiSpace; } // namespace collector +namespace allocator { + class RosAlloc; +} // namespace allocator + namespace space { class AllocSpace; class BumpPointerSpace; @@ -97,6 +101,15 @@ class AgeCardVisitor { } }; +enum HomogeneousSpaceCompactResult { + // Success. + kSuccess, + // Reject due to disabled moving GC. + kErrorReject, + // System is shutting down. + kErrorVMShuttingDown, +}; + // If true, use rosalloc/RosAllocSpace instead of dlmalloc/DlMallocSpace static constexpr bool kUseRosAlloc = true; @@ -151,7 +164,8 @@ class Heap { bool ignore_max_footprint, bool use_tlab, bool verify_pre_gc_heap, bool verify_pre_sweeping_heap, bool verify_post_gc_heap, bool verify_pre_gc_rosalloc, bool verify_pre_sweeping_rosalloc, - bool verify_post_gc_rosalloc); + bool verify_post_gc_rosalloc, bool use_homogeneous_space_compaction, + uint64_t min_interval_homogeneous_space_compaction_by_oom); ~Heap(); @@ -499,6 +513,9 @@ class Heap { return rosalloc_space_; } + // Return the corresponding rosalloc space. + space::RosAllocSpace* GetRosAllocSpace(gc::allocator::RosAlloc* rosalloc) const; + space::MallocSpace* GetNonMovingSpace() const { return non_moving_space_; } @@ -568,12 +585,19 @@ class Heap { } private: + // Compact source space to target space. void Compact(space::ContinuousMemMapAllocSpace* target_space, - space::ContinuousMemMapAllocSpace* source_space) + space::ContinuousMemMapAllocSpace* source_space, + GcCause gc_cause) EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_); void FinishGC(Thread* self, collector::GcType gc_type) LOCKS_EXCLUDED(gc_complete_lock_); + bool SupportHSpaceCompaction() const { + // Returns true if we can do hspace compaction. 
+ return main_space_backup_ != nullptr; + } + static ALWAYS_INLINE bool AllocatorHasAllocationStack(AllocatorType allocator_type) { return allocator_type != kAllocatorTypeBumpPointer && @@ -584,7 +608,8 @@ class Heap { } static bool IsMovingGc(CollectorType collector_type) { return collector_type == kCollectorTypeSS || collector_type == kCollectorTypeGSS || - collector_type == kCollectorTypeCC || collector_type == kCollectorTypeMC; + collector_type == kCollectorTypeCC || collector_type == kCollectorTypeMC || + collector_type == kCollectorTypeHomogeneousSpaceCompact; } bool ShouldAllocLargeObject(mirror::Class* c, size_t byte_count) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); @@ -682,10 +707,18 @@ class Heap { // Find a collector based on GC type. collector::GarbageCollector* FindCollectorByGcType(collector::GcType gc_type); - // Create the main free list space, typically either a RosAlloc space or DlMalloc space. + // Create a new alloc space and compact default alloc space to it. + HomogeneousSpaceCompactResult PerformHomogeneousSpaceCompact(); + + // Create the main free list malloc space, either a RosAlloc space or DlMalloc space. void CreateMainMallocSpace(MemMap* mem_map, size_t initial_size, size_t growth_limit, size_t capacity); + // Create a malloc space based on a mem map. Does not set the space as default. + space::MallocSpace* CreateMallocSpaceFromMemMap(MemMap* mem_map, size_t initial_size, + size_t growth_limit, size_t capacity, + const char* name, bool can_move_objects); + // Given the current contents of the alloc space, increase the allowed heap footprint to match // the target utilization ratio. This should only be called immediately after a full garbage // collection. @@ -972,6 +1005,30 @@ class Heap { const bool running_on_valgrind_; const bool use_tlab_; + // Pointer to the space which becomes the new main space when we do homogeneous space compaction. 
+ space::MallocSpace* main_space_backup_; + + // Minimal interval allowed between two homogeneous space compactions caused by OOM. + uint64_t min_interval_homogeneous_space_compaction_by_oom_; + + // Times of the last homogeneous space compaction caused by OOM. + uint64_t last_time_homogeneous_space_compaction_by_oom_; + + // Saved OOMs by homogeneous space compaction. + Atomic<size_t> count_delayed_oom_; + + // Count for requested homogeneous space compaction. + Atomic<size_t> count_requested_homogeneous_space_compaction_; + + // Count for ignored homogeneous space compaction. + Atomic<size_t> count_ignored_homogeneous_space_compaction_; + + // Count for performed homogeneous space compaction. + Atomic<size_t> count_performed_homogeneous_space_compaction_; + + // Whether or not we use homogeneous space compaction to avoid OOM errors. + bool use_homogeneous_space_compaction_for_oom_; + friend class collector::GarbageCollector; friend class collector::MarkCompact; friend class collector::MarkSweep; |