summary · refs · log · tree · commit · diff · stats
path: root/runtime/gc
diff options
context:
space:
mode:
authorHiroshi Yamauchi <yamauchi@google.com>2014-05-07 13:12:43 -0700
committerHiroshi Yamauchi <yamauchi@google.com>2014-05-07 14:20:20 -0700
commit24faeb23de9aa3038cac6655b575a4b991a6add4 (patch)
treee6903d7a8a65c4627730eb7b8619ac4f0dd9497e /runtime/gc
parent069849e1469b55984d9e208b2ada345aa57f8947 (diff)
downloadart-24faeb23de9aa3038cac6655b575a4b991a6add4.zip
art-24faeb23de9aa3038cac6655b575a4b991a6add4.tar.gz
art-24faeb23de9aa3038cac6655b575a4b991a6add4.tar.bz2
Trigger GSS whole-heap collection based on large object allocations.
This is to avoid a large amount of accumulated dead large objects. For example, in Maps, there was a case where ~100 MB of large objects were accumulated before being collected.
Bug: 11650816
Change-Id: Ie0ab1561282902a823e110d45f87146ba41fe6f8
Diffstat (limited to 'runtime/gc')
-rw-r--r--runtime/gc/collector/semi_space.cc53
-rw-r--r--runtime/gc/collector/semi_space.h8
2 files changed, 26 insertions, 35 deletions
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index f5d6299..a0659e7 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -64,8 +64,8 @@ namespace collector {
static constexpr bool kProtectFromSpace = true;
static constexpr bool kStoreStackTraces = false;
-static constexpr bool kUseBytesPromoted = true;
static constexpr size_t kBytesPromotedThreshold = 4 * MB;
+static constexpr size_t kLargeObjectBytesAllocatedThreshold = 16 * MB;
void SemiSpace::BindBitmaps() {
timings_.StartSplit("BindBitmaps");
@@ -104,8 +104,8 @@ SemiSpace::SemiSpace(Heap* heap, bool generational, const std::string& name_pref
last_gc_to_space_end_(nullptr),
bytes_promoted_(0),
bytes_promoted_since_last_whole_heap_collection_(0),
+ large_object_bytes_allocated_at_last_whole_heap_collection_(0),
whole_heap_collection_(true),
- whole_heap_collection_interval_counter_(0),
collector_name_(name_),
swap_semi_spaces_(true) {
}
@@ -187,12 +187,8 @@ void SemiSpace::MarkingPhase() {
if (gc_cause_ == kGcCauseExplicit || gc_cause_ == kGcCauseForNativeAlloc ||
clear_soft_references_) {
// If an explicit, native allocation-triggered, or last attempt
- // collection, collect the whole heap (and reset the interval
- // counter to be consistent.)
+ // collection, collect the whole heap.
whole_heap_collection_ = true;
- if (!kUseBytesPromoted) {
- whole_heap_collection_interval_counter_ = 0;
- }
}
if (whole_heap_collection_) {
VLOG(heap) << "Whole heap collection";
@@ -798,32 +794,27 @@ void SemiSpace::FinishPhase() {
// only space collection at the next collection by updating
// whole_heap_collection.
if (!whole_heap_collection_) {
- if (!kUseBytesPromoted) {
- // Enable whole_heap_collection once every
- // kDefaultWholeHeapCollectionInterval collections.
- --whole_heap_collection_interval_counter_;
- DCHECK_GE(whole_heap_collection_interval_counter_, 0);
- if (whole_heap_collection_interval_counter_ == 0) {
- whole_heap_collection_ = true;
- }
- } else {
- // Enable whole_heap_collection if the bytes promoted since
- // the last whole heap collection exceeds a threshold.
- bytes_promoted_since_last_whole_heap_collection_ += bytes_promoted_;
- if (bytes_promoted_since_last_whole_heap_collection_ >= kBytesPromotedThreshold) {
- whole_heap_collection_ = true;
- }
+ // Enable whole_heap_collection if the bytes promoted since the
+ // last whole heap collection or the large object bytes
+ // allocated exceeds a threshold.
+ bytes_promoted_since_last_whole_heap_collection_ += bytes_promoted_;
+ bool bytes_promoted_threshold_exceeded =
+ bytes_promoted_since_last_whole_heap_collection_ >= kBytesPromotedThreshold;
+ uint64_t current_los_bytes_allocated = GetHeap()->GetLargeObjectsSpace()->GetBytesAllocated();
+ uint64_t last_los_bytes_allocated =
+ large_object_bytes_allocated_at_last_whole_heap_collection_;
+ bool large_object_bytes_threshold_exceeded =
+ current_los_bytes_allocated >=
+ last_los_bytes_allocated + kLargeObjectBytesAllocatedThreshold;
+ if (bytes_promoted_threshold_exceeded || large_object_bytes_threshold_exceeded) {
+ whole_heap_collection_ = true;
}
} else {
- if (!kUseBytesPromoted) {
- DCHECK_EQ(whole_heap_collection_interval_counter_, 0);
- whole_heap_collection_interval_counter_ = kDefaultWholeHeapCollectionInterval;
- whole_heap_collection_ = false;
- } else {
- // Reset it.
- bytes_promoted_since_last_whole_heap_collection_ = bytes_promoted_;
- whole_heap_collection_ = false;
- }
+ // Reset the counters.
+ bytes_promoted_since_last_whole_heap_collection_ = bytes_promoted_;
+ large_object_bytes_allocated_at_last_whole_heap_collection_ =
+ GetHeap()->GetLargeObjectsSpace()->GetBytesAllocated();
+ whole_heap_collection_ = false;
}
}
// Clear all of the spaces' mark bitmaps.
diff --git a/runtime/gc/collector/semi_space.h b/runtime/gc/collector/semi_space.h
index 3b3e1b1..9fdf471 100644
--- a/runtime/gc/collector/semi_space.h
+++ b/runtime/gc/collector/semi_space.h
@@ -234,14 +234,14 @@ class SemiSpace : public GarbageCollector {
// the non-moving space, since the last whole heap collection.
uint64_t bytes_promoted_since_last_whole_heap_collection_;
+ // Used for the generational mode. Keeps track of how many bytes of
+ // large objects were allocated at the last whole heap collection.
+ uint64_t large_object_bytes_allocated_at_last_whole_heap_collection_;
+
// Used for the generational mode. When true, collect the whole
// heap. When false, collect only the bump pointer spaces.
bool whole_heap_collection_;
- // Used for the generational mode. A counter used to enable
- // whole_heap_collection_ once per interval.
- int whole_heap_collection_interval_counter_;
-
// How many objects and bytes we moved, used so that we don't need to get the size of the
// to_space_ when calculating how many objects and bytes we freed.
size_t bytes_moved_;