path: root/runtime/gc
author    Mathieu Chartier <mathieuc@google.com>  2014-03-28 16:22:20 -0700
committer Mathieu Chartier <mathieuc@google.com>  2014-03-28 16:24:55 -0700
commit    5ae2c9335a0c4eeaf4c67a52d066f16aad1aa907 (patch)
tree      f3d6a0d2a5e67ac5bcfe984f196465078ba9d7a5 /runtime/gc
parent    17b3fb82db845bae4713ca9cbe2f0c9a2dd2abc2 (diff)
Fix non-concurrent mark sweep ergonomics.

Previously we would continue to do sticky GC until the sticky GC did not free enough memory for the allocation; this was excessive since it could do one sticky GC per allocation. The new logic uses the next GC type before trying all the GCs in the plan.

Before memalloc benchmark (non-concurrent mark sweep):
Total time spent in GC: 11.212701s
Score: 7790

After:
Total time spent in GC: 9.422676s
Score: 6870

Change-Id: Iba75b70ea825ef3fd4b3e064d4f12c2fe5a3b176
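To make the new ordering concrete, here is a minimal, self-contained C++ sketch (not the real ART code) of the allocation slow path after this change. The names next_gc_type_, gc_plan_, TryToAllocate, CollectGarbageInternal and AllocateInternalWithGc mirror identifiers used in heap.cc, but the enum values, stub bodies and simplified signature are placeholders; the real method also handles allocator changes and SIRT-protected classes, as shown in the diff below.

// A minimal sketch, assuming simplified stand-ins for the Heap members used in
// the diff; stub bodies and GcType values are placeholders, not ART's.
#include <cstddef>
#include <vector>

enum GcType { kGcTypeNone, kGcTypeSticky, kGcTypePartial, kGcTypeFull };

struct Heap {
  GcType next_gc_type_ = kGcTypeSticky;  // GC type the ergonomics scheduled to run next.
  std::vector<GcType> gc_plan_{kGcTypeSticky, kGcTypePartial, kGcTypeFull};

  // Placeholder stubs: in ART these attempt the real allocation and run the
  // requested collection, returning kGcTypeNone if no GC actually ran.
  void* TryToAllocate(std::size_t size) { (void)size; return nullptr; }
  GcType CollectGarbageInternal(GcType type) { return type; }

  void* AllocateInternalWithGc(std::size_t size) {
    // New behaviour: run only the GC type that ergonomics already chose
    // (typically sticky), instead of repeating sticky GC once per failed allocation.
    const GcType tried_type = next_gc_type_;
    if (CollectGarbageInternal(tried_type) != kGcTypeNone) {
      if (void* ptr = TryToAllocate(size)) {
        return ptr;
      }
    }
    // If that was not enough, escalate through the whole plan, skipping the
    // type that was just tried.
    for (GcType gc_type : gc_plan_) {
      if (gc_type == tried_type) {
        continue;
      }
      if (CollectGarbageInternal(gc_type) == kGcTypeNone) {
        continue;
      }
      if (void* ptr = TryToAllocate(size)) {
        return ptr;
      }
    }
    return nullptr;  // Caller then grows the heap or throws OutOfMemoryError.
  }
};

With the old logic, a plan of {sticky, partial, full} could rerun a sticky GC for every failed allocation; here the scheduled type is tried at most once before escalating through the rest of the plan.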
Diffstat (limited to 'runtime/gc')
-rw-r--r--  runtime/gc/heap.cc | 18
1 file changed, 17 insertions(+), 1 deletion(-)
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 90ee955..1a32a9a 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -1157,13 +1157,29 @@ mirror::Object* Heap::AllocateInternalWithGc(Thread* self, AllocatorType allocat
     ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated, usable_size);
   }
 
+  collector::GcType tried_type = next_gc_type_;
+  if (ptr == nullptr) {
+    const bool gc_ran =
+        CollectGarbageInternal(tried_type, kGcCauseForAlloc, false) != collector::kGcTypeNone;
+    if (was_default_allocator && allocator != GetCurrentAllocator()) {
+      *klass = sirt_klass.get();
+      return nullptr;
+    }
+    if (gc_ran) {
+      ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated, usable_size);
+    }
+  }
+
   // Loop through our different Gc types and try to Gc until we get enough free memory.
   for (collector::GcType gc_type : gc_plan_) {
     if (ptr != nullptr) {
       break;
     }
+    if (gc_type == tried_type) {
+      continue;
+    }
     // Attempt to run the collector, if we succeed, re-try the allocation.
-    bool gc_ran =
+    const bool gc_ran =
         CollectGarbageInternal(gc_type, kGcCauseForAlloc, false) != collector::kGcTypeNone;
     if (was_default_allocator && allocator != GetCurrentAllocator()) {
       *klass = sirt_klass.get();