From d9a88de76de4c81ad75340b824df64a68c739351 Mon Sep 17 00:00:00 2001
From: Hiroshi Yamauchi
Date: Mon, 7 Apr 2014 13:52:31 -0700
Subject: Implement rosalloc page trimming without suspending threads.

Also, make it more efficient by not going through the chunks smaller
than the page size, i.e. by not using InspectAll().

Change-Id: I79ceb0374cb8aba5f6b8dde1afbace9af98b6cff
---
 runtime/gc/allocator/rosalloc.cc   | 55 ++++++++++++++++++++++++++++++++++++++++++
 runtime/gc/allocator/rosalloc.h    |  2 ++
 runtime/gc/space/rosalloc_space.cc |  6 ++---
 3 files changed, 59 insertions(+), 4 deletions(-)

(limited to 'runtime/gc')

diff --git a/runtime/gc/allocator/rosalloc.cc b/runtime/gc/allocator/rosalloc.cc
index 920741f..cbefa6a 100644
--- a/runtime/gc/allocator/rosalloc.cc
+++ b/runtime/gc/allocator/rosalloc.cc
@@ -2005,6 +2005,61 @@ void RosAlloc::Run::Verify(Thread* self, RosAlloc* rosalloc) {
   }
 }
 
+size_t RosAlloc::ReleasePages() {
+  VLOG(heap) << "RosAlloc::ReleasePages()";
+  DCHECK(!DoesReleaseAllPages());
+  Thread* self = Thread::Current();
+  size_t reclaimed_bytes = 0;
+  size_t i = 0;
+  while (true) {
+    MutexLock mu(self, lock_);
+    // Check the page map size which might have changed due to grow/shrink.
+    size_t pm_end = page_map_size_;
+    if (i >= pm_end) {
+      // Reached the end.
+      break;
+    }
+    byte pm = page_map_[i];
+    switch (pm) {
+      case kPageMapEmpty: {
+        // The start of a free page run. Release pages.
+        FreePageRun* fpr = reinterpret_cast<FreePageRun*>(base_ + i * kPageSize);
+        DCHECK(free_page_runs_.find(fpr) != free_page_runs_.end());
+        size_t fpr_size = fpr->ByteSize(this);
+        DCHECK(IsAligned<kPageSize>(fpr_size));
+        byte* start = reinterpret_cast<byte*>(fpr);
+        if (kIsDebugBuild) {
+          // In the debug build, the first page of a free page run
+          // contains a magic number for debugging. Exclude it.
+          start = reinterpret_cast<byte*>(fpr) + kPageSize;
+        }
+        byte* end = reinterpret_cast<byte*>(fpr) + fpr_size;
+        CHECK_EQ(madvise(start, end - start, MADV_DONTNEED), 0);
+        reclaimed_bytes += fpr_size;
+        size_t num_pages = fpr_size / kPageSize;
+        if (kIsDebugBuild) {
+          for (size_t j = i + 1; j < i + num_pages; ++j) {
+            DCHECK_EQ(page_map_[j], kPageMapEmpty);
+          }
+        }
+        i += num_pages;
+        DCHECK_LE(i, pm_end);
+        break;
+      }
+      case kPageMapLargeObject:      // Fall through.
+      case kPageMapLargeObjectPart:  // Fall through.
+      case kPageMapRun:              // Fall through.
+      case kPageMapRunPart:          // Fall through.
+        ++i;
+        break;  // Skip.
+      default:
+        LOG(FATAL) << "Unreachable - page map type: " << pm;
+        break;
+    }
+  }
+  return reclaimed_bytes;
+}
+
 }  // namespace allocator
 }  // namespace gc
 }  // namespace art
diff --git a/runtime/gc/allocator/rosalloc.h b/runtime/gc/allocator/rosalloc.h
index 0b4b189..5d9d75c 100644
--- a/runtime/gc/allocator/rosalloc.h
+++ b/runtime/gc/allocator/rosalloc.h
@@ -539,6 +539,8 @@ class RosAlloc {
   void InspectAll(void (*handler)(void* start, void* end, size_t used_bytes, void* callback_arg),
                   void* arg)
       LOCKS_EXCLUDED(lock_);
+  // Release empty pages.
+  size_t ReleasePages() LOCKS_EXCLUDED(lock_);
   // Returns the current footprint.
   size_t Footprint() LOCKS_EXCLUDED(lock_);
   // Returns the current capacity, maximum footprint.
diff --git a/runtime/gc/space/rosalloc_space.cc b/runtime/gc/space/rosalloc_space.cc
index 012267b..5c5e7f8 100644
--- a/runtime/gc/space/rosalloc_space.cc
+++ b/runtime/gc/space/rosalloc_space.cc
@@ -222,6 +222,7 @@ extern "C" void* art_heap_rosalloc_morecore(allocator::RosAlloc* rosalloc, intpt
 }
 
 size_t RosAllocSpace::Trim() {
+  VLOG(heap) << "RosAllocSpace::Trim() ";
   {
     MutexLock mu(Thread::Current(), lock_);
     // Trim to release memory at the end of the space.
@@ -229,10 +230,7 @@ size_t RosAllocSpace::Trim() {
   }
   // Attempt to release pages if it does not release all empty pages.
   if (!rosalloc_->DoesReleaseAllPages()) {
-    VLOG(heap) << "RosAllocSpace::Trim() ";
-    size_t reclaimed = 0;
-    InspectAllRosAlloc(DlmallocMadviseCallback, &reclaimed, false);
-    return reclaimed;
+    return rosalloc_->ReleasePages();
  }
   return 0;
 }
--
cgit v1.1
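
The new RosAlloc::ReleasePages() walks the page map under lock_ and hands whole free page runs back to the kernel with madvise(MADV_DONTNEED), rather than visiting every chunk via InspectAll(). The standalone sketch below (not ART code; the region size and the released page range are illustrative assumptions) shows the underlying madvise pattern on an anonymous mapping: the virtual range stays reserved while the resident pages are dropped and refault as zero-filled on next use.

// release_pages_sketch.cc -- minimal illustration of the madvise(MADV_DONTNEED)
// page-release pattern; build with: g++ -std=c++11 release_pages_sketch.cc
#include <sys/mman.h>
#include <unistd.h>
#include <cstddef>
#include <cstdio>

int main() {
  const size_t kPageSize = static_cast<size_t>(sysconf(_SC_PAGESIZE));
  const size_t kNumPages = 16;  // illustrative region size
  const size_t kRegionSize = kNumPages * kPageSize;

  // Reserve and commit an anonymous region, standing in for an allocator's
  // backing space.
  void* base = mmap(nullptr, kRegionSize, PROT_READ | PROT_WRITE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (base == MAP_FAILED) {
    perror("mmap");
    return 1;
  }

  // Touch every page so the pages are actually resident.
  unsigned char* bytes = static_cast<unsigned char*>(base);
  for (size_t i = 0; i < kRegionSize; i += kPageSize) {
    bytes[i] = 1;
  }

  // Treat pages [4, 12) as an empty "free page run" and release them. The
  // mapping remains valid; only the physical pages are given back.
  unsigned char* start = bytes + 4 * kPageSize;
  size_t length = 8 * kPageSize;
  if (madvise(start, length, MADV_DONTNEED) != 0) {
    perror("madvise");
    munmap(base, kRegionSize);
    return 1;
  }
  printf("released %zu bytes starting at %p\n", length,
         static_cast<void*>(start));

  munmap(base, kRegionSize);
  return 0;
}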