Diffstat (limited to 'runtime/gc')
-rw-r--r--  runtime/gc/allocator/rosalloc.cc            55
-rw-r--r--  runtime/gc/allocator/rosalloc.h              2
-rw-r--r--  runtime/gc/collector/garbage_collector.cc   10
-rw-r--r--  runtime/gc/collector/garbage_collector.h     3
-rw-r--r--  runtime/gc/heap.cc                           41
-rw-r--r--  runtime/gc/space/rosalloc_space.cc            6
6 files changed, 96 insertions(+), 21 deletions(-)
diff --git a/runtime/gc/allocator/rosalloc.cc b/runtime/gc/allocator/rosalloc.cc
index 920741f..cbefa6a 100644
--- a/runtime/gc/allocator/rosalloc.cc
+++ b/runtime/gc/allocator/rosalloc.cc
@@ -2005,6 +2005,61 @@ void RosAlloc::Run::Verify(Thread* self, RosAlloc* rosalloc) {
}
}
+size_t RosAlloc::ReleasePages() {
+ VLOG(heap) << "RosAlloc::ReleasePages()";
+ DCHECK(!DoesReleaseAllPages());
+ Thread* self = Thread::Current();
+ size_t reclaimed_bytes = 0;
+ size_t i = 0;
+ while (true) {
+ MutexLock mu(self, lock_);
+ // Check the page map size which might have changed due to grow/shrink.
+ size_t pm_end = page_map_size_;
+ if (i >= pm_end) {
+ // Reached the end.
+ break;
+ }
+ byte pm = page_map_[i];
+ switch (pm) {
+ case kPageMapEmpty: {
+ // The start of a free page run. Release pages.
+ FreePageRun* fpr = reinterpret_cast<FreePageRun*>(base_ + i * kPageSize);
+ DCHECK(free_page_runs_.find(fpr) != free_page_runs_.end());
+ size_t fpr_size = fpr->ByteSize(this);
+ DCHECK(IsAligned<kPageSize>(fpr_size));
+ byte* start = reinterpret_cast<byte*>(fpr);
+ if (kIsDebugBuild) {
+ // In the debug build, the first page of a free page run
+ // contains a magic number for debugging. Exclude it.
+ start = reinterpret_cast<byte*>(fpr) + kPageSize;
+ }
+ byte* end = reinterpret_cast<byte*>(fpr) + fpr_size;
+ CHECK_EQ(madvise(start, end - start, MADV_DONTNEED), 0);
+ reclaimed_bytes += fpr_size;
+ size_t num_pages = fpr_size / kPageSize;
+ if (kIsDebugBuild) {
+ for (size_t j = i + 1; j < i + num_pages; ++j) {
+ DCHECK_EQ(page_map_[j], kPageMapEmpty);
+ }
+ }
+ i += num_pages;
+ DCHECK_LE(i, pm_end);
+ break;
+ }
+ case kPageMapLargeObject: // Fall through.
+ case kPageMapLargeObjectPart: // Fall through.
+ case kPageMapRun: // Fall through.
+ case kPageMapRunPart: // Fall through.
+ ++i;
+ break; // Skip.
+ default:
+ LOG(FATAL) << "Unreachable - page map type: " << pm;
+ break;
+ }
+ }
+ return reclaimed_bytes;
+}
+
} // namespace allocator
} // namespace gc
} // namespace art
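
The new ReleasePages() walks the page map and hands each free page run back to the kernel with madvise(MADV_DONTNEED). A minimal, standalone sketch of that technique follows (not ART code; the mapping, sizes, and names below are made up for illustration): the physical pages are released, but the virtual mapping stays intact, so the allocator can reuse the range later without remapping.

// Sketch only: demonstrates MADV_DONTNEED on an anonymous mapping standing in
// for a RosAlloc free page run.
#include <sys/mman.h>
#include <unistd.h>
#include <cassert>
#include <cstring>

int main() {
  const size_t page_size = sysconf(_SC_PAGESIZE);
  const size_t num_pages = 4;
  // Anonymous private mapping standing in for a free page run.
  void* run = mmap(nullptr, num_pages * page_size, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  assert(run != MAP_FAILED);
  std::memset(run, 0xAB, num_pages * page_size);  // Fault the pages in.

  // Release the physical pages; the mapping itself stays valid.
  int rc = madvise(run, num_pages * page_size, MADV_DONTNEED);
  assert(rc == 0);

  // Touching the range again after MADV_DONTNEED yields zero-filled pages
  // (for anonymous private memory), faulted in on demand.
  assert(static_cast<unsigned char*>(run)[0] == 0);

  munmap(run, num_pages * page_size);
  return 0;
}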
diff --git a/runtime/gc/allocator/rosalloc.h b/runtime/gc/allocator/rosalloc.h
index 0b4b189..5d9d75c 100644
--- a/runtime/gc/allocator/rosalloc.h
+++ b/runtime/gc/allocator/rosalloc.h
@@ -539,6 +539,8 @@ class RosAlloc {
void InspectAll(void (*handler)(void* start, void* end, size_t used_bytes, void* callback_arg),
void* arg)
LOCKS_EXCLUDED(lock_);
+ // Release empty pages.
+ size_t ReleasePages() LOCKS_EXCLUDED(lock_);
// Returns the current footprint.
size_t Footprint() LOCKS_EXCLUDED(lock_);
// Returns the current capacity, maximum footprint.
diff --git a/runtime/gc/collector/garbage_collector.cc b/runtime/gc/collector/garbage_collector.cc
index 07951e0..a700c73 100644
--- a/runtime/gc/collector/garbage_collector.cc
+++ b/runtime/gc/collector/garbage_collector.cc
@@ -201,7 +201,15 @@ uint64_t GarbageCollector::GetEstimatedMeanThroughput() const {
uint64_t GarbageCollector::GetEstimatedLastIterationThroughput() const {
// Add 1ms to prevent possible division by 0.
- return (freed_bytes_ * 1000) / (NsToMs(GetDurationNs()) + 1);
+ return (static_cast<uint64_t>(freed_bytes_) * 1000) / (NsToMs(GetDurationNs()) + 1);
+}
+
+void GarbageCollector::ResetMeasurements() {
+ cumulative_timings_.Reset();
+ pause_histogram_.Reset();
+ total_time_ns_ = 0;
+ total_freed_objects_ = 0;
+ total_freed_bytes_ = 0;
}
} // namespace collector
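
The cast added in GetEstimatedLastIterationThroughput() matters when freed_bytes_ is a 32-bit quantity (as on 32-bit targets): multiplying by 1000 before widening can wrap. A small standalone illustration of the overflow, with hypothetical numbers rather than ART code:

#include <cstdint>
#include <iostream>

int main() {
  // Hypothetical iteration: ~8 MiB freed over a 10 ms collection.
  uint32_t freed_bytes = 8u * 1024u * 1024u;  // assumes a 32-bit counter
  uint64_t duration_ms = 10;

  // Multiplying in 32 bits wraps (8,388,608,000 mod 2^32)...
  uint32_t narrow = freed_bytes * 1000u;
  std::cout << "narrow throughput: " << narrow / (duration_ms + 1) << " B/s\n";

  // ...while widening first, as the patched code does, keeps the full product.
  uint64_t wide = static_cast<uint64_t>(freed_bytes) * 1000u;
  std::cout << "wide throughput:   " << wide / (duration_ms + 1) << " B/s\n";
  return 0;
}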
diff --git a/runtime/gc/collector/garbage_collector.h b/runtime/gc/collector/garbage_collector.h
index 5b7b8a2..b19ac3f 100644
--- a/runtime/gc/collector/garbage_collector.h
+++ b/runtime/gc/collector/garbage_collector.h
@@ -110,6 +110,9 @@ class GarbageCollector {
return pause_histogram_;
}
+ // Reset the cumulative timings and pause histogram.
+ void ResetMeasurements();
+
// Returns the estimated throughput in bytes / second.
uint64_t GetEstimatedMeanThroughput() const;
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index daf0fb3..eb8c7b1 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -661,7 +661,7 @@ void Heap::DumpGcPerformanceInfo(std::ostream& os) {
// Dump cumulative loggers for each GC type.
uint64_t total_paused_time = 0;
- for (const auto& collector : garbage_collectors_) {
+ for (auto& collector : garbage_collectors_) {
const CumulativeLogger& logger = collector->GetCumulativeTimings();
if (logger.GetTotalNs() != 0) {
os << ConstDumpable<CumulativeLogger>(logger);
@@ -681,6 +681,7 @@ void Heap::DumpGcPerformanceInfo(std::ostream& os) {
total_duration += total_ns;
total_paused_time += total_pause_ns;
}
+ collector->ResetMeasurements();
}
uint64_t allocation_time = static_cast<uint64_t>(total_allocation_time_) * kTimeAdjust;
if (total_duration != 0) {
@@ -915,8 +916,16 @@ void Heap::DoPendingTransitionOrTrim() {
// Transition the collector if the desired collector type is not the same as the current
// collector type.
TransitionCollector(desired_collector_type);
- // Do a heap trim if it is needed.
- Trim();
+ if (!CareAboutPauseTimes()) {
+ // Deflate the monitors; this can cause a pause but shouldn't matter since we don't care
+ // about pauses.
+ Runtime* runtime = Runtime::Current();
+ runtime->GetThreadList()->SuspendAll();
+ runtime->GetMonitorList()->DeflateMonitors();
+ runtime->GetThreadList()->ResumeAll();
+ // Do a heap trim if it is needed.
+ Trim();
+ }
}
void Heap::Trim() {
@@ -2661,6 +2670,10 @@ void Heap::RequestCollectorTransition(CollectorType desired_collector_type, uint
}
void Heap::RequestHeapTrim() {
+ // Request a heap trim only if we do not currently care about pause times.
+ if (CareAboutPauseTimes()) {
+ return;
+ }
// GC completed and now we must decide whether to request a heap trim (advising pages back to the
// kernel) or not. Issuing a request will also cause trimming of the libc heap. As a trim scans
// a space it will hold its lock and can become a cause of jank.
@@ -2682,21 +2695,17 @@ void Heap::RequestHeapTrim() {
// as we don't hold the lock while requesting the trim).
return;
}
-
- // Request a heap trim only if we do not currently care about pause times.
- if (!CareAboutPauseTimes()) {
- {
- MutexLock mu(self, *heap_trim_request_lock_);
- if (last_trim_time_ + kHeapTrimWait >= NanoTime()) {
- // We have done a heap trim in the last kHeapTrimWait nanosecs, don't request another one
- // just yet.
- return;
- }
- heap_trim_request_pending_ = true;
+ {
+ MutexLock mu(self, *heap_trim_request_lock_);
+ if (last_trim_time_ + kHeapTrimWait >= NanoTime()) {
+ // We have done a heap trim in the last kHeapTrimWait nanosecs, don't request another one
+ // just yet.
+ return;
}
- // Notify the daemon thread which will actually do the heap trim.
- SignalHeapTrimDaemon(self);
+ heap_trim_request_pending_ = true;
}
+ // Notify the daemon thread which will actually do the heap trim.
+ SignalHeapTrimDaemon(self);
}
void Heap::SignalHeapTrimDaemon(Thread* self) {
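
The rewritten RequestHeapTrim() rate-limits and sets the pending flag while holding heap_trim_request_lock_, then signals the trim daemon only after the lock is released. A generic sketch of that pattern using std::mutex / std::condition_variable stand-ins (not the actual ART daemon plumbing; kTrimWait and the class below are invented for illustration):

#include <chrono>
#include <condition_variable>
#include <mutex>

class TrimRequester {
 public:
  // Mutator side: mirrors the reshaped RequestHeapTrim() control flow.
  void RequestTrim() {
    const auto now = std::chrono::steady_clock::now();
    {
      std::lock_guard<std::mutex> lock(mutex_);
      if (now - last_trim_time_ < kTrimWait) {
        return;  // Trimmed recently; don't request another one just yet.
      }
      trim_pending_ = true;
    }
    // Notify the worker outside the critical section, mirroring how the patch
    // moves SignalHeapTrimDaemon(self) out of the MutexLock scope.
    cond_.notify_one();
  }

  // Worker side: wait for a pending request, then perform the trim.
  void WaitForTrimRequest() {
    std::unique_lock<std::mutex> lock(mutex_);
    cond_.wait(lock, [this] { return trim_pending_; });
    trim_pending_ = false;
    last_trim_time_ = std::chrono::steady_clock::now();
    // ... perform the actual trim here ...
  }

 private:
  static constexpr std::chrono::seconds kTrimWait{5};  // stand-in for kHeapTrimWait
  std::mutex mutex_;
  std::condition_variable cond_;
  bool trim_pending_ = false;
  std::chrono::steady_clock::time_point last_trim_time_{};
};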
diff --git a/runtime/gc/space/rosalloc_space.cc b/runtime/gc/space/rosalloc_space.cc
index 012267b..5c5e7f8 100644
--- a/runtime/gc/space/rosalloc_space.cc
+++ b/runtime/gc/space/rosalloc_space.cc
@@ -222,6 +222,7 @@ extern "C" void* art_heap_rosalloc_morecore(allocator::RosAlloc* rosalloc, intpt
}
size_t RosAllocSpace::Trim() {
+ VLOG(heap) << "RosAllocSpace::Trim() ";
{
MutexLock mu(Thread::Current(), lock_);
// Trim to release memory at the end of the space.
@@ -229,10 +230,7 @@ size_t RosAllocSpace::Trim() {
}
// Attempt to release pages if it does not release all empty pages.
if (!rosalloc_->DoesReleaseAllPages()) {
- VLOG(heap) << "RosAllocSpace::Trim() ";
- size_t reclaimed = 0;
- InspectAllRosAlloc(DlmallocMadviseCallback, &reclaimed, false);
- return reclaimed;
+ return rosalloc_->ReleasePages();
}
return 0;
}