Diffstat (limited to 'runtime/gc')
-rw-r--r--  runtime/gc/collector/semi_space.cc              11
-rw-r--r--  runtime/gc/heap.cc                             342
-rw-r--r--  runtime/gc/heap.h                               32
-rw-r--r--  runtime/gc/space/bump_pointer_space.cc           7
-rw-r--r--  runtime/gc/space/bump_pointer_space.h           11
-rw-r--r--  runtime/gc/space/dlmalloc_space-inl.h            2
-rw-r--r--  runtime/gc/space/dlmalloc_space.cc              43
-rw-r--r--  runtime/gc/space/dlmalloc_space.h               20
-rw-r--r--  runtime/gc/space/dlmalloc_space_base_test.cc     2
-rw-r--r--  runtime/gc/space/dlmalloc_space_random_test.cc   2
-rw-r--r--  runtime/gc/space/dlmalloc_space_static_test.cc   2
-rw-r--r--  runtime/gc/space/image_space.h                   4
-rw-r--r--  runtime/gc/space/large_object_space.h            4
-rw-r--r--  runtime/gc/space/malloc_space.cc                 8
-rw-r--r--  runtime/gc/space/malloc_space.h                 17
-rw-r--r--  runtime/gc/space/rosalloc_space.cc              48
-rw-r--r--  runtime/gc/space/rosalloc_space.h               17
-rw-r--r--  runtime/gc/space/rosalloc_space_base_test.cc     2
-rw-r--r--  runtime/gc/space/rosalloc_space_random_test.cc   2
-rw-r--r--  runtime/gc/space/rosalloc_space_static_test.cc   2
-rw-r--r--  runtime/gc/space/space.h                         8
-rw-r--r--  runtime/gc/space/valgrind_malloc_space-inl.h     6
-rw-r--r--  runtime/gc/space/valgrind_malloc_space.h         2
-rw-r--r--  runtime/gc/space/zygote_space.cc                 4
-rw-r--r--  runtime/gc/space/zygote_space.h                  5
25 files changed, 316 insertions(+), 287 deletions(-)
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index e82d533..6e31cb7 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -63,7 +63,6 @@ namespace gc {
namespace collector {
static constexpr bool kProtectFromSpace = true;
-static constexpr bool kClearFromSpace = true;
static constexpr bool kStoreStackTraces = false;
static constexpr bool kUseBytesPromoted = true;
static constexpr size_t kBytesPromotedThreshold = 4 * MB;
@@ -122,6 +121,7 @@ void SemiSpace::InitializePhase() {
// Do any pre GC verification.
timings_.NewSplit("PreGcVerification");
heap_->PreGcVerification(this);
+ CHECK(from_space_->CanMoveObjects()) << "Attempting to move from " << *from_space_;
// Set the initial bitmap.
to_space_live_bitmap_ = to_space_->GetLiveBitmap();
}
@@ -182,9 +182,6 @@ void SemiSpace::MarkingPhase() {
Locks::mutator_lock_->AssertExclusiveHeld(self_);
TimingLogger::ScopedSplit split("MarkingPhase", &timings_);
- // Need to do this with mutators paused so that somebody doesn't accidentally allocate into the
- // wrong space.
- heap_->SwapSemiSpaces();
if (generational_) {
// If last_gc_to_space_end_ is out of the bounds of the from-space
// (the to-space from last GC), then point it to the beginning of
@@ -414,11 +411,7 @@ void SemiSpace::ReclaimPhase() {
TimingLogger::ScopedSplit split("UnBindBitmaps", &timings_);
GetHeap()->UnBindBitmaps();
}
- if (kClearFromSpace) {
- // Release the memory used by the from space.
- from_space_->Clear();
- }
- from_space_->Reset();
+ from_space_->Clear();
// Protect the from space.
VLOG(heap) << "Protecting space " << *from_space_;
if (kProtectFromSpace) {
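
Both changes in this file pair with hunks in heap.cc below: Clear() now subsumes the old Clear() + Reset() pair, and the semispace swap moves out of MarkingPhase() into the heap, which performs it while all mutators are suspended. A condensed sketch of the resulting driver sequence (paraphrased from the CollectGarbageInternal() hunk below, not verbatim):

    // Compacting case: suspend mutators so nobody allocates into the space
    // being evacuated, run the copying collector, then swap the semispaces so
    // allocation resumes in the freshly evacuated space.
    if (compacting_gc) {
      runtime->GetThreadList()->SuspendAll();
      collector->Run(gc_cause, clear_soft_references || runtime->IsZygote());
      SwapSemiSpaces();
      runtime->GetThreadList()->ResumeAll();
    }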
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index fcf9fe9..efd1c7a 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -81,10 +81,15 @@ static constexpr size_t kMaxConcurrentRemainingBytes = 512 * KB;
// relative to partial/full GC. This is desirable since sticky GCs interfere less with mutator
// threads (lower pauses, use less memory bandwidth).
static constexpr double kStickyGcThroughputAdjustment = 1.25;
+// Whether or not we use the free list large object space.
+static constexpr bool kUseFreeListSpaceForLOS = false;
+// Whether or not we compact the zygote in PreZygoteFork.
+static constexpr bool kCompactZygote = kMovingCollector;
+static constexpr size_t kNonMovingSpaceCapacity = 64 * MB;
Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max_free,
double target_utilization, size_t capacity, const std::string& image_file_name,
- CollectorType post_zygote_collector_type, CollectorType background_collector_type,
+ CollectorType foreground_collector_type, CollectorType background_collector_type,
size_t parallel_gc_threads, size_t conc_gc_threads, bool low_memory_mode,
size_t long_pause_log_threshold, size_t long_gc_log_threshold,
bool ignore_max_footprint, bool use_tlab, bool verify_pre_gc_heap,
@@ -95,9 +100,9 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max
dlmalloc_space_(nullptr),
main_space_(nullptr),
collector_type_(kCollectorTypeNone),
- post_zygote_collector_type_(post_zygote_collector_type),
+ foreground_collector_type_(foreground_collector_type),
background_collector_type_(background_collector_type),
- desired_collector_type_(collector_type_),
+ desired_collector_type_(foreground_collector_type_),
heap_trim_request_lock_(nullptr),
last_trim_time_(0),
heap_transition_target_time_(0),
@@ -162,15 +167,11 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max
// If we aren't the zygote, switch to the default non zygote allocator. This may update the
// entrypoints.
if (!is_zygote) {
- desired_collector_type_ = post_zygote_collector_type_;
large_object_threshold_ = kDefaultLargeObjectThreshold;
- } else {
- if (kMovingCollector) {
- // We are the zygote, use bump pointer allocation + semi space collector.
- bool generational = post_zygote_collector_type_ == kCollectorTypeGSS;
- desired_collector_type_ = generational ? kCollectorTypeGSS : kCollectorTypeSS;
- } else {
- desired_collector_type_ = post_zygote_collector_type_;
+ // Background compaction is currently not supported for command line runs.
+ if (background_collector_type_ != foreground_collector_type_) {
+ LOG(WARNING) << "Disabling background compaction for non zygote";
+ background_collector_type_ = foreground_collector_type_;
}
}
ChangeCollector(desired_collector_type_);
@@ -187,73 +188,56 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max
// isn't going to get in the middle
byte* oat_file_end_addr = image_space->GetImageHeader().GetOatFileEnd();
CHECK_GT(oat_file_end_addr, image_space->End());
- if (oat_file_end_addr > requested_alloc_space_begin) {
- requested_alloc_space_begin = AlignUp(oat_file_end_addr, kPageSize);
- }
+ requested_alloc_space_begin = AlignUp(oat_file_end_addr, kPageSize);
}
- MemMap* malloc_space_mem_map = nullptr;
- const char* malloc_space_name = is_zygote ? "zygote space" : "alloc space";
if (is_zygote) {
- // Allocate a single mem map that is split into the malloc space
- // and the post zygote non-moving space to put them adjacent.
- size_t post_zygote_non_moving_space_size = 64 * MB;
- size_t non_moving_spaces_size = capacity + post_zygote_non_moving_space_size;
+ // Reserve the address range before we create the non moving space to make sure bitmaps don't
+ // take it.
std::string error_str;
- malloc_space_mem_map = MemMap::MapAnonymous(malloc_space_name, requested_alloc_space_begin,
- non_moving_spaces_size, PROT_READ | PROT_WRITE,
- true, &error_str);
- CHECK(malloc_space_mem_map != nullptr) << error_str;
- post_zygote_non_moving_space_mem_map_.reset(malloc_space_mem_map->RemapAtEnd(
- malloc_space_mem_map->Begin() + capacity, "post zygote non-moving space",
- PROT_READ | PROT_WRITE, &error_str));
- CHECK(post_zygote_non_moving_space_mem_map_.get() != nullptr) << error_str;
- VLOG(heap) << "malloc space mem map : " << malloc_space_mem_map;
- VLOG(heap) << "post zygote non-moving space mem map : "
- << post_zygote_non_moving_space_mem_map_.get();
+ MemMap* mem_map = MemMap::MapAnonymous(
+ "main space", requested_alloc_space_begin + kNonMovingSpaceCapacity, capacity,
+ PROT_READ | PROT_WRITE, true, &error_str);
+ CHECK(mem_map != nullptr) << error_str;
+ // The non-moving space is always dlmalloc since we currently don't have support for multiple
+ // rosalloc spaces.
+ non_moving_space_ = space::DlMallocSpace::Create(
+ "zygote / non moving space", initial_size, kNonMovingSpaceCapacity, kNonMovingSpaceCapacity,
+ requested_alloc_space_begin, false);
+ non_moving_space_->SetGrowthLimit(non_moving_space_->Capacity());
+ CreateMainMallocSpace(mem_map, initial_size, growth_limit, capacity);
} else {
- // Allocate a mem map for the malloc space.
std::string error_str;
- malloc_space_mem_map = MemMap::MapAnonymous(malloc_space_name, requested_alloc_space_begin,
- capacity, PROT_READ | PROT_WRITE, true, &error_str);
- CHECK(malloc_space_mem_map != nullptr) << error_str;
- VLOG(heap) << "malloc space mem map : " << malloc_space_mem_map;
- }
- CHECK(malloc_space_mem_map != nullptr);
- space::MallocSpace* malloc_space;
- if (kUseRosAlloc) {
- malloc_space = space::RosAllocSpace::CreateFromMemMap(malloc_space_mem_map, malloc_space_name,
- kDefaultStartingSize, initial_size,
- growth_limit, capacity, low_memory_mode_);
- CHECK(malloc_space != nullptr) << "Failed to create rosalloc space";
- } else {
- malloc_space = space::DlMallocSpace::CreateFromMemMap(malloc_space_mem_map, malloc_space_name,
- kDefaultStartingSize, initial_size,
- growth_limit, capacity);
- CHECK(malloc_space != nullptr) << "Failed to create dlmalloc space";
- }
- VLOG(heap) << "malloc_space : " << malloc_space;
+ MemMap* mem_map = MemMap::MapAnonymous("main/non-moving space", requested_alloc_space_begin,
+ capacity, PROT_READ | PROT_WRITE, true, &error_str);
+ CHECK(mem_map != nullptr) << error_str;
+ // Create the main free list space, which doubles as the non-moving space. We can do this
+ // since not being the zygote means that we won't have any background compaction.
+ CreateMainMallocSpace(mem_map, initial_size, growth_limit, capacity);
+ non_moving_space_ = main_space_;
+ }
+ CHECK(non_moving_space_ != nullptr);
+
+ // We need to create the bump pointer space if the foreground collector is a compacting GC.
+ // We also need it when the foreground collector is non-moving but the background collector
+ // is moving, since the heap transition code will create the temp space by recycling the
+ // bitmap from the main space.
if (kMovingCollector) {
// TODO: Place bump-pointer spaces somewhere to minimize size of card table.
- // TODO: Having 3+ spaces as big as the large heap size can cause virtual memory fragmentation
- // issues.
- const size_t bump_pointer_space_size = std::min(malloc_space->Capacity(), 128 * MB);
- bump_pointer_space_ = space::BumpPointerSpace::Create("Bump pointer space",
- bump_pointer_space_size, nullptr);
+ bump_pointer_space_ = space::BumpPointerSpace::Create("Bump pointer space", capacity, nullptr);
CHECK(bump_pointer_space_ != nullptr) << "Failed to create bump pointer space";
AddSpace(bump_pointer_space_);
- temp_space_ = space::BumpPointerSpace::Create("Bump pointer space 2", bump_pointer_space_size,
- nullptr);
+ temp_space_ = space::BumpPointerSpace::Create("Bump pointer space 2", capacity, nullptr);
CHECK(temp_space_ != nullptr) << "Failed to create bump pointer space";
AddSpace(temp_space_);
- VLOG(heap) << "bump_pointer_space : " << bump_pointer_space_;
- VLOG(heap) << "temp_space : " << temp_space_;
}
- non_moving_space_ = malloc_space;
- malloc_space->SetFootprintLimit(malloc_space->Capacity());
- AddSpace(malloc_space);
+ if (non_moving_space_ != main_space_) {
+ AddSpace(non_moving_space_);
+ }
+ if (main_space_ != nullptr) {
+ AddSpace(main_space_);
+ }
// Allocate the large object space.
- constexpr bool kUseFreeListSpaceForLOS = false;
if (kUseFreeListSpaceForLOS) {
large_object_space_ = space::FreeListSpace::Create("large object space", nullptr, capacity);
} else {
@@ -268,11 +252,6 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max
// Relies on the spaces being sorted.
byte* heap_begin = continuous_spaces_.front()->Begin();
byte* heap_end = continuous_spaces_.back()->Limit();
- if (is_zygote) {
- CHECK(post_zygote_non_moving_space_mem_map_.get() != nullptr);
- heap_begin = std::min(post_zygote_non_moving_space_mem_map_->Begin(), heap_begin);
- heap_end = std::max(post_zygote_non_moving_space_mem_map_->End(), heap_end);
- }
size_t heap_capacity = heap_end - heap_begin;
// Allocate the card table.
@@ -292,6 +271,12 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max
new accounting::RememberedSet("Non-moving space remembered set", this, non_moving_space_);
CHECK(non_moving_space_rem_set != nullptr) << "Failed to create non-moving space remembered set";
AddRememberedSet(non_moving_space_rem_set);
+ if (main_space_ != nullptr && main_space_ != non_moving_space_) {
+ accounting::RememberedSet* main_space_rem_set =
+ new accounting::RememberedSet("Main space remembered set", this, main_space_);
+ CHECK(main_space_rem_set != nullptr) << "Failed to create main space remembered set";
+ AddRememberedSet(main_space_rem_set);
+ }
}
// TODO: Count objects in the image space here.
@@ -329,7 +314,7 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max
}
if (kMovingCollector) {
// TODO: Clean this up.
- bool generational = post_zygote_collector_type_ == kCollectorTypeGSS;
+ bool generational = foreground_collector_type_ == kCollectorTypeGSS;
semi_space_collector_ = new collector::SemiSpace(this, generational,
generational ? "generational" : "");
garbage_collectors_.push_back(semi_space_collector_);
@@ -347,6 +332,37 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max
}
}
+void Heap::CreateMainMallocSpace(MemMap* mem_map, size_t initial_size, size_t growth_limit,
+ size_t capacity) {
+ // Is background compaction enabled?
+ bool can_move_objects = IsMovingGc(background_collector_type_) !=
+ IsMovingGc(foreground_collector_type_);
+ // If we are the zygote and don't yet have a zygote space, it means that the zygote fork will
+ // happen in the future. If this happens and we have kCompactZygote enabled, we wish to compact
+ // from the main space to the zygote space. If background compaction is enabled, always pass in
+ // that we can move objects.
+ if (kCompactZygote && Runtime::Current()->IsZygote() && !can_move_objects) {
+ // After the zygote we want this to be false if we don't have background compaction enabled so
+ // that getting primitive array elements is faster.
+ can_move_objects = !have_zygote_space_;
+ }
+ if (kUseRosAlloc) {
+ main_space_ = space::RosAllocSpace::CreateFromMemMap(mem_map, "main rosalloc space",
+ kDefaultStartingSize, initial_size,
+ growth_limit, capacity, low_memory_mode_,
+ can_move_objects);
+ CHECK(main_space_ != nullptr) << "Failed to create rosalloc space";
+ } else {
+ main_space_ = space::DlMallocSpace::CreateFromMemMap(mem_map, "main dlmalloc space",
+ kDefaultStartingSize, initial_size,
+ growth_limit, capacity,
+ can_move_objects);
+ CHECK(main_space_ != nullptr) << "Failed to create dlmalloc space";
+ }
+ main_space_->SetFootprintLimit(main_space_->Capacity());
+ VLOG(heap) << "Created main space " << main_space_;
+}
+
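
The can_move_objects computation above folds two conditions together; an equivalent single-expression restatement, for illustration only:

    // Main-space objects must stay movable if a heap transition will compact
    // the main space (exactly one of the two collectors is moving), or if a
    // future zygote fork may compact the main space into the zygote space.
    const bool transition_compacts = IsMovingGc(background_collector_type_) !=
                                     IsMovingGc(foreground_collector_type_);
    const bool zygote_may_compact = kCompactZygote &&
        Runtime::Current()->IsZygote() && !have_zygote_space_;
    const bool can_move_objects = transition_compacts || zygote_may_compact;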
void Heap::ChangeAllocator(AllocatorType allocator) {
if (current_allocator_ != allocator) {
// These two allocators are only used internally and don't have any entrypoints.
@@ -360,13 +376,13 @@ void Heap::ChangeAllocator(AllocatorType allocator) {
}
void Heap::DisableCompaction() {
- if (IsCompactingGC(post_zygote_collector_type_)) {
- post_zygote_collector_type_ = kCollectorTypeCMS;
+ if (IsMovingGc(foreground_collector_type_)) {
+ foreground_collector_type_ = kCollectorTypeCMS;
}
- if (IsCompactingGC(background_collector_type_)) {
- background_collector_type_ = post_zygote_collector_type_;
+ if (IsMovingGc(background_collector_type_)) {
+ background_collector_type_ = foreground_collector_type_;
}
- TransitionCollector(post_zygote_collector_type_);
+ TransitionCollector(foreground_collector_type_);
}
std::string Heap::SafeGetClassDescriptor(mirror::Class* klass) {
@@ -428,14 +444,6 @@ void Heap::DumpObject(std::ostream& stream, mirror::Object* obj) {
break;
}
}
- if (space == nullptr) {
- if (allocator_mem_map_.get() == nullptr || !allocator_mem_map_->HasAddress(obj)) {
- stream << "obj " << obj << " not a valid heap address";
- return;
- } else if (allocator_mem_map_.get() != nullptr) {
- allocator_mem_map_->Protect(PROT_READ | PROT_WRITE);
- }
- }
// Unprotect all the spaces.
for (const auto& space : continuous_spaces_) {
mprotect(space->Begin(), space->Capacity(), PROT_READ | PROT_WRITE);
@@ -478,7 +486,7 @@ void Heap::IncrementDisableMovingGC(Thread* self) {
ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
MutexLock mu(self, *gc_complete_lock_);
++disable_moving_gc_count_;
- if (IsCompactingGC(collector_type_running_)) {
+ if (IsMovingGc(collector_type_running_)) {
WaitForGcToCompleteLocked(self);
}
}
@@ -496,12 +504,12 @@ void Heap::UpdateProcessState(ProcessState process_state) {
// Start at index 1 to avoid "is always false" warning.
// Have iteration 1 always transition the collector.
TransitionCollector((((i & 1) == 1) == (process_state_ == kProcessStateJankPerceptible))
- ? post_zygote_collector_type_ : background_collector_type_);
+ ? foreground_collector_type_ : background_collector_type_);
usleep(kCollectorTransitionStressWait);
}
if (process_state_ == kProcessStateJankPerceptible) {
// Transition back to foreground right away to prevent jank.
- RequestCollectorTransition(post_zygote_collector_type_, 0);
+ RequestCollectorTransition(foreground_collector_type_, 0);
} else {
// Don't delay for debug builds since we may want to stress test the GC.
RequestCollectorTransition(background_collector_type_, kIsDebugBuild ? 0 :
@@ -626,6 +634,10 @@ void Heap::RemoveSpace(space::Space* space) {
}
if (continuous_space == main_space_) {
main_space_ = nullptr;
+ } else if (continuous_space == bump_pointer_space_) {
+ bump_pointer_space_ = nullptr;
+ } else if (continuous_space == temp_space_) {
+ temp_space_ = nullptr;
}
} else {
DCHECK(space->IsDiscontinuousSpace());
@@ -966,8 +978,10 @@ void Heap::Trim() {
managed_reclaimed += alloc_space->Trim();
}
}
- total_alloc_space_allocated = GetBytesAllocated() - large_object_space_->GetBytesAllocated() -
- bump_pointer_space_->Size();
+ total_alloc_space_allocated = GetBytesAllocated() - large_object_space_->GetBytesAllocated();
+ if (bump_pointer_space_ != nullptr) {
+ total_alloc_space_allocated -= bump_pointer_space_->Size();
+ }
const float managed_utilization = static_cast<float>(total_alloc_space_allocated) /
static_cast<float>(total_alloc_space_size);
uint64_t gc_heap_end_ns = NanoTime();
@@ -1399,7 +1413,7 @@ void Heap::TransitionCollector(CollectorType collector_type) {
ScopedThreadStateChange tsc(self, kWaitingPerformingGc);
Locks::mutator_lock_->AssertNotHeld(self);
const bool copying_transition =
- IsCompactingGC(background_collector_type_) || IsCompactingGC(post_zygote_collector_type_);
+ IsMovingGc(background_collector_type_) || IsMovingGc(foreground_collector_type_);
// Busy wait until we can GC (StartGC can fail if we have a non-zero
// compacting_gc_disable_count_, this should rarely occur).
for (;;) {
@@ -1430,42 +1444,20 @@ void Heap::TransitionCollector(CollectorType collector_type) {
case kCollectorTypeSS:
// Fall-through.
case kCollectorTypeGSS: {
- mprotect(temp_space_->Begin(), temp_space_->Capacity(), PROT_READ | PROT_WRITE);
- CHECK(main_space_ != nullptr);
- Compact(temp_space_, main_space_);
- DCHECK(allocator_mem_map_.get() == nullptr);
- allocator_mem_map_.reset(main_space_->ReleaseMemMap());
- madvise(main_space_->Begin(), main_space_->Size(), MADV_DONTNEED);
- // RemoveSpace does not delete the removed space.
- space::Space* old_space = main_space_;
- RemoveSpace(old_space);
- delete old_space;
+ if (!IsMovingGc(collector_type_)) {
+ // We are transitioning from a non-moving GC to a moving GC. Since we copied from the bump
+ // pointer space during the last transition, it will be protected.
+ bump_pointer_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
+ Compact(bump_pointer_space_, main_space_);
+ }
break;
}
case kCollectorTypeMS:
// Fall through.
case kCollectorTypeCMS: {
- if (IsCompactingGC(collector_type_)) {
- // TODO: Use mem-map from temp space?
- MemMap* mem_map = allocator_mem_map_.release();
- CHECK(mem_map != nullptr);
- size_t starting_size = kDefaultStartingSize;
- size_t initial_size = kDefaultInitialSize;
- mprotect(mem_map->Begin(), initial_size, PROT_READ | PROT_WRITE);
- CHECK(main_space_ == nullptr);
- if (kUseRosAlloc) {
- main_space_ =
- space::RosAllocSpace::CreateFromMemMap(mem_map, "alloc space", starting_size,
- initial_size, mem_map->Size(),
- mem_map->Size(), low_memory_mode_);
- } else {
- main_space_ =
- space::DlMallocSpace::CreateFromMemMap(mem_map, "alloc space", starting_size,
- initial_size, mem_map->Size(),
- mem_map->Size());
- }
- main_space_->SetFootprintLimit(main_space_->Capacity());
- AddSpace(main_space_);
+ if (IsMovingGc(collector_type_)) {
+ // Compact from the bump pointer space to the main space; we don't need to swap semispaces.
+ main_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
Compact(main_space_, bump_pointer_space_);
}
break;
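
Because the main space now survives transitions, the switch above reduces to two symmetric copies instead of tearing spaces down and rebuilding them. Condensed for illustration (Compact() takes the target space first):

    // Foreground -> background (to SS/GSS): evacuate the main space into the
    // bump pointer space, unprotecting its pages first since the previous
    // transition left them protected.
    bump_pointer_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
    Compact(bump_pointer_space_, main_space_);

    // Background -> foreground (to MS/CMS): evacuate back into the main space.
    main_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
    Compact(main_space_, bump_pointer_space_);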
@@ -1661,11 +1653,12 @@ void Heap::PreZygoteFork() {
VLOG(heap) << "Starting PreZygoteFork";
// Trim the pages at the end of the non moving space.
non_moving_space_->Trim();
+ // The end of the non-moving space may be protected; unprotect it so that we can copy the
+ // zygote there.
non_moving_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
// Change the collector to the post zygote one.
- ChangeCollector(post_zygote_collector_type_);
- // TODO: Delete bump_pointer_space_ and temp_pointer_space_?
- if (semi_space_collector_ != nullptr) {
+ if (kCompactZygote) {
+ DCHECK(semi_space_collector_ != nullptr);
// Temporarily disable rosalloc verification because the zygote
// compaction will mess up the rosalloc internal metadata.
ScopedDisableRosAllocVerification disable_rosalloc_verif(this);
@@ -1675,18 +1668,47 @@ void Heap::PreZygoteFork() {
space::BumpPointerSpace target_space("zygote bump space", non_moving_space_->End(),
non_moving_space_->Limit());
// Compact the bump pointer space to a new zygote bump pointer space.
- temp_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
- zygote_collector.SetFromSpace(bump_pointer_space_);
+ bool reset_main_space = false;
+ if (IsMovingGc(collector_type_)) {
+ zygote_collector.SetFromSpace(bump_pointer_space_);
+ } else {
+ CHECK(main_space_ != nullptr);
+ // Copy from the main space.
+ zygote_collector.SetFromSpace(main_space_);
+ reset_main_space = true;
+ }
zygote_collector.SetToSpace(&target_space);
+
+ Runtime::Current()->GetThreadList()->SuspendAll();
zygote_collector.Run(kGcCauseCollectorTransition, false);
- CHECK(temp_space_->IsEmpty());
+ if (IsMovingGc(collector_type_)) {
+ SwapSemiSpaces();
+ }
+ Runtime::Current()->GetThreadList()->ResumeAll();
+
+ if (reset_main_space) {
+ main_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
+ madvise(main_space_->Begin(), main_space_->Capacity(), MADV_DONTNEED);
+ MemMap* mem_map = main_space_->ReleaseMemMap();
+ RemoveSpace(main_space_);
+ delete main_space_;
+ main_space_ = nullptr;
+ CreateMainMallocSpace(mem_map, kDefaultInitialSize, mem_map->Size(), mem_map->Size());
+ AddSpace(main_space_);
+ } else {
+ bump_pointer_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
+ }
+ if (temp_space_ != nullptr) {
+ CHECK(temp_space_->IsEmpty());
+ }
total_objects_freed_ever_ += semi_space_collector_->GetFreedObjects();
total_bytes_freed_ever_ += semi_space_collector_->GetFreedBytes();
// Update the end and write out image.
non_moving_space_->SetEnd(target_space.End());
non_moving_space_->SetLimit(target_space.Limit());
- VLOG(heap) << "Zygote size " << non_moving_space_->Size() << " bytes";
+ VLOG(heap) << "Zygote space size " << non_moving_space_->Size() << " bytes";
}
+ ChangeCollector(foreground_collector_type_);
// Save the old space so that we can remove it after we complete creating the zygote space.
space::MallocSpace* old_alloc_space = non_moving_space_;
// Turn the current alloc space into a zygote space and obtain the new alloc space composed of
@@ -1706,18 +1728,12 @@ void Heap::PreZygoteFork() {
}
space::ZygoteSpace* zygote_space = old_alloc_space->CreateZygoteSpace("alloc space",
low_memory_mode_,
- &main_space_);
+ &non_moving_space_);
delete old_alloc_space;
CHECK(zygote_space != nullptr) << "Failed creating zygote space";
AddSpace(zygote_space, false);
- CHECK(main_space_ != nullptr);
- if (main_space_->IsRosAllocSpace()) {
- rosalloc_space_ = main_space_->AsRosAllocSpace();
- } else if (main_space_->IsDlMallocSpace()) {
- dlmalloc_space_ = main_space_->AsDlMallocSpace();
- }
- main_space_->SetFootprintLimit(main_space_->Capacity());
- AddSpace(main_space_);
+ non_moving_space_->SetFootprintLimit(non_moving_space_->Capacity());
+ AddSpace(non_moving_space_);
have_zygote_space_ = true;
// Enable large object space allocations.
large_object_threshold_ = kDefaultLargeObjectThreshold;
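
PreZygoteFork() now picks its compaction source from the current collector instead of assuming a bump pointer space exists. Condensed from the hunks above, for illustration:

    // Evacuate live objects into a bump space carved out of the tail of the
    // non-moving space; they become part of the zygote space at fork time.
    space::BumpPointerSpace target_space("zygote bump space",
                                         non_moving_space_->End(),
                                         non_moving_space_->Limit());
    if (IsMovingGc(collector_type_)) {
      zygote_collector.SetFromSpace(bump_pointer_space_);  // moving collector
    } else {
      zygote_collector.SetFromSpace(main_space_);  // non-moving: copy main space
      reset_main_space = true;  // rebuild the main space on its mem map afterwards
    }
    zygote_collector.SetToSpace(&target_space);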
@@ -1727,23 +1743,6 @@ void Heap::PreZygoteFork() {
CHECK(mod_union_table != nullptr) << "Failed to create zygote space mod-union table";
AddModUnionTable(mod_union_table);
if (collector::SemiSpace::kUseRememberedSet) {
- // Add a new remembered set for the new main space.
- accounting::RememberedSet* main_space_rem_set =
- new accounting::RememberedSet("Main space remembered set", this, main_space_);
- CHECK(main_space_rem_set != nullptr) << "Failed to create main space remembered set";
- AddRememberedSet(main_space_rem_set);
- }
- // Can't use RosAlloc for non moving space due to thread local buffers.
- // TODO: Non limited space for non-movable objects?
- MemMap* mem_map = post_zygote_non_moving_space_mem_map_.release();
- space::MallocSpace* new_non_moving_space =
- space::DlMallocSpace::CreateFromMemMap(mem_map, "Non moving dlmalloc space", kPageSize,
- 2 * MB, mem_map->Size(), mem_map->Size());
- AddSpace(new_non_moving_space, false);
- CHECK(new_non_moving_space != nullptr) << "Failed to create new non-moving space";
- new_non_moving_space->SetFootprintLimit(new_non_moving_space->Capacity());
- non_moving_space_ = new_non_moving_space;
- if (collector::SemiSpace::kUseRememberedSet) {
// Add a new remembered set for the post-zygote non-moving space.
accounting::RememberedSet* post_zygote_non_moving_space_rem_set =
new accounting::RememberedSet("Post-zygote non-moving space remembered set", this,
@@ -1781,9 +1780,9 @@ void Heap::MarkAllocStack(accounting::SpaceBitmap* bitmap1,
}
void Heap::SwapSemiSpaces() {
- // Swap the spaces so we allocate into the space which we just evacuated.
+ CHECK(bump_pointer_space_ != nullptr);
+ CHECK(temp_space_ != nullptr);
std::swap(bump_pointer_space_, temp_space_);
- bump_pointer_space_->Clear();
}
void Heap::Compact(space::ContinuousMemMapAllocSpace* target_space,
@@ -1826,7 +1825,7 @@ collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type, GcCaus
MutexLock mu(self, *gc_complete_lock_);
// Ensure there is only one GC at a time.
WaitForGcToCompleteLocked(self);
- compacting_gc = IsCompactingGC(collector_type_);
+ compacting_gc = IsMovingGc(collector_type_);
// GC can be disabled if someone has a used GetPrimitiveArrayCritical.
if (compacting_gc && disable_moving_gc_count_ != 0) {
LOG(WARNING) << "Skipping GC due to disable moving GC count " << disable_moving_gc_count_;
@@ -1881,7 +1880,14 @@ collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type, GcCaus
<< "Could not find garbage collector with collector_type="
<< static_cast<size_t>(collector_type_) << " and gc_type=" << gc_type;
ATRACE_BEGIN(StringPrintf("%s %s GC", PrettyCause(gc_cause), collector->GetName()).c_str());
- collector->Run(gc_cause, clear_soft_references || runtime->IsZygote());
+ if (compacting_gc) {
+ runtime->GetThreadList()->SuspendAll();
+ collector->Run(gc_cause, clear_soft_references || runtime->IsZygote());
+ SwapSemiSpaces();
+ runtime->GetThreadList()->ResumeAll();
+ } else {
+ collector->Run(gc_cause, clear_soft_references || runtime->IsZygote());
+ }
total_objects_freed_ever_ += collector->GetFreedObjects();
total_bytes_freed_ever_ += collector->GetFreedBytes();
RequestHeapTrim();
@@ -2484,29 +2490,15 @@ void Heap::SetIdealFootprint(size_t max_allowed_footprint) {
bool Heap::IsMovableObject(const mirror::Object* obj) const {
if (kMovingCollector) {
- DCHECK(!IsInTempSpace(obj));
- if (bump_pointer_space_->HasAddress(obj)) {
- return true;
- }
- // TODO: Refactor this logic into the space itself?
- // Objects in the main space are only copied during background -> foreground transitions or
- // visa versa.
- if (main_space_ != nullptr && main_space_->HasAddress(obj) &&
- (IsCompactingGC(background_collector_type_) ||
- IsCompactingGC(post_zygote_collector_type_))) {
- return true;
+ space::Space* space = FindContinuousSpaceFromObject(obj, true);
+ if (space != nullptr) {
+ // TODO: Check large object?
+ return space->CanMoveObjects();
}
}
return false;
}
-bool Heap::IsInTempSpace(const mirror::Object* obj) const {
- if (temp_space_->HasAddress(obj) && !temp_space_->Contains(obj)) {
- return true;
- }
- return false;
-}
-
void Heap::UpdateMaxNativeFootprint() {
size_t native_size = native_bytes_allocated_;
// TODO: Tune the native heap utilization to be a value other than the java heap utilization.
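
With CanMoveObjects() available on every space (see the space.h hunk below), the movability query collapses into a single space lookup. A sketch of the resulting method, condensed from the hunk above:

    bool Heap::IsMovableObject(const mirror::Object* obj) const {
      if (!kMovingCollector) {
        return false;
      }
      // The second argument tolerates lookup failure; a null result means the
      // object is not in any continuous space (e.g. a large object).
      space::Space* space = FindContinuousSpaceFromObject(obj, true);
      return space != nullptr && space->CanMoveObjects();
    }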
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index a8989ec..912cf7d 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -150,7 +150,7 @@ class Heap {
explicit Heap(size_t initial_size, size_t growth_limit, size_t min_free,
size_t max_free, double target_utilization, size_t capacity,
const std::string& original_image_file_name,
- CollectorType post_zygote_collector_type, CollectorType background_collector_type,
+ CollectorType foreground_collector_type, CollectorType background_collector_type,
size_t parallel_gc_threads, size_t conc_gc_threads, bool low_memory_mode,
size_t long_pause_threshold, size_t long_gc_threshold,
bool ignore_max_footprint, bool use_tlab, bool verify_pre_gc_heap,
@@ -196,8 +196,6 @@ class Heap {
void VisitObjects(ObjectCallback callback, void* arg)
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
- void SwapSemiSpaces() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
-
void CheckPreconditionsForAllocObject(mirror::Class* c, size_t byte_count)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void ThrowOutOfMemoryError(size_t byte_count, bool large_object_allocation);
@@ -249,10 +247,6 @@ class Heap {
// Returns true if there is any chance that the object (obj) will move.
bool IsMovableObject(const mirror::Object* obj) const;
- // Returns true if an object is in the temp space, if this happens its usually indicative of
- // compaction related errors.
- bool IsInTempSpace(const mirror::Object* obj) const;
-
// Enables us to temporarily disable moving GC until objects are released.
void IncrementDisableMovingGC(Thread* self);
void DecrementDisableMovingGC(Thread* self);
@@ -568,7 +562,8 @@ class Heap {
private:
void Compact(space::ContinuousMemMapAllocSpace* target_space,
- space::ContinuousMemMapAllocSpace* source_space);
+ space::ContinuousMemMapAllocSpace* source_space)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
void FinishGC(Thread* self, collector::GcType gc_type) LOCKS_EXCLUDED(gc_complete_lock_);
@@ -580,7 +575,7 @@ class Heap {
static ALWAYS_INLINE bool AllocatorMayHaveConcurrentGC(AllocatorType allocator_type) {
return AllocatorHasAllocationStack(allocator_type);
}
- static bool IsCompactingGC(CollectorType collector_type) {
+ static bool IsMovingGc(CollectorType collector_type) {
return collector_type == kCollectorTypeSS || collector_type == kCollectorTypeGSS ||
collector_type == kCollectorTypeCC;
}
@@ -609,6 +604,10 @@ class Heap {
size_t bytes)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ // Need to do this with mutators paused so that somebody doesn't accidentally allocate into the
+ // wrong space.
+ void SwapSemiSpaces() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+
// Try to allocate a number of bytes, this function never does any GCs. Needs to be inlined so
// that the switch statement is constant optimized in the entrypoints.
template <const bool kInstrumented, const bool kGrow>
@@ -668,6 +667,10 @@ class Heap {
// Find a collector based on GC type.
collector::GarbageCollector* FindCollectorByGcType(collector::GcType gc_type);
+ // Create the main free list space, typically either a RosAlloc space or DlMalloc space.
+ void CreateMainMallocSpace(MemMap* mem_map, size_t initial_size, size_t growth_limit,
+ size_t capacity);
+
// Given the current contents of the alloc space, increase the allowed heap footprint to match
// the target utilization ratio. This should only be called immediately after a full garbage
// collection.
@@ -737,17 +740,10 @@ class Heap {
// A remembered set remembers all of the references from its space to the target space.
SafeMap<space::Space*, accounting::RememberedSet*> remembered_sets_;
- // Keep the free list allocator mem map lying around when we transition to background so that we
- // don't have to worry about virtual address space fragmentation.
- UniquePtr<MemMap> allocator_mem_map_;
-
- // The mem-map which we will use for the non-moving space after the zygote is done forking:
- UniquePtr<MemMap> post_zygote_non_moving_space_mem_map_;
-
// The current collector type.
CollectorType collector_type_;
- // Which collector we will switch to after zygote fork.
- CollectorType post_zygote_collector_type_;
+ // Which collector we use when the app is in the foreground.
+ CollectorType foreground_collector_type_;
// Which collector we will use when the app is notified of a transition to background.
CollectorType background_collector_type_;
// Desired collector type, heap trimming daemon transitions the heap if it is != collector_type_.
diff --git a/runtime/gc/space/bump_pointer_space.cc b/runtime/gc/space/bump_pointer_space.cc
index a955cc8..6bd0526 100644
--- a/runtime/gc/space/bump_pointer_space.cc
+++ b/runtime/gc/space/bump_pointer_space.cc
@@ -38,6 +38,10 @@ BumpPointerSpace* BumpPointerSpace::Create(const std::string& name, size_t capac
return new BumpPointerSpace(name, mem_map.release());
}
+BumpPointerSpace* BumpPointerSpace::CreateFromMemMap(const std::string& name, MemMap* mem_map) {
+ return new BumpPointerSpace(name, mem_map);
+}
+
BumpPointerSpace::BumpPointerSpace(const std::string& name, byte* begin, byte* limit)
: ContinuousMemMapAllocSpace(name, nullptr, begin, begin, limit,
kGcRetentionPolicyAlwaysCollect),
@@ -61,9 +65,6 @@ BumpPointerSpace::BumpPointerSpace(const std::string& name, MemMap* mem_map)
void BumpPointerSpace::Clear() {
// Release the pages back to the operating system.
CHECK_NE(madvise(Begin(), Limit() - Begin(), MADV_DONTNEED), -1) << "madvise failed";
-}
-
-void BumpPointerSpace::Reset() {
// Reset the end of the space back to the beginning, we move the end forward as we allocate
// objects.
SetEnd(Begin());
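
With Reset() folded in, a single Clear() call leaves the space both page-released and logically empty. A usage sketch (hypothetical caller, illustration only):

    bump_pointer_space_->Clear();           // madvise pages away, SetEnd(Begin())
    CHECK(bump_pointer_space_->IsEmpty());  // IsEmpty() is Begin() == End()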
diff --git a/runtime/gc/space/bump_pointer_space.h b/runtime/gc/space/bump_pointer_space.h
index 3ab5df4..ecfeae5 100644
--- a/runtime/gc/space/bump_pointer_space.h
+++ b/runtime/gc/space/bump_pointer_space.h
@@ -43,6 +43,7 @@ class BumpPointerSpace FINAL : public ContinuousMemMapAllocSpace {
// guaranteed to be granted; if it is required, the caller should call Begin on the returned
// space to confirm the request was granted.
static BumpPointerSpace* Create(const std::string& name, size_t capacity, byte* requested_begin);
+ static BumpPointerSpace* CreateFromMemMap(const std::string& name, MemMap* mem_map);
// Allocate num_bytes, returns nullptr if the space is full.
mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
@@ -92,11 +93,8 @@ class BumpPointerSpace FINAL : public ContinuousMemMapAllocSpace {
return nullptr;
}
- // Madvise the memory back to the OS.
- void Clear() OVERRIDE;
-
- // Reset the pointer to the start of the space.
- void Reset() OVERRIDE LOCKS_EXCLUDED(block_lock_);
+ // Reset the space to empty.
+ void Clear() OVERRIDE LOCKS_EXCLUDED(block_lock_);
void Dump(std::ostream& os) const;
@@ -113,6 +111,9 @@ class BumpPointerSpace FINAL : public ContinuousMemMapAllocSpace {
return Begin() == End();
}
+ bool CanMoveObjects() const OVERRIDE {
+ return true;
+ }
bool Contains(const mirror::Object* obj) const {
const byte* byte_obj = reinterpret_cast<const byte*>(obj);
diff --git a/runtime/gc/space/dlmalloc_space-inl.h b/runtime/gc/space/dlmalloc_space-inl.h
index 02d8b54..4c8a35e 100644
--- a/runtime/gc/space/dlmalloc_space-inl.h
+++ b/runtime/gc/space/dlmalloc_space-inl.h
@@ -52,7 +52,7 @@ inline size_t DlMallocSpace::AllocationSizeNonvirtual(mirror::Object* obj, size_
inline mirror::Object* DlMallocSpace::AllocWithoutGrowthLocked(Thread* /*self*/, size_t num_bytes,
size_t* bytes_allocated,
size_t* usable_size) {
- mirror::Object* result = reinterpret_cast<mirror::Object*>(mspace_malloc(mspace_for_alloc_, num_bytes));
+ mirror::Object* result = reinterpret_cast<mirror::Object*>(mspace_malloc(mspace_, num_bytes));
if (LIKELY(result != NULL)) {
if (kDebugSpaces) {
CHECK(Contains(result)) << "Allocation (" << reinterpret_cast<void*>(result)
diff --git a/runtime/gc/space/dlmalloc_space.cc b/runtime/gc/space/dlmalloc_space.cc
index 60f566c..be88b33 100644
--- a/runtime/gc/space/dlmalloc_space.cc
+++ b/runtime/gc/space/dlmalloc_space.cc
@@ -36,15 +36,19 @@ static constexpr bool kPrefetchDuringDlMallocFreeList = true;
template class ValgrindMallocSpace<DlMallocSpace, void*>;
DlMallocSpace::DlMallocSpace(const std::string& name, MemMap* mem_map, void* mspace, byte* begin,
- byte* end, byte* limit, size_t growth_limit)
- : MallocSpace(name, mem_map, begin, end, limit, growth_limit),
- mspace_(mspace), mspace_for_alloc_(mspace) {
+ byte* end, byte* limit, size_t growth_limit,
+ bool can_move_objects, size_t starting_size,
+ size_t initial_size)
+ : MallocSpace(name, mem_map, begin, end, limit, growth_limit, true, can_move_objects,
+ starting_size, initial_size),
+ mspace_(mspace) {
CHECK(mspace != NULL);
}
DlMallocSpace* DlMallocSpace::CreateFromMemMap(MemMap* mem_map, const std::string& name,
size_t starting_size, size_t initial_size,
- size_t growth_limit, size_t capacity) {
+ size_t growth_limit, size_t capacity,
+ bool can_move_objects) {
DCHECK(mem_map != nullptr);
void* mspace = CreateMspace(mem_map->Begin(), starting_size, initial_size);
if (mspace == nullptr) {
@@ -62,14 +66,17 @@ DlMallocSpace* DlMallocSpace::CreateFromMemMap(MemMap* mem_map, const std::strin
byte* begin = mem_map->Begin();
if (Runtime::Current()->RunningOnValgrind()) {
return new ValgrindMallocSpace<DlMallocSpace, void*>(
- name, mem_map, mspace, begin, end, begin + capacity, growth_limit, initial_size);
+ name, mem_map, mspace, begin, end, begin + capacity, growth_limit, initial_size,
+ can_move_objects, starting_size);
} else {
- return new DlMallocSpace(name, mem_map, mspace, begin, end, begin + capacity, growth_limit);
+ return new DlMallocSpace(name, mem_map, mspace, begin, end, begin + capacity, growth_limit,
+ can_move_objects, starting_size, initial_size);
}
}
-DlMallocSpace* DlMallocSpace::Create(const std::string& name, size_t initial_size, size_t growth_limit,
- size_t capacity, byte* requested_begin) {
+DlMallocSpace* DlMallocSpace::Create(const std::string& name, size_t initial_size,
+ size_t growth_limit, size_t capacity, byte* requested_begin,
+ bool can_move_objects) {
uint64_t start_time = 0;
if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
start_time = NanoTime();
@@ -93,7 +100,7 @@ DlMallocSpace* DlMallocSpace::Create(const std::string& name, size_t initial_siz
return nullptr;
}
DlMallocSpace* space = CreateFromMemMap(mem_map, name, starting_size, initial_size,
- growth_limit, capacity);
+ growth_limit, capacity, can_move_objects);
// We start out with only the initial size possibly containing objects.
if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
LOG(INFO) << "DlMallocSpace::Create exiting (" << PrettyDuration(NanoTime() - start_time)
@@ -143,8 +150,10 @@ mirror::Object* DlMallocSpace::AllocWithGrowth(Thread* self, size_t num_bytes,
MallocSpace* DlMallocSpace::CreateInstance(const std::string& name, MemMap* mem_map,
void* allocator, byte* begin, byte* end,
- byte* limit, size_t growth_limit) {
- return new DlMallocSpace(name, mem_map, allocator, begin, end, limit, growth_limit);
+ byte* limit, size_t growth_limit,
+ bool can_move_objects) {
+ return new DlMallocSpace(name, mem_map, allocator, begin, end, limit, growth_limit,
+ can_move_objects, starting_size_, initial_size_);
}
size_t DlMallocSpace::Free(Thread* self, mirror::Object* ptr) {
@@ -280,13 +289,13 @@ uint64_t DlMallocSpace::GetObjectsAllocated() {
}
void DlMallocSpace::Clear() {
+ size_t footprint_limit = GetFootprintLimit();
madvise(GetMemMap()->Begin(), GetMemMap()->Size(), MADV_DONTNEED);
- GetLiveBitmap()->Clear();
- GetMarkBitmap()->Clear();
-}
-
-void DlMallocSpace::Reset() {
- // TODO: Delete and create new mspace here.
+ live_bitmap_->Clear();
+ mark_bitmap_->Clear();
+ end_ = Begin() + starting_size_;
+ mspace_ = CreateMspace(mem_map_->Begin(), starting_size_, initial_size_);
+ SetFootprintLimit(footprint_limit);
}
#ifndef NDEBUG
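
Unlike the old Reset() stub, Clear() now genuinely reconstructs the allocator in place, which is why the header hunk below drops const from mspace_ (the RosAlloc analogue does the same for rosalloc_). The same steps as the hunk above, annotated:

    size_t footprint_limit = GetFootprintLimit();  // survives the reset
    madvise(GetMemMap()->Begin(), GetMemMap()->Size(), MADV_DONTNEED);
    live_bitmap_->Clear();                         // no live objects remain
    mark_bitmap_->Clear();
    end_ = Begin() + starting_size_;               // rewind to the initial footprint
    mspace_ = CreateMspace(mem_map_->Begin(), starting_size_, initial_size_);
    SetFootprintLimit(footprint_limit);            // re-apply the saved limit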
diff --git a/runtime/gc/space/dlmalloc_space.h b/runtime/gc/space/dlmalloc_space.h
index 76c4489..accd26b 100644
--- a/runtime/gc/space/dlmalloc_space.h
+++ b/runtime/gc/space/dlmalloc_space.h
@@ -36,14 +36,15 @@ class DlMallocSpace : public MallocSpace {
// Create a DlMallocSpace from an existing mem_map.
static DlMallocSpace* CreateFromMemMap(MemMap* mem_map, const std::string& name,
size_t starting_size, size_t initial_size,
- size_t growth_limit, size_t capacity);
+ size_t growth_limit, size_t capacity,
+ bool can_move_objects);
// Create a DlMallocSpace with the requested sizes. The requested
// base address is not guaranteed to be granted; if it is required,
// the caller should call Begin on the returned space to confirm the
// request was granted.
static DlMallocSpace* Create(const std::string& name, size_t initial_size, size_t growth_limit,
- size_t capacity, byte* requested_begin);
+ size_t capacity, byte* requested_begin, bool can_move_objects);
// Virtual to allow ValgrindMallocSpace to intercept.
virtual mirror::Object* AllocWithGrowth(Thread* self, size_t num_bytes, size_t* bytes_allocated,
@@ -107,13 +108,13 @@ class DlMallocSpace : public MallocSpace {
void SetFootprintLimit(size_t limit) OVERRIDE;
MallocSpace* CreateInstance(const std::string& name, MemMap* mem_map, void* allocator,
- byte* begin, byte* end, byte* limit, size_t growth_limit);
+ byte* begin, byte* end, byte* limit, size_t growth_limit,
+ bool can_move_objects);
uint64_t GetBytesAllocated() OVERRIDE;
uint64_t GetObjectsAllocated() OVERRIDE;
- void Clear() OVERRIDE;
- void Reset() OVERRIDE;
+ virtual void Clear() OVERRIDE;
bool IsDlMallocSpace() const OVERRIDE {
return true;
@@ -125,7 +126,8 @@ class DlMallocSpace : public MallocSpace {
protected:
DlMallocSpace(const std::string& name, MemMap* mem_map, void* mspace, byte* begin, byte* end,
- byte* limit, size_t growth_limit);
+ byte* limit, size_t growth_limit, bool can_move_objects, size_t starting_size,
+ size_t initial_size);
private:
mirror::Object* AllocWithoutGrowthLocked(Thread* self, size_t num_bytes, size_t* bytes_allocated,
@@ -142,11 +144,7 @@ class DlMallocSpace : public MallocSpace {
static const size_t kChunkOverhead = kWordSize;
// Underlying malloc space.
- void* const mspace_;
-
- // An mspace pointer used for allocation. Equals mspace_ or nullptr after InvalidateAllocator()
- // is called.
- void* mspace_for_alloc_;
+ void* mspace_;
friend class collector::MarkSweep;
diff --git a/runtime/gc/space/dlmalloc_space_base_test.cc b/runtime/gc/space/dlmalloc_space_base_test.cc
index 508d869..129eace 100644
--- a/runtime/gc/space/dlmalloc_space_base_test.cc
+++ b/runtime/gc/space/dlmalloc_space_base_test.cc
@@ -23,7 +23,7 @@ namespace space {
MallocSpace* CreateDlMallocSpace(const std::string& name, size_t initial_size, size_t growth_limit,
size_t capacity, byte* requested_begin) {
- return DlMallocSpace::Create(name, initial_size, growth_limit, capacity, requested_begin);
+ return DlMallocSpace::Create(name, initial_size, growth_limit, capacity, requested_begin, false);
}
TEST_SPACE_CREATE_FN_BASE(DlMallocSpace, CreateDlMallocSpace)
diff --git a/runtime/gc/space/dlmalloc_space_random_test.cc b/runtime/gc/space/dlmalloc_space_random_test.cc
index 43a1bf0..c4f8bae 100644
--- a/runtime/gc/space/dlmalloc_space_random_test.cc
+++ b/runtime/gc/space/dlmalloc_space_random_test.cc
@@ -23,7 +23,7 @@ namespace space {
MallocSpace* CreateDlMallocSpace(const std::string& name, size_t initial_size, size_t growth_limit,
size_t capacity, byte* requested_begin) {
- return DlMallocSpace::Create(name, initial_size, growth_limit, capacity, requested_begin);
+ return DlMallocSpace::Create(name, initial_size, growth_limit, capacity, requested_begin, false);
}
TEST_SPACE_CREATE_FN_RANDOM(DlMallocSpace, CreateDlMallocSpace)
diff --git a/runtime/gc/space/dlmalloc_space_static_test.cc b/runtime/gc/space/dlmalloc_space_static_test.cc
index 4fbc81e..edaa198 100644
--- a/runtime/gc/space/dlmalloc_space_static_test.cc
+++ b/runtime/gc/space/dlmalloc_space_static_test.cc
@@ -23,7 +23,7 @@ namespace space {
MallocSpace* CreateDlMallocSpace(const std::string& name, size_t initial_size, size_t growth_limit,
size_t capacity, byte* requested_begin) {
- return DlMallocSpace::Create(name, initial_size, growth_limit, capacity, requested_begin);
+ return DlMallocSpace::Create(name, initial_size, growth_limit, capacity, requested_begin, false);
}
TEST_SPACE_CREATE_FN_STATIC(DlMallocSpace, CreateDlMallocSpace)
diff --git a/runtime/gc/space/image_space.h b/runtime/gc/space/image_space.h
index 116c498..6b63d10 100644
--- a/runtime/gc/space/image_space.h
+++ b/runtime/gc/space/image_space.h
@@ -75,6 +75,10 @@ class ImageSpace : public MemMapSpace {
void Sweep(bool /* swap_bitmaps */, size_t* /* freed_objects */, size_t* /* freed_bytes */) {
}
+ bool CanMoveObjects() const OVERRIDE {
+ return false;
+ }
+
private:
// Tries to initialize an ImageSpace from the given image path,
// returning NULL on error.
diff --git a/runtime/gc/space/large_object_space.h b/runtime/gc/space/large_object_space.h
index eb01325..18e518f 100644
--- a/runtime/gc/space/large_object_space.h
+++ b/runtime/gc/space/large_object_space.h
@@ -75,6 +75,10 @@ class LargeObjectSpace : public DiscontinuousSpace, public AllocSpace {
void Sweep(bool swap_bitmaps, size_t* freed_objects, size_t* freed_bytes);
+ virtual bool CanMoveObjects() const OVERRIDE {
+ return false;
+ }
+
protected:
explicit LargeObjectSpace(const std::string& name);
diff --git a/runtime/gc/space/malloc_space.cc b/runtime/gc/space/malloc_space.cc
index dac043e..c3ca096 100644
--- a/runtime/gc/space/malloc_space.cc
+++ b/runtime/gc/space/malloc_space.cc
@@ -37,10 +37,12 @@ size_t MallocSpace::bitmap_index_ = 0;
MallocSpace::MallocSpace(const std::string& name, MemMap* mem_map,
byte* begin, byte* end, byte* limit, size_t growth_limit,
- bool create_bitmaps)
+ bool create_bitmaps, bool can_move_objects, size_t starting_size,
+ size_t initial_size)
: ContinuousMemMapAllocSpace(name, mem_map, begin, end, limit, kGcRetentionPolicyAlwaysCollect),
recent_free_pos_(0), lock_("allocation space lock", kAllocSpaceLock),
- growth_limit_(growth_limit) {
+ growth_limit_(growth_limit), can_move_objects_(can_move_objects),
+ starting_size_(starting_size), initial_size_(initial_size) {
if (create_bitmaps) {
size_t bitmap_index = bitmap_index_++;
static const uintptr_t kGcCardSize = static_cast<uintptr_t>(accounting::CardTable::kCardSize);
@@ -201,7 +203,7 @@ ZygoteSpace* MallocSpace::CreateZygoteSpace(const char* alloc_space_name, bool l
CHECK_MEMORY_CALL(mprotect, (end, capacity - initial_size, PROT_NONE), alloc_space_name);
}
*out_malloc_space = CreateInstance(alloc_space_name, mem_map.release(), allocator, end_, end,
- limit_, growth_limit);
+ limit_, growth_limit, CanMoveObjects());
SetLimit(End());
live_bitmap_->SetHeapLimit(reinterpret_cast<uintptr_t>(End()));
CHECK_EQ(live_bitmap_->HeapLimit(), reinterpret_cast<uintptr_t>(End()));
diff --git a/runtime/gc/space/malloc_space.h b/runtime/gc/space/malloc_space.h
index fbcee5f..dd4e5d4 100644
--- a/runtime/gc/space/malloc_space.h
+++ b/runtime/gc/space/malloc_space.h
@@ -114,7 +114,8 @@ class MallocSpace : public ContinuousMemMapAllocSpace {
void SetGrowthLimit(size_t growth_limit);
virtual MallocSpace* CreateInstance(const std::string& name, MemMap* mem_map, void* allocator,
- byte* begin, byte* end, byte* limit, size_t growth_limit) = 0;
+ byte* begin, byte* end, byte* limit, size_t growth_limit,
+ bool can_move_objects) = 0;
// Splits this space into a zygote space and a new malloc space which has our unused memory. When true,
// the low memory mode argument specifies that the heap wishes the created space to be more
@@ -127,9 +128,14 @@ class MallocSpace : public ContinuousMemMapAllocSpace {
// Returns the class of a recently freed object.
mirror::Class* FindRecentFreedObject(const mirror::Object* obj);
+ bool CanMoveObjects() const OVERRIDE {
+ return can_move_objects_;
+ }
+
protected:
MallocSpace(const std::string& name, MemMap* mem_map, byte* begin, byte* end,
- byte* limit, size_t growth_limit, bool create_bitmaps = true);
+ byte* limit, size_t growth_limit, bool create_bitmaps, bool can_move_objects,
+ size_t starting_size, size_t initial_size);
static MemMap* CreateMemMap(const std::string& name, size_t starting_size, size_t* initial_size,
size_t* growth_limit, size_t* capacity, byte* requested_begin);
@@ -167,6 +173,13 @@ class MallocSpace : public ContinuousMemMapAllocSpace {
// one time by a call to ClearGrowthLimit.
size_t growth_limit_;
+ // True if objects in the space are movable.
+ const bool can_move_objects_;
+
+ // Starting and initial sizes, used when the space is reset.
+ const size_t starting_size_;
+ const size_t initial_size_;
+
private:
static void SweepCallback(size_t num_ptrs, mirror::Object** ptrs, void* arg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/gc/space/rosalloc_space.cc b/runtime/gc/space/rosalloc_space.cc
index 5c5e7f8..afac2a2 100644
--- a/runtime/gc/space/rosalloc_space.cc
+++ b/runtime/gc/space/rosalloc_space.cc
@@ -34,19 +34,23 @@ namespace space {
static constexpr bool kPrefetchDuringRosAllocFreeList = true;
-template class ValgrindMallocSpace<RosAllocSpace, allocator::RosAlloc*>;
+// TODO: Fix
+// template class ValgrindMallocSpace<RosAllocSpace, allocator::RosAlloc*>;
RosAllocSpace::RosAllocSpace(const std::string& name, MemMap* mem_map,
art::gc::allocator::RosAlloc* rosalloc, byte* begin, byte* end,
- byte* limit, size_t growth_limit)
- : MallocSpace(name, mem_map, begin, end, limit, growth_limit), rosalloc_(rosalloc) {
- CHECK(rosalloc != NULL);
+ byte* limit, size_t growth_limit, bool can_move_objects,
+ size_t starting_size, size_t initial_size, bool low_memory_mode)
+ : MallocSpace(name, mem_map, begin, end, limit, growth_limit, true, can_move_objects,
+ starting_size, initial_size),
+ rosalloc_(rosalloc), low_memory_mode_(low_memory_mode) {
+ CHECK(rosalloc != nullptr);
}
RosAllocSpace* RosAllocSpace::CreateFromMemMap(MemMap* mem_map, const std::string& name,
size_t starting_size, size_t initial_size,
size_t growth_limit, size_t capacity,
- bool low_memory_mode) {
+ bool low_memory_mode, bool can_move_objects) {
DCHECK(mem_map != nullptr);
allocator::RosAlloc* rosalloc = CreateRosAlloc(mem_map->Begin(), starting_size, initial_size,
capacity, low_memory_mode);
@@ -66,10 +70,10 @@ RosAllocSpace* RosAllocSpace::CreateFromMemMap(MemMap* mem_map, const std::strin
// TODO: Fix RosAllocSpace to support valgrind. There are currently some issues with
// AllocationSize caused by redzones. b/12944686
if (false && Runtime::Current()->GetHeap()->RunningOnValgrind()) {
- return new ValgrindMallocSpace<RosAllocSpace, allocator::RosAlloc*>(
- name, mem_map, rosalloc, begin, end, begin + capacity, growth_limit, initial_size);
+ LOG(FATAL) << "Unimplemented";
} else {
- return new RosAllocSpace(name, mem_map, rosalloc, begin, end, begin + capacity, growth_limit);
+ return new RosAllocSpace(name, mem_map, rosalloc, begin, end, begin + capacity, growth_limit,
+ can_move_objects, starting_size, initial_size, low_memory_mode);
}
}
@@ -79,7 +83,7 @@ RosAllocSpace::~RosAllocSpace() {
RosAllocSpace* RosAllocSpace::Create(const std::string& name, size_t initial_size,
size_t growth_limit, size_t capacity, byte* requested_begin,
- bool low_memory_mode) {
+ bool low_memory_mode, bool can_move_objects) {
uint64_t start_time = 0;
if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
start_time = NanoTime();
@@ -104,7 +108,8 @@ RosAllocSpace* RosAllocSpace::Create(const std::string& name, size_t initial_siz
}
RosAllocSpace* space = CreateFromMemMap(mem_map, name, starting_size, initial_size,
- growth_limit, capacity, low_memory_mode);
+ growth_limit, capacity, low_memory_mode,
+ can_move_objects);
// We start out with only the initial size possibly containing objects.
if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
LOG(INFO) << "RosAllocSpace::Create exiting (" << PrettyDuration(NanoTime() - start_time)
@@ -113,7 +118,8 @@ RosAllocSpace* RosAllocSpace::Create(const std::string& name, size_t initial_siz
return space;
}
-allocator::RosAlloc* RosAllocSpace::CreateRosAlloc(void* begin, size_t morecore_start, size_t initial_size,
+allocator::RosAlloc* RosAllocSpace::CreateRosAlloc(void* begin, size_t morecore_start,
+ size_t initial_size,
size_t maximum_size, bool low_memory_mode) {
// clear errno to allow PLOG on error
errno = 0;
@@ -154,9 +160,11 @@ mirror::Object* RosAllocSpace::AllocWithGrowth(Thread* self, size_t num_bytes,
}
MallocSpace* RosAllocSpace::CreateInstance(const std::string& name, MemMap* mem_map, void* allocator,
- byte* begin, byte* end, byte* limit, size_t growth_limit) {
+ byte* begin, byte* end, byte* limit, size_t growth_limit,
+ bool can_move_objects) {
return new RosAllocSpace(name, mem_map, reinterpret_cast<allocator::RosAlloc*>(allocator),
- begin, end, limit, growth_limit);
+ begin, end, limit, growth_limit, can_move_objects, starting_size_,
+ initial_size_, low_memory_mode_);
}
size_t RosAllocSpace::Free(Thread* self, mirror::Object* ptr) {
@@ -333,13 +341,15 @@ void RosAllocSpace::AssertAllThreadLocalBuffersAreRevoked() {
}
void RosAllocSpace::Clear() {
+ size_t footprint_limit = GetFootprintLimit();
madvise(GetMemMap()->Begin(), GetMemMap()->Size(), MADV_DONTNEED);
- GetLiveBitmap()->Clear();
- GetMarkBitmap()->Clear();
-}
-
-void RosAllocSpace::Reset() {
- // TODO: Delete and create new mspace here.
+ live_bitmap_->Clear();
+ mark_bitmap_->Clear();
+ end_ = begin_ + starting_size_;
+ delete rosalloc_;
+ rosalloc_ = CreateRosAlloc(mem_map_->Begin(), starting_size_, initial_size_, Capacity(),
+ low_memory_mode_);
+ SetFootprintLimit(footprint_limit);
}
} // namespace space
diff --git a/runtime/gc/space/rosalloc_space.h b/runtime/gc/space/rosalloc_space.h
index 900e7a9..a156738 100644
--- a/runtime/gc/space/rosalloc_space.h
+++ b/runtime/gc/space/rosalloc_space.h
@@ -39,11 +39,12 @@ class RosAllocSpace : public MallocSpace {
// the caller should call Begin on the returned space to confirm the
// request was granted.
static RosAllocSpace* Create(const std::string& name, size_t initial_size, size_t growth_limit,
- size_t capacity, byte* requested_begin, bool low_memory_mode);
+ size_t capacity, byte* requested_begin, bool low_memory_mode,
+ bool can_move_objects);
static RosAllocSpace* CreateFromMemMap(MemMap* mem_map, const std::string& name,
size_t starting_size, size_t initial_size,
size_t growth_limit, size_t capacity,
- bool low_memory_mode);
+ bool low_memory_mode, bool can_move_objects);
mirror::Object* AllocWithGrowth(Thread* self, size_t num_bytes, size_t* bytes_allocated,
size_t* usable_size) OVERRIDE LOCKS_EXCLUDED(lock_);
@@ -80,9 +81,10 @@ class RosAllocSpace : public MallocSpace {
void SetFootprintLimit(size_t limit) OVERRIDE;
void Clear() OVERRIDE;
- void Reset() OVERRIDE;
+
MallocSpace* CreateInstance(const std::string& name, MemMap* mem_map, void* allocator,
- byte* begin, byte* end, byte* limit, size_t growth_limit);
+ byte* begin, byte* end, byte* limit, size_t growth_limit,
+ bool can_move_objects) OVERRIDE;
uint64_t GetBytesAllocated() OVERRIDE;
uint64_t GetObjectsAllocated() OVERRIDE;
@@ -110,7 +112,8 @@ class RosAllocSpace : public MallocSpace {
protected:
RosAllocSpace(const std::string& name, MemMap* mem_map, allocator::RosAlloc* rosalloc,
- byte* begin, byte* end, byte* limit, size_t growth_limit);
+ byte* begin, byte* end, byte* limit, size_t growth_limit, bool can_move_objects,
+ size_t starting_size, size_t initial_size, bool low_memory_mode);
private:
mirror::Object* AllocCommon(Thread* self, size_t num_bytes, size_t* bytes_allocated,
@@ -132,7 +135,9 @@ class RosAllocSpace : public MallocSpace {
LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_, Locks::thread_list_lock_);
// Underlying rosalloc.
- allocator::RosAlloc* const rosalloc_;
+ allocator::RosAlloc* rosalloc_;
+
+ const bool low_memory_mode_;
friend class collector::MarkSweep;
diff --git a/runtime/gc/space/rosalloc_space_base_test.cc b/runtime/gc/space/rosalloc_space_base_test.cc
index df42076..c3157fa 100644
--- a/runtime/gc/space/rosalloc_space_base_test.cc
+++ b/runtime/gc/space/rosalloc_space_base_test.cc
@@ -23,7 +23,7 @@ namespace space {
MallocSpace* CreateRosAllocSpace(const std::string& name, size_t initial_size, size_t growth_limit,
size_t capacity, byte* requested_begin) {
return RosAllocSpace::Create(name, initial_size, growth_limit, capacity, requested_begin,
- Runtime::Current()->GetHeap()->IsLowMemoryMode());
+ Runtime::Current()->GetHeap()->IsLowMemoryMode(), false);
}
TEST_SPACE_CREATE_FN_BASE(RosAllocSpace, CreateRosAllocSpace)
diff --git a/runtime/gc/space/rosalloc_space_random_test.cc b/runtime/gc/space/rosalloc_space_random_test.cc
index 4d37c9e..864bbc9 100644
--- a/runtime/gc/space/rosalloc_space_random_test.cc
+++ b/runtime/gc/space/rosalloc_space_random_test.cc
@@ -23,7 +23,7 @@ namespace space {
MallocSpace* CreateRosAllocSpace(const std::string& name, size_t initial_size, size_t growth_limit,
size_t capacity, byte* requested_begin) {
return RosAllocSpace::Create(name, initial_size, growth_limit, capacity, requested_begin,
- Runtime::Current()->GetHeap()->IsLowMemoryMode());
+ Runtime::Current()->GetHeap()->IsLowMemoryMode(), false);
}
TEST_SPACE_CREATE_FN_RANDOM(RosAllocSpace, CreateRosAllocSpace)
diff --git a/runtime/gc/space/rosalloc_space_static_test.cc b/runtime/gc/space/rosalloc_space_static_test.cc
index 9f11fd0..c0e2ac8 100644
--- a/runtime/gc/space/rosalloc_space_static_test.cc
+++ b/runtime/gc/space/rosalloc_space_static_test.cc
@@ -23,7 +23,7 @@ namespace space {
MallocSpace* CreateRosAllocSpace(const std::string& name, size_t initial_size, size_t growth_limit,
size_t capacity, byte* requested_begin) {
return RosAllocSpace::Create(name, initial_size, growth_limit, capacity, requested_begin,
- Runtime::Current()->GetHeap()->IsLowMemoryMode());
+ Runtime::Current()->GetHeap()->IsLowMemoryMode(), false);
}
TEST_SPACE_CREATE_FN_STATIC(RosAllocSpace, CreateRosAllocSpace)
diff --git a/runtime/gc/space/space.h b/runtime/gc/space/space.h
index 37d7c80..c9022f1 100644
--- a/runtime/gc/space/space.h
+++ b/runtime/gc/space/space.h
@@ -160,6 +160,9 @@ class Space {
}
virtual ContinuousMemMapAllocSpace* AsContinuousMemMapAllocSpace();
+ // Returns true if objects in the space are movable.
+ virtual bool CanMoveObjects() const = 0;
+
virtual ~Space() {}
protected:
@@ -396,12 +399,9 @@ class ContinuousMemMapAllocSpace : public MemMapSpace, public AllocSpace {
// Swap the live and mark bitmaps of this space. This is used by the GC for concurrent sweeping.
void SwapBitmaps();
- // Free all memory associated with this space.
+ // Reset the space back to an empty space and release memory.
virtual void Clear() = 0;
- // Reset the space back to an empty space.
- virtual void Reset() = 0;
-
accounting::SpaceBitmap* GetLiveBitmap() const {
return live_bitmap_.get();
}
diff --git a/runtime/gc/space/valgrind_malloc_space-inl.h b/runtime/gc/space/valgrind_malloc_space-inl.h
index ed97e60..966c276 100644
--- a/runtime/gc/space/valgrind_malloc_space-inl.h
+++ b/runtime/gc/space/valgrind_malloc_space-inl.h
@@ -95,8 +95,10 @@ template <typename S, typename A>
ValgrindMallocSpace<S, A>::ValgrindMallocSpace(const std::string& name, MemMap* mem_map,
A allocator, byte* begin,
byte* end, byte* limit, size_t growth_limit,
- size_t initial_size) :
- S(name, mem_map, allocator, begin, end, limit, growth_limit) {
+ size_t initial_size,
+ bool can_move_objects, size_t starting_size) :
+ S(name, mem_map, allocator, begin, end, limit, growth_limit, can_move_objects, starting_size,
+ initial_size) {
VALGRIND_MAKE_MEM_UNDEFINED(mem_map->Begin() + initial_size, mem_map->Size() - initial_size);
}
diff --git a/runtime/gc/space/valgrind_malloc_space.h b/runtime/gc/space/valgrind_malloc_space.h
index 6b755c4..200ad83 100644
--- a/runtime/gc/space/valgrind_malloc_space.h
+++ b/runtime/gc/space/valgrind_malloc_space.h
@@ -48,7 +48,7 @@ class ValgrindMallocSpace FINAL : public BaseMallocSpaceType {
ValgrindMallocSpace(const std::string& name, MemMap* mem_map, AllocatorType allocator,
byte* begin, byte* end, byte* limit, size_t growth_limit,
- size_t initial_size);
+ size_t initial_size, bool can_move_objects, size_t starting_size);
virtual ~ValgrindMallocSpace() {}
private:
diff --git a/runtime/gc/space/zygote_space.cc b/runtime/gc/space/zygote_space.cc
index d1c3d03..a60ab38 100644
--- a/runtime/gc/space/zygote_space.cc
+++ b/runtime/gc/space/zygote_space.cc
@@ -61,10 +61,6 @@ void ZygoteSpace::Clear() {
LOG(FATAL) << "Unimplemented";
}
-void ZygoteSpace::Reset() {
- LOG(FATAL) << "Unimplemented";
-}
-
ZygoteSpace::ZygoteSpace(const std::string& name, MemMap* mem_map, size_t objects_allocated)
: ContinuousMemMapAllocSpace(name, mem_map, mem_map->Begin(), mem_map->End(), mem_map->End(),
kGcRetentionPolicyFullCollect),
diff --git a/runtime/gc/space/zygote_space.h b/runtime/gc/space/zygote_space.h
index 8880548..30370aa 100644
--- a/runtime/gc/space/zygote_space.h
+++ b/runtime/gc/space/zygote_space.h
@@ -72,7 +72,10 @@ class ZygoteSpace FINAL : public ContinuousMemMapAllocSpace {
}
void Clear() OVERRIDE;
- void Reset() OVERRIDE;
+
+ bool CanMoveObjects() const OVERRIDE {
+ return false;
+ }
protected:
virtual accounting::SpaceBitmap::SweepCallback* GetSweepCallback() {