author | Mathieu Chartier <mathieuc@google.com> | 2015-06-03 13:32:15 -0700
committer | Mathieu Chartier <mathieuc@google.com> | 2015-06-03 15:39:25 -0700
commit | 8f23620d45399286564986d2541cda761b3fe0ac (patch)
tree | 4a33908d156230887c04e32d478396a7ce46b437 /runtime/gc
parent | 99b4dcd19af9343bde1f6fbdb24bb010a70144c9 (diff)
Add a way to determine if a large object is a zygote object
Also fix a slight memory leak in LargeObjectMapSpace.
Bug: 20674158
Change-Id: I2416df484e5b84a8c5cc0b5664c8cb102dc235f6
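To make the shape of the fix concrete: LargeObjectMapSpace previously tracked each large object in two parallel structures (a `large_objects_` vector and a `mem_maps_` map). The patch collapses both into a single map whose value holds the backing `MemMap*` plus the new `is_zygote` flag, so one erase in `Free()` covers all bookkeeping (the commit message notes this also fixes a slight leak). Below is a minimal, self-contained C++ sketch of that bookkeeping pattern, not ART's implementation: `std::map`, `std::mutex`, `std::unique_ptr`, and the toy `Mapping`/`Space` types stand in for ART's `SafeMap`, `Mutex`, `MemMap`, and space classes.

```cpp
#include <cassert>
#include <cstddef>
#include <map>
#include <memory>
#include <mutex>

// Toy stand-in for ART's MemMap: one private mapping per large object.
struct Mapping {
  size_t size;
};

class Space {
 public:
  void* Alloc(size_t bytes) {
    std::lock_guard<std::mutex> lock(lock_);
    auto mapping = std::make_unique<Mapping>(Mapping{bytes});
    void* obj = mapping.get();  // Sketch only: the mapping doubles as the object address.
    // A single entry owns the mapping and carries the zygote flag, so the
    // erase() in Free() cannot leave a stale side-table entry behind.
    objects_.emplace(obj, LargeObject{std::move(mapping), /*is_zygote=*/false});
    return obj;
  }

  size_t Free(void* obj) {
    std::lock_guard<std::mutex> lock(lock_);
    auto it = objects_.find(obj);
    assert(it != objects_.end() && "attempted to free an object that is not live");
    const size_t size = it->second.backing->size;
    objects_.erase(it);  // The unique_ptr releases the mapping here.
    return size;
  }

  bool IsZygoteLargeObject(void* obj) const {
    std::lock_guard<std::mutex> lock(lock_);
    auto it = objects_.find(obj);
    assert(it != objects_.end());
    return it->second.is_zygote;
  }

  // Called once, when the zygote space is created: everything allocated so
  // far becomes a zygote object; later allocations default to false.
  void SetAllLargeObjectsAsZygoteObjects() {
    std::lock_guard<std::mutex> lock(lock_);
    for (auto& pair : objects_) {
      pair.second.is_zygote = true;
    }
  }

 private:
  struct LargeObject {
    std::unique_ptr<Mapping> backing;
    bool is_zygote;
  };
  mutable std::mutex lock_;
  std::map<void*, LargeObject> objects_;
};

int main() {
  Space space;
  void* a = space.Alloc(4096);
  assert(!space.IsZygoteLargeObject(a));
  space.SetAllLargeObjectsAsZygoteObjects();  // e.g. in PreZygoteFork()
  assert(space.IsZygoteLargeObject(a));
  void* b = space.Alloc(8192);                // allocated after the fork
  assert(!space.IsZygoteLargeObject(b));
  space.Free(a);
  space.Free(b);
  return 0;
}
```

FreeListSpace has no per-object map to hang a flag on, so the commit takes a different route there, packing the zygote bit into each block's `AllocationInfo`; a sketch of that scheme follows the diff.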
Diffstat (limited to 'runtime/gc')
-rw-r--r-- | runtime/gc/heap.cc | 1
-rw-r--r-- | runtime/gc/space/large_object_space.cc | 94
-rw-r--r-- | runtime/gc/space/large_object_space.h | 22
-rw-r--r-- | runtime/gc/space/large_object_space_test.cc | 22
4 files changed, 105 insertions, 34 deletions
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 59d0259..f039f6b 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -2252,6 +2252,7 @@ void Heap::PreZygoteFork() {
   // Set all the cards in the mod-union table since we don't know which objects contain references
   // to large objects.
   mod_union_table->SetCards();
+  large_object_space_->SetAllLargeObjectsAsZygoteObjects(self);
   AddModUnionTable(mod_union_table);
   if (collector::SemiSpace::kUseRememberedSet) {
     // Add a new remembered set for the post-zygote non-moving space.
diff --git a/runtime/gc/space/large_object_space.cc b/runtime/gc/space/large_object_space.cc
index da4a930..9436c3f 100644
--- a/runtime/gc/space/large_object_space.cc
+++ b/runtime/gc/space/large_object_space.cc
@@ -41,8 +41,8 @@ class ValgrindLargeObjectMapSpace FINAL : public LargeObjectMapSpace {
   ~ValgrindLargeObjectMapSpace() OVERRIDE {
     // Keep valgrind happy if there is any large objects such as dex cache arrays which aren't
     // freed since they are held live by the class linker.
     MutexLock mu(Thread::Current(), lock_);
-    for (auto& m : mem_maps_) {
-      delete m.second;
+    for (auto& m : large_objects_) {
+      delete m.second.mem_map;
     }
   }
@@ -139,8 +139,7 @@ mirror::Object* LargeObjectMapSpace::Alloc(Thread* self, size_t num_bytes,
     CHECK(space_bitmap == nullptr) << obj_end << " overlaps with bitmap " << *space_bitmap;
   }
   MutexLock mu(self, lock_);
-  large_objects_.push_back(obj);
-  mem_maps_.Put(obj, mem_map);
+  large_objects_.Put(obj, LargeObject {mem_map, false /* not zygote */});
   const size_t allocation_size = mem_map->BaseSize();
   DCHECK(bytes_allocated != nullptr);
   begin_ = std::min(begin_, reinterpret_cast<uint8_t*>(obj));
@@ -161,28 +160,43 @@ mirror::Object* LargeObjectMapSpace::Alloc(Thread* self, size_t num_bytes,
   return obj;
 }
 
+bool LargeObjectMapSpace::IsZygoteLargeObject(Thread* self, mirror::Object* obj) const {
+  MutexLock mu(self, lock_);
+  auto it = large_objects_.find(obj);
+  CHECK(it != large_objects_.end());
+  return it->second.is_zygote;
+}
+
+void LargeObjectMapSpace::SetAllLargeObjectsAsZygoteObjects(Thread* self) {
+  MutexLock mu(self, lock_);
+  for (auto& pair : large_objects_) {
+    pair.second.is_zygote = true;
+  }
+}
+
 size_t LargeObjectMapSpace::Free(Thread* self, mirror::Object* ptr) {
   MutexLock mu(self, lock_);
-  MemMaps::iterator found = mem_maps_.find(ptr);
-  if (UNLIKELY(found == mem_maps_.end())) {
-    Runtime::Current()->GetHeap()->DumpSpaces(LOG(ERROR));
+  auto it = large_objects_.find(ptr);
+  if (UNLIKELY(it == large_objects_.end())) {
+    Runtime::Current()->GetHeap()->DumpSpaces(LOG(INTERNAL_FATAL));
     LOG(FATAL) << "Attempted to free large object " << ptr << " which was not live";
   }
-  const size_t map_size = found->second->BaseSize();
+  MemMap* mem_map = it->second.mem_map;
+  const size_t map_size = mem_map->BaseSize();
   DCHECK_GE(num_bytes_allocated_, map_size);
   size_t allocation_size = map_size;
   num_bytes_allocated_ -= allocation_size;
   --num_objects_allocated_;
-  delete found->second;
-  mem_maps_.erase(found);
+  delete mem_map;
+  large_objects_.erase(it);
   return allocation_size;
 }
 
 size_t LargeObjectMapSpace::AllocationSize(mirror::Object* obj, size_t* usable_size) {
   MutexLock mu(Thread::Current(), lock_);
-  auto found = mem_maps_.find(obj);
-  CHECK(found != mem_maps_.end()) << "Attempted to get size of a large object which is not live";
-  size_t alloc_size = found->second->BaseSize();
+  auto it = large_objects_.find(obj);
+  CHECK(it != large_objects_.end()) << "Attempted to get size of a large object which is not live";
+  size_t alloc_size = it->second.mem_map->BaseSize();
   if (usable_size != nullptr) {
     *usable_size = alloc_size;
   }
@@ -202,8 +216,8 @@ size_t LargeObjectSpace::FreeList(Thread* self, size_t num_ptrs, mirror::Object*
 
 void LargeObjectMapSpace::Walk(DlMallocSpace::WalkCallback callback, void* arg) {
   MutexLock mu(Thread::Current(), lock_);
-  for (auto it = mem_maps_.begin(); it != mem_maps_.end(); ++it) {
-    MemMap* mem_map = it->second;
+  for (auto& pair : large_objects_) {
+    MemMap* mem_map = pair.second.mem_map;
     callback(mem_map->Begin(), mem_map->End(), mem_map->Size(), arg);
     callback(nullptr, nullptr, 0, arg);
   }
@@ -213,22 +227,25 @@ bool LargeObjectMapSpace::Contains(const mirror::Object* obj) const {
   Thread* self = Thread::Current();
   if (lock_.IsExclusiveHeld(self)) {
     // We hold lock_ so do the check.
-    return mem_maps_.find(const_cast<mirror::Object*>(obj)) != mem_maps_.end();
+    return large_objects_.find(const_cast<mirror::Object*>(obj)) != large_objects_.end();
   } else {
     MutexLock mu(self, lock_);
-    return mem_maps_.find(const_cast<mirror::Object*>(obj)) != mem_maps_.end();
+    return large_objects_.find(const_cast<mirror::Object*>(obj)) != large_objects_.end();
   }
 }
 
 // Keeps track of allocation sizes + whether or not the previous allocation is free.
-// Used to coalesce free blocks and find the best fit block for an allocation.
+// Used to coalesce free blocks and find the best fit block for an allocation for best fit object
+// allocation. Each allocation has an AllocationInfo which contains the size of the previous free
+// block preceding it. Implemented in such a way that we can also find the iterator for any
+// allocation info pointer.
 class AllocationInfo {
  public:
   AllocationInfo() : prev_free_(0), alloc_size_(0) {
   }
   // Return the number of pages that the allocation info covers.
   size_t AlignSize() const {
-    return alloc_size_ & ~kFlagFree;
+    return alloc_size_ & kFlagsMask;
   }
   // Returns the allocation size in bytes.
   size_t ByteSize() const {
@@ -237,11 +254,21 @@ class AllocationInfo {
   // Updates the allocation size and whether or not it is free.
   void SetByteSize(size_t size, bool free) {
     DCHECK_ALIGNED(size, FreeListSpace::kAlignment);
-    alloc_size_ = (size / FreeListSpace::kAlignment) | (free ? kFlagFree : 0U);
+    alloc_size_ = (size / FreeListSpace::kAlignment) | (free ? kFlagFree : 0u);
   }
+  // Returns true if the block is free.
   bool IsFree() const {
     return (alloc_size_ & kFlagFree) != 0;
   }
+  // Return true if the large object is a zygote object.
+  bool IsZygoteObject() const {
+    return (alloc_size_ & kFlagZygote) != 0;
+  }
+  // Change the object to be a zygote object.
+  void SetZygoteObject() {
+    alloc_size_ |= kFlagZygote;
+  }
+  // Return true if this is a zygote large object.
   // Finds and returns the next non free allocation info after ourself.
   AllocationInfo* GetNextInfo() {
     return this + AlignSize();
@@ -275,10 +302,9 @@ class AllocationInfo {
   }
 
  private:
-  // Used to implement best fit object allocation. Each allocation has an AllocationInfo which
-  // contains the size of the previous free block preceding it. Implemented in such a way that we
-  // can also find the iterator for any allocation info pointer.
-  static constexpr uint32_t kFlagFree = 0x8000000;
+  static constexpr uint32_t kFlagFree = 0x80000000;  // If block is free.
+  static constexpr uint32_t kFlagZygote = 0x40000000;  // If the large object is a zygote object.
+  static constexpr uint32_t kFlagsMask = ~(kFlagFree | kFlagZygote);  // Combined flags for masking.
   // Contains the size of the previous free block with kAlignment as the unit. If 0 then the
   // allocation before us is not free.
   // These variables are undefined in the middle of allocations / free blocks.
@@ -493,7 +519,7 @@ mirror::Object* FreeListSpace::Alloc(Thread* self, size_t num_bytes, size_t* byt
 }
 
 void FreeListSpace::Dump(std::ostream& os) const {
-  MutexLock mu(Thread::Current(), const_cast<Mutex&>(lock_));
+  MutexLock mu(Thread::Current(), lock_);
   os << GetName() << " -"
      << " begin: " << reinterpret_cast<void*>(Begin())
      << " end: " << reinterpret_cast<void*>(End()) << "\n";
@@ -519,6 +545,24 @@ void FreeListSpace::Dump(std::ostream& os) const {
   }
 }
 
+bool FreeListSpace::IsZygoteLargeObject(Thread* self ATTRIBUTE_UNUSED, mirror::Object* obj) const {
+  const AllocationInfo* info = GetAllocationInfoForAddress(reinterpret_cast<uintptr_t>(obj));
+  DCHECK(info != nullptr);
+  return info->IsZygoteObject();
+}
+
+void FreeListSpace::SetAllLargeObjectsAsZygoteObjects(Thread* self) {
+  MutexLock mu(self, lock_);
+  uintptr_t free_end_start = reinterpret_cast<uintptr_t>(end_) - free_end_;
+  for (AllocationInfo* cur_info = GetAllocationInfoForAddress(reinterpret_cast<uintptr_t>(Begin())),
+      *end_info = GetAllocationInfoForAddress(free_end_start); cur_info < end_info;
+      cur_info = cur_info->GetNextInfo()) {
+    if (!cur_info->IsFree()) {
+      cur_info->SetZygoteObject();
+    }
+  }
+}
+
 void LargeObjectSpace::SweepCallback(size_t num_ptrs, mirror::Object** ptrs, void* arg) {
   SweepCallbackContext* context = static_cast<SweepCallbackContext*>(arg);
   space::LargeObjectSpace* space = context->space->AsLargeObjectSpace();
diff --git a/runtime/gc/space/large_object_space.h b/runtime/gc/space/large_object_space.h
index d1f9386..45ed0cd 100644
--- a/runtime/gc/space/large_object_space.h
+++ b/runtime/gc/space/large_object_space.h
@@ -98,6 +98,12 @@ class LargeObjectSpace : public DiscontinuousSpace, public AllocSpace {
   void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) OVERRIDE
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
+  // Return true if the large object is a zygote large object. Potentially slow.
+  virtual bool IsZygoteLargeObject(Thread* self, mirror::Object* obj) const = 0;
+  // Called when we create the zygote space, mark all existing large objects as zygote large
+  // objects.
+  virtual void SetAllLargeObjectsAsZygoteObjects(Thread* self) = 0;
+
  protected:
   explicit LargeObjectSpace(const std::string& name, uint8_t* begin, uint8_t* end);
   static void SweepCallback(size_t num_ptrs, mirror::Object** ptrs, void* arg);
@@ -133,16 +139,20 @@ class LargeObjectMapSpace : public LargeObjectSpace {
   bool Contains(const mirror::Object* obj) const NO_THREAD_SAFETY_ANALYSIS;
 
  protected:
+  struct LargeObject {
+    MemMap* mem_map;
+    bool is_zygote;
+  };
   explicit LargeObjectMapSpace(const std::string& name);
   virtual ~LargeObjectMapSpace() {}
 
+  bool IsZygoteLargeObject(Thread* self, mirror::Object* obj) const OVERRIDE LOCKS_EXCLUDED(lock_);
+  void SetAllLargeObjectsAsZygoteObjects(Thread* self) OVERRIDE LOCKS_EXCLUDED(lock_);
+
   // Used to ensure mutual exclusion when the allocation spaces data structures are being modified.
   mutable Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
-  std::vector<mirror::Object*, TrackingAllocator<mirror::Object*, kAllocatorTagLOS>> large_objects_
+  AllocationTrackingSafeMap<mirror::Object*, LargeObject, kAllocatorTagLOSMaps> large_objects_
       GUARDED_BY(lock_);
-  typedef SafeMap<mirror::Object*, MemMap*, std::less<mirror::Object*>,
-      TrackingAllocator<std::pair<mirror::Object*, MemMap*>, kAllocatorTagLOSMaps>> MemMaps;
-  MemMaps mem_maps_ GUARDED_BY(lock_);
 };
 
 // A continuous large object space with a free-list to handle holes.
@@ -177,6 +187,8 @@ class FreeListSpace FINAL : public LargeObjectSpace {
   }
   // Removes header from the free blocks set by finding the corresponding iterator and erasing it.
   void RemoveFreePrev(AllocationInfo* info) EXCLUSIVE_LOCKS_REQUIRED(lock_);
+  bool IsZygoteLargeObject(Thread* self, mirror::Object* obj) const OVERRIDE;
+  void SetAllLargeObjectsAsZygoteObjects(Thread* self) OVERRIDE;
 
   class SortByPrevFree {
    public:
@@ -192,7 +204,7 @@ class FreeListSpace FINAL : public LargeObjectSpace {
   std::unique_ptr<MemMap> allocation_info_map_;
   AllocationInfo* allocation_info_;
 
-  Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+  mutable Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
   // Free bytes at the end of the space.
   size_t free_end_ GUARDED_BY(lock_);
   FreeBlocks free_blocks_ GUARDED_BY(lock_);
diff --git a/runtime/gc/space/large_object_space_test.cc b/runtime/gc/space/large_object_space_test.cc
index f04a7ca..05b484a 100644
--- a/runtime/gc/space/large_object_space_test.cc
+++ b/runtime/gc/space/large_object_space_test.cc
@@ -34,6 +34,7 @@ class LargeObjectSpaceTest : public SpaceTest {
 
 void LargeObjectSpaceTest::LargeObjectTest() {
   size_t rand_seed = 0;
+  Thread* const self = Thread::Current();
   for (size_t i = 0; i < 2; ++i) {
     LargeObjectSpace* los = nullptr;
     if (i == 0) {
@@ -51,8 +52,8 @@ void LargeObjectSpaceTest::LargeObjectTest() {
       size_t request_size = test_rand(&rand_seed) % max_allocation_size;
       size_t allocation_size = 0;
       size_t bytes_tl_bulk_allocated;
-      mirror::Object* obj = los->Alloc(Thread::Current(), request_size, &allocation_size,
-                                       nullptr, &bytes_tl_bulk_allocated);
+      mirror::Object* obj = los->Alloc(self, request_size, &allocation_size, nullptr,
+                                       &bytes_tl_bulk_allocated);
       ASSERT_TRUE(obj != nullptr);
       ASSERT_EQ(allocation_size, los->AllocationSize(obj, nullptr));
       ASSERT_GE(allocation_size, request_size);
@@ -70,8 +71,21 @@ void LargeObjectSpaceTest::LargeObjectTest() {
       }
     }
 
+    // Check the zygote flag for the first phase.
+    if (phase == 0) {
+      for (const auto& pair : requests) {
+        mirror::Object* obj = pair.first;
+        ASSERT_FALSE(los->IsZygoteLargeObject(self, obj));
+      }
+      los->SetAllLargeObjectsAsZygoteObjects(self);
+      for (const auto& pair : requests) {
+        mirror::Object* obj = pair.first;
+        ASSERT_TRUE(los->IsZygoteLargeObject(self, obj));
+      }
+    }
+
     // Free 1 / 2 the allocations the first phase, and all the second phase.
-    size_t limit = !phase ? requests.size() / 2 : 0;
+    size_t limit = phase == 0 ? requests.size() / 2 : 0;
     while (requests.size() > limit) {
       mirror::Object* obj = requests.back().first;
       size_t request_size = requests.back().second;
@@ -88,7 +102,7 @@ void LargeObjectSpaceTest::LargeObjectTest() {
 
       size_t bytes_allocated = 0, bytes_tl_bulk_allocated;
       // Checks that the coalescing works.
-      mirror::Object* obj = los->Alloc(Thread::Current(), 100 * MB, &bytes_allocated, nullptr,
+      mirror::Object* obj = los->Alloc(self, 100 * MB, &bytes_allocated, nullptr,
                                        &bytes_tl_bulk_allocated);
       EXPECT_TRUE(obj != nullptr);
       los->Free(Thread::Current(), obj);
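On the FreeListSpace side, the zygote bit instead rides in `AllocationInfo`'s packed 32-bit size word next to the existing free bit; the diff above also corrects `kFlagFree` from `0x8000000` (bit 27) to `0x80000000` (bit 31), so the two flags genuinely occupy the top two bits. Below is a minimal sketch of that bit-packing scheme under stated assumptions: `PackedInfo` is an illustrative stand-in for ART's `AllocationInfo`, and 4 KiB alignment units are assumed for `kAlignment`.

```cpp
#include <cassert>
#include <cstdint>
#include <iostream>

// Illustrative stand-in for AllocationInfo's alloc_size_ field: the size in
// kAlignment units lives in the low 30 bits, the flags in the top two bits.
class PackedInfo {
 public:
  static constexpr uint32_t kAlignment = 4096;         // assumed allocation granularity
  static constexpr uint32_t kFlagFree = 0x80000000;    // bit 31: block is free
  static constexpr uint32_t kFlagZygote = 0x40000000;  // bit 30: zygote large object
  static constexpr uint32_t kFlagsMask = ~(kFlagFree | kFlagZygote);

  // Stores the size and the free bit, clearing any previous flags: a block
  // that is reused after the fork starts out non-zygote again.
  void SetByteSize(uint32_t bytes, bool free) {
    assert(bytes % kAlignment == 0);
    word_ = (bytes / kAlignment) | (free ? kFlagFree : 0u);
  }
  uint32_t ByteSize() const { return (word_ & kFlagsMask) * kAlignment; }
  bool IsFree() const { return (word_ & kFlagFree) != 0; }
  bool IsZygoteObject() const { return (word_ & kFlagZygote) != 0; }
  void SetZygoteObject() { word_ |= kFlagZygote; }

 private:
  uint32_t word_ = 0;
};

int main() {
  PackedInfo info;
  info.SetByteSize(8 * PackedInfo::kAlignment, /*free=*/false);  // a live 32 KiB block
  assert(!info.IsFree() && !info.IsZygoteObject());
  info.SetZygoteObject();  // marked when the zygote space is created
  assert(info.IsZygoteObject());
  assert(info.ByteSize() == 8 * PackedInfo::kAlignment);  // size survives the flag bits
  std::cout << "size=" << info.ByteSize() << " zygote=" << info.IsZygoteObject() << "\n";
  return 0;
}
```

With two bits reserved for flags, 30 bits remain for the size in `kAlignment` units (about 4 TiB at the assumed 4 KiB granularity), so borrowing bit 30 for the zygote flag is effectively free; `SetAllLargeObjectsAsZygoteObjects` in the diff can then simply walk the allocation infos and set the bit on every non-free block.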