author | Ian Rogers <irogers@google.com> | 2014-01-06 12:55:46 -0800
committer | Ian Rogers <irogers@google.com> | 2014-02-06 23:20:27 -0800
commit | ef7d42fca18c16fbaf103822ad16f23246e2905d
tree | c67eea52a349c2ea7f2c3bdda8e73933c05531a8 /runtime/gc/space
parent | 822115a225185d2896607eb08d70ce5c7099adef
Object model changes to support 64bit.
Modify mirror objects so that references between them use an ObjectReference
value type rather than an Object*, so that functionality to compress larger
references can be captured in the ObjectReference implementation.
ObjectReferences are 32bit and all other aspects of object layout remain as
they are currently.
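
A minimal sketch of the idea follows, assuming an uncompressed heap that fits below 4GB. The class and helper names are invented for illustration; this is not the actual mirror::ObjectReference introduced by this change.

```cpp
#include <stdint.h>

namespace mirror { class Object; }

// Hypothetical sketch: a 32-bit slot that stands in for an Object* inside heap
// objects. With a sub-4GB heap the reference is simply the pointer narrowed to
// 32 bits; a real implementation could instead store a shifted or offset value.
class ObjectReferenceSketch {
 public:
  static ObjectReferenceSketch FromPtr(mirror::Object* ptr) {
    ObjectReferenceSketch ref;
    ref.reference_ = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(ptr));
    return ref;
  }

  mirror::Object* AsPtr() const {
    return reinterpret_cast<mirror::Object*>(static_cast<uintptr_t>(reference_));
  }

 private:
  uint32_t reference_;  // Always 32 bits, regardless of the native pointer width.
};
```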
Expand fields in objects holding pointers so they can hold 64bit pointers. It is
expected that the size of these will come down by improving where we hold compiler
meta-data.
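
A rough sketch of the "wide field" idea, with invented names (not the accessors ART actually uses): store a native pointer in a fixed 64-bit slot so the object layout is identical on 32-bit and 64-bit builds, with the upper half simply zero on 32-bit targets.

```cpp
#include <stdint.h>

// Hypothetical illustration of a field widened to hold a native pointer on
// either a 32-bit or a 64-bit target without changing the object layout.
struct NativePointerField {
  uint64_t value;

  template <typename T>
  void Set(T* ptr) {
    value = static_cast<uint64_t>(reinterpret_cast<uintptr_t>(ptr));
  }

  template <typename T>
  T* Get() const {
    return reinterpret_cast<T*>(static_cast<uintptr_t>(value));
  }
};
```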
Stub out x86_64 architecture specific runtime implementation.
Modify OutputStream so that reads and writes are of unsigned quantities.
Make the use of portable or quick code more explicit.
Templatize AtomicInteger to support more than just int32_t as a type.
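
A sketch of what templatizing such a wrapper can look like, shown here on top of std::atomic. This only illustrates the interface shape (including a FetchAndAdd like the one used on the bitmap index below); it is not ART's Atomic implementation.

```cpp
#include <atomic>
#include <stdint.h>

// Hypothetical sketch: an AtomicInteger-style wrapper generalized to any
// integral type T (e.g. uint32_t, uint64_t) instead of being fixed to int32_t.
template <typename T>
class AtomicSketch {
 public:
  explicit AtomicSketch(T value = 0) : value_(value) {}

  T Load() const { return value_.load(std::memory_order_seq_cst); }
  void Store(T desired) { value_.store(desired, std::memory_order_seq_cst); }

  // Returns the previous value, mirroring FetchAndAdd in the diff below.
  T FetchAndAdd(T delta) { return value_.fetch_add(delta, std::memory_order_seq_cst); }

 private:
  std::atomic<T> value_;
};

// Usage analogous to the image-space bitmap index counter (hypothetical):
//   AtomicSketch<uint32_t> bitmap_index(0);
//   uint32_t index = bitmap_index.FetchAndAdd(1);
```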
Add missing annotalysis information on the mutator lock, and fix related issues.
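
For context, the SHARED_LOCKS_REQUIRED annotations that appear throughout the diff below are typically wired up along these lines. This is a sketch of the common pattern, not ART's exact macro definitions.

```cpp
// With Clang's -Wthread-safety the attribute lets the compiler check that
// callers hold the named lock (e.g. Locks::mutator_lock_) for reading; on
// other compilers the macro compiles away to nothing.
#if defined(__clang__)
#define SHARED_LOCKS_REQUIRED(...) __attribute__((shared_locks_required(__VA_ARGS__)))
#else
#define SHARED_LOCKS_REQUIRED(...)
#endif

// Example of the kind of declaration added below (hypothetical standalone form):
//   virtual size_t Free(Thread* self, mirror::Object* ptr)
//       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
```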
Refactor array copy so that System and other uses elsewhere in the runtime share
a single implementation.
Fix numerous 64bit build issues.
Change-Id: I1a5694c251a42c9eff71084dfdd4b51fff716822
Diffstat (limited to 'runtime/gc/space')
-rw-r--r-- | runtime/gc/space/bump_pointer_space.cc | 4
-rw-r--r-- | runtime/gc/space/bump_pointer_space.h | 6
-rw-r--r-- | runtime/gc/space/dlmalloc_space.cc | 2
-rw-r--r-- | runtime/gc/space/dlmalloc_space.h | 10
-rw-r--r-- | runtime/gc/space/image_space.cc | 6
-rw-r--r-- | runtime/gc/space/image_space.h | 2
-rw-r--r-- | runtime/gc/space/large_object_space.cc | 10
-rw-r--r-- | runtime/gc/space/large_object_space.h | 5
-rw-r--r-- | runtime/gc/space/malloc_space.cc | 2
-rw-r--r-- | runtime/gc/space/malloc_space.h | 27
-rw-r--r-- | runtime/gc/space/rosalloc_space.cc | 2
-rw-r--r-- | runtime/gc/space/rosalloc_space.h | 10
-rw-r--r-- | runtime/gc/space/space.h | 2
-rw-r--r-- | runtime/gc/space/space_test.cc | 72
-rw-r--r-- | runtime/gc/space/zygote_space.h | 2
15 files changed, 91 insertions, 71 deletions
diff --git a/runtime/gc/space/bump_pointer_space.cc b/runtime/gc/space/bump_pointer_space.cc
index 4dc17df..a314d74 100644
--- a/runtime/gc/space/bump_pointer_space.cc
+++ b/runtime/gc/space/bump_pointer_space.cc
@@ -29,7 +29,7 @@ BumpPointerSpace* BumpPointerSpace::Create(const std::string& name, size_t capac
   capacity = RoundUp(capacity, kPageSize);
   std::string error_msg;
   UniquePtr<MemMap> mem_map(MemMap::MapAnonymous(name.c_str(), requested_begin, capacity,
-                                                 PROT_READ | PROT_WRITE, &error_msg));
+                                                 PROT_READ | PROT_WRITE, true, &error_msg));
   if (mem_map.get() == nullptr) {
     LOG(ERROR) << "Failed to allocate pages for alloc space (" << name << ") of size "
                << PrettySize(capacity) << " with message " << error_msg;
@@ -69,7 +69,7 @@ mirror::Object* BumpPointerSpace::Alloc(Thread*, size_t num_bytes, size_t* bytes
   return ret;
 }
 
-size_t BumpPointerSpace::AllocationSize(const mirror::Object* obj) {
+size_t BumpPointerSpace::AllocationSize(mirror::Object* obj) {
   return AllocationSizeNonvirtual(obj);
 }
 
diff --git a/runtime/gc/space/bump_pointer_space.h b/runtime/gc/space/bump_pointer_space.h
index 3e25b6b..d73fe3b 100644
--- a/runtime/gc/space/bump_pointer_space.h
+++ b/runtime/gc/space/bump_pointer_space.h
@@ -49,8 +49,7 @@ class BumpPointerSpace : public ContinuousMemMapAllocSpace {
   mirror::Object* AllocNonvirtualWithoutAccounting(size_t num_bytes);
 
   // Return the storage space required by obj.
-  virtual size_t AllocationSize(const mirror::Object* obj)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  virtual size_t AllocationSize(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // NOPS unless we support free lists.
   virtual size_t Free(Thread*, mirror::Object*) {
@@ -60,7 +59,7 @@ class BumpPointerSpace : public ContinuousMemMapAllocSpace {
     return 0;
   }
 
-  size_t AllocationSizeNonvirtual(const mirror::Object* obj)
+  size_t AllocationSizeNonvirtual(mirror::Object* obj)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return obj->SizeOf();
   }
@@ -135,7 +134,6 @@ class BumpPointerSpace : public ContinuousMemMapAllocSpace {
   byte* AllocBlock(size_t bytes) EXCLUSIVE_LOCKS_REQUIRED(block_lock_);
   void RevokeThreadLocalBuffersLocked(Thread* thread) EXCLUSIVE_LOCKS_REQUIRED(block_lock_);
 
-  size_t InternalAllocationSize(const mirror::Object* obj);
   mirror::Object* AllocWithoutGrowthLocked(size_t num_bytes, size_t* bytes_allocated)
       EXCLUSIVE_LOCKS_REQUIRED(lock_);
 
diff --git a/runtime/gc/space/dlmalloc_space.cc b/runtime/gc/space/dlmalloc_space.cc
index 9ae6a33..931ed21 100644
--- a/runtime/gc/space/dlmalloc_space.cc
+++ b/runtime/gc/space/dlmalloc_space.cc
@@ -228,7 +228,7 @@ extern "C" void* art_heap_morecore(void* mspace, intptr_t increment) {
   return dlmalloc_space->MoreCore(increment);
 }
 
-size_t DlMallocSpace::AllocationSize(const mirror::Object* obj) {
+size_t DlMallocSpace::AllocationSize(mirror::Object* obj) {
   return AllocationSizeNonvirtual(obj);
 }
 
diff --git a/runtime/gc/space/dlmalloc_space.h b/runtime/gc/space/dlmalloc_space.h
index 24308f7..4507c36 100644
--- a/runtime/gc/space/dlmalloc_space.h
+++ b/runtime/gc/space/dlmalloc_space.h
@@ -48,13 +48,15 @@ class DlMallocSpace : public MallocSpace {
   virtual mirror::Object* AllocWithGrowth(Thread* self, size_t num_bytes, size_t* bytes_allocated)
       LOCKS_EXCLUDED(lock_);
   virtual mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated);
-  virtual size_t AllocationSize(const mirror::Object* obj);
-  virtual size_t Free(Thread* self, mirror::Object* ptr);
-  virtual size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs);
+  virtual size_t AllocationSize(mirror::Object* obj);
+  virtual size_t Free(Thread* self, mirror::Object* ptr)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  virtual size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   mirror::Object* AllocNonvirtual(Thread* self, size_t num_bytes, size_t* bytes_allocated);
 
-  size_t AllocationSizeNonvirtual(const mirror::Object* obj) {
+  size_t AllocationSizeNonvirtual(mirror::Object* obj) {
     void* obj_ptr = const_cast<void*>(reinterpret_cast<const void*>(obj));
     return mspace_usable_size(obj_ptr) + kChunkOverhead;
   }
 
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index 4777cc6..ebad8dd 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -35,7 +35,7 @@ namespace art {
 namespace gc {
 namespace space {
 
-AtomicInteger ImageSpace::bitmap_index_(0);
+Atomic<uint32_t> ImageSpace::bitmap_index_(0);
 
 ImageSpace::ImageSpace(const std::string& name, MemMap* mem_map,
                        accounting::SpaceBitmap* live_bitmap)
@@ -171,7 +171,7 @@ void ImageSpace::VerifyImageAllocations() {
   byte* current = Begin() + RoundUp(sizeof(ImageHeader), kObjectAlignment);
   while (current < End()) {
     DCHECK_ALIGNED(current, kObjectAlignment);
-    const mirror::Object* obj = reinterpret_cast<const mirror::Object*>(current);
+    mirror::Object* obj = reinterpret_cast<mirror::Object*>(current);
     CHECK(live_bitmap_->Test(obj));
     CHECK(obj->GetClass() != nullptr) << "Image object at address " << obj << " has null class";
     current += RoundUp(obj->SizeOf(), kObjectAlignment);
@@ -227,7 +227,7 @@ ImageSpace* ImageSpace::Init(const char* image_file_name, bool validate_oat_file
     *error_msg = StringPrintf("Failed to map image bitmap: %s", error_msg->c_str());
     return nullptr;
   }
-  size_t bitmap_index = bitmap_index_.FetchAndAdd(1);
+  uint32_t bitmap_index = bitmap_index_.FetchAndAdd(1);
   std::string bitmap_name(StringPrintf("imagespace %s live-bitmap %u", image_file_name,
                                        bitmap_index));
   UniquePtr<accounting::SpaceBitmap> bitmap(
diff --git a/runtime/gc/space/image_space.h b/runtime/gc/space/image_space.h
index c3f0ae6..9e19774 100644
--- a/runtime/gc/space/image_space.h
+++ b/runtime/gc/space/image_space.h
@@ -94,7 +94,7 @@ class ImageSpace : public MemMapSpace {
 
   friend class Space;
 
-  static AtomicInteger bitmap_index_;
+  static Atomic<uint32_t> bitmap_index_;
 
   UniquePtr<accounting::SpaceBitmap> live_bitmap_;
 
diff --git a/runtime/gc/space/large_object_space.cc b/runtime/gc/space/large_object_space.cc
index 7fcfed4..987a655 100644
--- a/runtime/gc/space/large_object_space.cc
+++ b/runtime/gc/space/large_object_space.cc
@@ -60,7 +60,7 @@ mirror::Object* LargeObjectMapSpace::Alloc(Thread* self, size_t num_bytes,
                                            size_t* bytes_allocated) {
   std::string error_msg;
   MemMap* mem_map = MemMap::MapAnonymous("large object space allocation", NULL, num_bytes,
-                                         PROT_READ | PROT_WRITE, &error_msg);
+                                         PROT_READ | PROT_WRITE, true, &error_msg);
   if (UNLIKELY(mem_map == NULL)) {
     LOG(WARNING) << "Large object allocation failed: " << error_msg;
     return NULL;
@@ -92,9 +92,9 @@ size_t LargeObjectMapSpace::Free(Thread* self, mirror::Object* ptr) {
   return allocation_size;
 }
 
-size_t LargeObjectMapSpace::AllocationSize(const mirror::Object* obj) {
+size_t LargeObjectMapSpace::AllocationSize(mirror::Object* obj) {
   MutexLock mu(Thread::Current(), lock_);
-  MemMaps::iterator found = mem_maps_.find(const_cast<mirror::Object*>(obj));
+  MemMaps::iterator found = mem_maps_.find(obj);
   CHECK(found != mem_maps_.end()) << "Attempted to get size of a large object which is not live";
   return found->second->Size();
 }
@@ -134,7 +134,7 @@ FreeListSpace* FreeListSpace::Create(const std::string& name, byte* requested_be
   CHECK_EQ(size % kAlignment, 0U);
   std::string error_msg;
   MemMap* mem_map = MemMap::MapAnonymous(name.c_str(), requested_begin, size,
-                                         PROT_READ | PROT_WRITE, &error_msg);
+                                         PROT_READ | PROT_WRITE, true, &error_msg);
   CHECK(mem_map != NULL) << "Failed to allocate large object space mem map: " << error_msg;
   return new FreeListSpace(name, mem_map, mem_map->Begin(), mem_map->End());
 }
@@ -244,7 +244,7 @@ bool FreeListSpace::Contains(const mirror::Object* obj) const {
   return mem_map_->HasAddress(obj);
 }
 
-size_t FreeListSpace::AllocationSize(const mirror::Object* obj) {
+size_t FreeListSpace::AllocationSize(mirror::Object* obj) {
   AllocationHeader* header = GetAllocationHeader(obj);
   DCHECK(Contains(obj));
   DCHECK(!header->IsFree());
diff --git a/runtime/gc/space/large_object_space.h b/runtime/gc/space/large_object_space.h
index cd7c383..5274c8d 100644
--- a/runtime/gc/space/large_object_space.h
+++ b/runtime/gc/space/large_object_space.h
@@ -92,7 +92,7 @@ class LargeObjectMapSpace : public LargeObjectSpace {
   static LargeObjectMapSpace* Create(const std::string& name);
 
   // Return the storage space required by obj.
-  size_t AllocationSize(const mirror::Object* obj);
+  size_t AllocationSize(mirror::Object* obj);
   mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated);
   size_t Free(Thread* self, mirror::Object* ptr);
   void Walk(DlMallocSpace::WalkCallback, void* arg) LOCKS_EXCLUDED(lock_);
@@ -118,8 +118,7 @@ class FreeListSpace : public LargeObjectSpace {
   virtual ~FreeListSpace();
   static FreeListSpace* Create(const std::string& name, byte* requested_begin, size_t capacity);
 
-  size_t AllocationSize(const mirror::Object* obj)
-      EXCLUSIVE_LOCKS_REQUIRED(lock_);
+  size_t AllocationSize(mirror::Object* obj) EXCLUSIVE_LOCKS_REQUIRED(lock_);
   mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated);
   size_t Free(Thread* self, mirror::Object* obj);
   bool Contains(const mirror::Object* obj) const;
diff --git a/runtime/gc/space/malloc_space.cc b/runtime/gc/space/malloc_space.cc
index 6c6cb97..f90e6c7 100644
--- a/runtime/gc/space/malloc_space.cc
+++ b/runtime/gc/space/malloc_space.cc
@@ -87,7 +87,7 @@ MemMap* MallocSpace::CreateMemMap(const std::string& name, size_t starting_size,
   std::string error_msg;
   MemMap* mem_map = MemMap::MapAnonymous(name.c_str(), requested_begin, *capacity,
-                                         PROT_READ | PROT_WRITE, &error_msg);
+                                         PROT_READ | PROT_WRITE, true, &error_msg);
   if (mem_map == nullptr) {
     LOG(ERROR) << "Failed to allocate pages for alloc space (" << name << ") of size "
                << PrettySize(*capacity) << ": " << error_msg;
diff --git a/runtime/gc/space/malloc_space.h b/runtime/gc/space/malloc_space.h
index 9a42e2c..f17bcd2 100644
--- a/runtime/gc/space/malloc_space.h
+++ b/runtime/gc/space/malloc_space.h
@@ -58,9 +58,11 @@ class MallocSpace : public ContinuousMemMapAllocSpace {
   // Allocate num_bytes allowing the underlying space to grow.
   virtual mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated) = 0;
   // Return the storage space required by obj.
-  virtual size_t AllocationSize(const mirror::Object* obj) = 0;
-  virtual size_t Free(Thread* self, mirror::Object* ptr) = 0;
-  virtual size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) = 0;
+  virtual size_t AllocationSize(mirror::Object* obj) = 0;
+  virtual size_t Free(Thread* self, mirror::Object* ptr)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
+  virtual size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
 
 #ifndef NDEBUG
   virtual void CheckMoreCoreForPrecondition() {}  // to be overridden in the debug build.
@@ -136,7 +138,9 @@ class MallocSpace : public ContinuousMemMapAllocSpace {
   virtual void* CreateAllocator(void* base, size_t morecore_start, size_t initial_size,
                                 bool low_memory_mode) = 0;
 
-  void RegisterRecentFree(mirror::Object* ptr) EXCLUSIVE_LOCKS_REQUIRED(lock_);
+  void RegisterRecentFree(mirror::Object* ptr)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      EXCLUSIVE_LOCKS_REQUIRED(lock_);
 
   virtual accounting::SpaceBitmap::SweepCallback* GetSweepCallback() {
     return &SweepCallback;
@@ -163,7 +167,8 @@ class MallocSpace : public ContinuousMemMapAllocSpace {
   size_t growth_limit_;
 
  private:
-  static void SweepCallback(size_t num_ptrs, mirror::Object** ptrs, void* arg);
+  static void SweepCallback(size_t num_ptrs, mirror::Object** ptrs, void* arg)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   DISALLOW_COPY_AND_ASSIGN(MallocSpace);
 };
@@ -204,13 +209,14 @@ class ValgrindMallocSpace : public BaseMallocSpaceType {
     return result;
   }
 
-  virtual size_t AllocationSize(const mirror::Object* obj) {
-    size_t result = BaseMallocSpaceType::AllocationSize(reinterpret_cast<const mirror::Object*>(
-        reinterpret_cast<const byte*>(obj) - kValgrindRedZoneBytes));
+  virtual size_t AllocationSize(mirror::Object* obj) {
+    size_t result = BaseMallocSpaceType::AllocationSize(reinterpret_cast<mirror::Object*>(
+        reinterpret_cast<byte*>(obj) - kValgrindRedZoneBytes));
     return result - 2 * kValgrindRedZoneBytes;
   }
 
-  virtual size_t Free(Thread* self, mirror::Object* ptr) {
+  virtual size_t Free(Thread* self, mirror::Object* ptr)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     void* obj_after_rdz = reinterpret_cast<void*>(ptr);
     void* obj_with_rdz = reinterpret_cast<byte*>(obj_after_rdz) - kValgrindRedZoneBytes;
     // Make redzones undefined.
@@ -221,7 +227,8 @@ class ValgrindMallocSpace : public BaseMallocSpaceType {
     return freed - 2 * kValgrindRedZoneBytes;
   }
 
-  virtual size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) {
+  virtual size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     size_t freed = 0;
     for (size_t i = 0; i < num_ptrs; i++) {
       freed += Free(self, ptrs[i]);
diff --git a/runtime/gc/space/rosalloc_space.cc b/runtime/gc/space/rosalloc_space.cc
index 177e38e..86e441e 100644
--- a/runtime/gc/space/rosalloc_space.cc
+++ b/runtime/gc/space/rosalloc_space.cc
@@ -220,7 +220,7 @@ extern "C" void* art_heap_rosalloc_morecore(allocator::RosAlloc* rosalloc, intpt
   return rosalloc_space->MoreCore(increment);
 }
 
-size_t RosAllocSpace::AllocationSize(const mirror::Object* obj) {
+size_t RosAllocSpace::AllocationSize(mirror::Object* obj) {
   return AllocationSizeNonvirtual(obj);
 }
 
diff --git a/runtime/gc/space/rosalloc_space.h b/runtime/gc/space/rosalloc_space.h
index 555eb3c..4cd5a6d 100644
--- a/runtime/gc/space/rosalloc_space.h
+++ b/runtime/gc/space/rosalloc_space.h
@@ -47,13 +47,15 @@ class RosAllocSpace : public MallocSpace {
   virtual mirror::Object* AllocWithGrowth(Thread* self, size_t num_bytes, size_t* bytes_allocated)
       LOCKS_EXCLUDED(lock_);
   virtual mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated);
-  virtual size_t AllocationSize(const mirror::Object* obj);
-  virtual size_t Free(Thread* self, mirror::Object* ptr);
-  virtual size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs);
+  virtual size_t AllocationSize(mirror::Object* obj);
+  virtual size_t Free(Thread* self, mirror::Object* ptr)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  virtual size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   mirror::Object* AllocNonvirtual(Thread* self, size_t num_bytes, size_t* bytes_allocated);
 
-  size_t AllocationSizeNonvirtual(const mirror::Object* obj)
+  size_t AllocationSizeNonvirtual(mirror::Object* obj)
       NO_THREAD_SAFETY_ANALYSIS {
     // TODO: NO_THREAD_SAFETY_ANALYSIS because SizeOf() requires that mutator_lock is held.
     void* obj_ptr = const_cast<void*>(reinterpret_cast<const void*>(obj));
diff --git a/runtime/gc/space/space.h b/runtime/gc/space/space.h
index 95a79ec..98e6f65 100644
--- a/runtime/gc/space/space.h
+++ b/runtime/gc/space/space.h
@@ -223,7 +223,7 @@ class AllocSpace {
   virtual mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated) = 0;
 
   // Return the storage space required by obj.
-  virtual size_t AllocationSize(const mirror::Object* obj) = 0;
+  virtual size_t AllocationSize(mirror::Object* obj) = 0;
 
   // Returns how many bytes were freed.
   virtual size_t Free(Thread* self, mirror::Object* ptr) = 0;
diff --git a/runtime/gc/space/space_test.cc b/runtime/gc/space/space_test.cc
index 427d547..9989ffe 100644
--- a/runtime/gc/space/space_test.cc
+++ b/runtime/gc/space/space_test.cc
@@ -163,6 +163,7 @@ void SpaceTest::ZygoteSpaceTestBody(CreateSpaceFn create_space) {
   EXPECT_TRUE(ptr5 == NULL);
 
   // Release some memory.
+  ScopedObjectAccess soa(self);
   size_t free3 = space->AllocationSize(ptr3);
   EXPECT_EQ(free3, ptr3_bytes_allocated);
   EXPECT_EQ(free3, space->Free(self, ptr3));
@@ -257,6 +258,7 @@ void SpaceTest::AllocAndFreeTestBody(CreateSpaceFn create_space) {
   EXPECT_TRUE(ptr5 == NULL);
 
   // Release some memory.
+  ScopedObjectAccess soa(self);
   size_t free3 = space->AllocationSize(ptr3);
   EXPECT_EQ(free3, ptr3_bytes_allocated);
   space->Free(self, ptr3);
@@ -354,30 +356,36 @@ void SpaceTest::AllocAndFreeListTestBody(CreateSpaceFn create_space) {
   for (size_t i = 0; i < arraysize(lots_of_objects); i++) {
     size_t allocation_size = 0;
     lots_of_objects[i] = space->Alloc(self, 16, &allocation_size);
-    EXPECT_TRUE(lots_of_objects[i] != NULL);
+    EXPECT_TRUE(lots_of_objects[i] != nullptr);
     InstallClass(lots_of_objects[i], 16);
     EXPECT_EQ(allocation_size, space->AllocationSize(lots_of_objects[i]));
   }
 
-  // Release memory and check pointers are NULL
-  space->FreeList(self, arraysize(lots_of_objects), lots_of_objects);
-  for (size_t i = 0; i < arraysize(lots_of_objects); i++) {
-    EXPECT_TRUE(lots_of_objects[i] == NULL);
+  // Release memory and check pointers are NULL.
+  {
+    ScopedObjectAccess soa(self);
+    space->FreeList(self, arraysize(lots_of_objects), lots_of_objects);
+    for (size_t i = 0; i < arraysize(lots_of_objects); i++) {
+      EXPECT_TRUE(lots_of_objects[i] == nullptr);
+    }
   }
 
   // Succeeds, fits by adjusting the max allowed footprint.
   for (size_t i = 0; i < arraysize(lots_of_objects); i++) {
     size_t allocation_size = 0;
     lots_of_objects[i] = space->AllocWithGrowth(self, 1024, &allocation_size);
-    EXPECT_TRUE(lots_of_objects[i] != NULL);
+    EXPECT_TRUE(lots_of_objects[i] != nullptr);
     InstallClass(lots_of_objects[i], 1024);
     EXPECT_EQ(allocation_size, space->AllocationSize(lots_of_objects[i]));
   }
 
   // Release memory and check pointers are NULL
-  space->FreeList(self, arraysize(lots_of_objects), lots_of_objects);
-  for (size_t i = 0; i < arraysize(lots_of_objects); i++) {
-    EXPECT_TRUE(lots_of_objects[i] == NULL);
+  {
+    ScopedObjectAccess soa(self);
+    space->FreeList(self, arraysize(lots_of_objects), lots_of_objects);
+    for (size_t i = 0; i < arraysize(lots_of_objects); i++) {
+      EXPECT_TRUE(lots_of_objects[i] == nullptr);
+    }
   }
 }
 
@@ -491,28 +499,30 @@ void SpaceTest::SizeFootPrintGrowthLimitAndTrimBody(MallocSpace* space, intptr_t
       break;
    }
 
-    // Free some objects
-    for (size_t i = 0; i < last_object; i += free_increment) {
-      mirror::Object* object = lots_of_objects.get()[i];
-      if (object == NULL) {
-        continue;
-      }
-      size_t allocation_size = space->AllocationSize(object);
-      if (object_size > 0) {
-        EXPECT_GE(allocation_size, static_cast<size_t>(object_size));
-      } else {
-        EXPECT_GE(allocation_size, 8u);
+    {
+      // Free some objects
+      ScopedObjectAccess soa(self);
+      for (size_t i = 0; i < last_object; i += free_increment) {
+        mirror::Object* object = lots_of_objects.get()[i];
+        if (object == NULL) {
+          continue;
+        }
+        size_t allocation_size = space->AllocationSize(object);
+        if (object_size > 0) {
+          EXPECT_GE(allocation_size, static_cast<size_t>(object_size));
+        } else {
+          EXPECT_GE(allocation_size, 8u);
+        }
+        space->Free(self, object);
+        lots_of_objects.get()[i] = NULL;
+        amount_allocated -= allocation_size;
+        footprint = space->GetFootprint();
+        EXPECT_GE(space->Size(), footprint);  // invariant
      }
-      space->Free(self, object);
-      lots_of_objects.get()[i] = NULL;
-      amount_allocated -= allocation_size;
-      footprint = space->GetFootprint();
-      EXPECT_GE(space->Size(), footprint);  // invariant
-    }
-    free_increment >>= 1;
+      free_increment >>= 1;
+    }
   }
-
   // The space has become empty here before allocating a large object
   // below. For RosAlloc, revoke thread-local runs, which are kept
   // even when empty for a performance reason, so that they won't
@@ -540,8 +550,10 @@ void SpaceTest::SizeFootPrintGrowthLimitAndTrimBody(MallocSpace* space, intptr_t
   EXPECT_LE(space->Size(), growth_limit);
 
   // Clean up
-  space->Free(self, large_object);
-
+  {
+    ScopedObjectAccess soa(self);
+    space->Free(self, large_object);
+  }
   // Sanity check footprint
   footprint = space->GetFootprint();
   EXPECT_LE(footprint, growth_limit);
diff --git a/runtime/gc/space/zygote_space.h b/runtime/gc/space/zygote_space.h
index 10a5492..e0035b3 100644
--- a/runtime/gc/space/zygote_space.h
+++ b/runtime/gc/space/zygote_space.h
@@ -54,7 +54,7 @@ class ZygoteSpace : public ContinuousMemMapAllocSpace {
     LOG(FATAL) << "Unimplemented";
     return nullptr;
   }
-  virtual size_t AllocationSize(const mirror::Object* obj) {
+  virtual size_t AllocationSize(mirror::Object* obj) {
     LOG(FATAL) << "Unimplemented";
     return 0;
  }