Diffstat (limited to 'runtime/gc/space')
-rw-r--r--  runtime/gc/space/dlmalloc_space.cc      480
-rw-r--r--  runtime/gc/space/dlmalloc_space.h       185
-rw-r--r--  runtime/gc/space/image_space.cc         129
-rw-r--r--  runtime/gc/space/image_space.h           81
-rw-r--r--  runtime/gc/space/large_object_space.cc  285
-rw-r--r--  runtime/gc/space/large_object_space.h   193
-rw-r--r--  runtime/gc/space/space-inl.h             48
-rw-r--r--  runtime/gc/space/space.cc                47
-rw-r--r--  runtime/gc/space/space.h                295
-rw-r--r--  runtime/gc/space/space_test.cc          429
10 files changed, 2172 insertions, 0 deletions
diff --git a/runtime/gc/space/dlmalloc_space.cc b/runtime/gc/space/dlmalloc_space.cc
new file mode 100644
index 0000000..02acd28
--- /dev/null
+++ b/runtime/gc/space/dlmalloc_space.cc
@@ -0,0 +1,480 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "dlmalloc_space.h"
+#include "gc/accounting/card_table.h"
+#include "gc/heap.h"
+#include "runtime.h"
+#include "thread.h"
+#include "utils.h"
+
+//#include <valgrind/memcheck.h>
+#include <valgrind.h>
+
+namespace art {
+namespace gc {
+namespace space {
+
+// TODO: Remove define macro
+#define CHECK_MEMORY_CALL(call, args, what) \
+ do { \
+ int rc = call args; \
+ if (UNLIKELY(rc != 0)) { \
+ errno = rc; \
+ PLOG(FATAL) << # call << " failed for " << what; \
+ } \
+ } while (false)
+
+static const bool kPrefetchDuringDlMallocFreeList = true;
+
+// Number of bytes to use as a red zone (rdz). A red zone of this size will be placed before and
+// after each allocation. 8 bytes provides long/double alignment.
+const size_t kValgrindRedZoneBytes = 8;
+
+// A specialization of DlMallocSpace that provides information to valgrind wrt allocations.
+class ValgrindDlMallocSpace : public DlMallocSpace {
+ public:
+ virtual mirror::Object* AllocWithGrowth(Thread* self, size_t num_bytes) {
+ void* obj_with_rdz = DlMallocSpace::AllocWithGrowth(self, num_bytes + (2 * kValgrindRedZoneBytes));
+ if (obj_with_rdz != NULL) {
+ //VALGRIND_MAKE_MEM_UNDEFINED();
+ mirror::Object* result = reinterpret_cast<mirror::Object*>(reinterpret_cast<byte*>(obj_with_rdz) +
+ kValgrindRedZoneBytes);
+ VALGRIND_MEMPOOL_ALLOC(GetMspace(), result, num_bytes);
+ LOG(INFO) << "AllocWithGrowth on " << self << " = " << obj_with_rdz
+ << " of size " << num_bytes;
+ return result;
+ } else {
+ return NULL;
+ }
+ }
+
+ virtual mirror::Object* Alloc(Thread* self, size_t num_bytes) {
+ void* obj_with_rdz = DlMallocSpace::Alloc(self, num_bytes + (2 * kValgrindRedZoneBytes));
+ if (obj_with_rdz != NULL) {
+ mirror::Object* result = reinterpret_cast<mirror::Object*>(reinterpret_cast<byte*>(obj_with_rdz) +
+ kValgrindRedZoneBytes);
+ VALGRIND_MEMPOOL_ALLOC(GetMspace(), result, num_bytes);
+ LOG(INFO) << "Alloc on " << self << " = " << obj_with_rdz
+ << " of size " << num_bytes;
+ return result;
+ } else {
+ return NULL;
+ }
+ }
+
+ virtual size_t AllocationSize(const mirror::Object* obj) {
+ const void* obj_after_rdz = reinterpret_cast<const void*>(obj);
+ size_t result = DlMallocSpace::AllocationSize(
+ reinterpret_cast<const mirror::Object*>(reinterpret_cast<const byte*>(obj_after_rdz) -
+ kValgrindRedZoneBytes));
+ return result - (2 * kValgrindRedZoneBytes);
+ }
+
+ virtual size_t Free(Thread* self, mirror::Object* ptr) {
+ void* obj_after_rdz = reinterpret_cast<void*>(ptr);
+ void* obj_with_rdz = reinterpret_cast<byte*>(obj_after_rdz) - kValgrindRedZoneBytes;
+ LOG(INFO) << "Free on " << self << " of " << obj_with_rdz;
+ size_t freed = DlMallocSpace::Free(self, reinterpret_cast<mirror::Object*>(obj_with_rdz));
+ VALGRIND_MEMPOOL_FREE(GetMspace(), obj_after_rdz);
+ return freed - (2 * kValgrindRedZoneBytes);
+ }
+
+ virtual size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) {
+ size_t freed = 0;
+ for (size_t i = 0; i < num_ptrs; i++) {
+ void* obj_after_rdz = reinterpret_cast<void*>(ptrs[i]);
+ void* obj_with_rdz = reinterpret_cast<byte*>(obj_after_rdz) - kValgrindRedZoneBytes;
+ LOG(INFO) << "FreeList on " << self << " of " << obj_with_rdz;
+ freed += DlMallocSpace::Free(self, reinterpret_cast<mirror::Object*>(obj_with_rdz));
+ VALGRIND_MEMPOOL_FREE(GetMspace(), obj_after_rdz);
+ }
+ return freed - (2 * kValgrindRedZoneBytes * num_ptrs);
+ }
+
+ ValgrindDlMallocSpace(const std::string& name, MemMap* mem_map, void* mspace, byte* begin,
+ byte* end, size_t growth_limit) :
+ DlMallocSpace(name, mem_map, mspace, begin, end, growth_limit) {
+ VALGRIND_CREATE_MEMPOOL(GetMspace(), kValgrindRedZoneBytes, true);
+ }
+
+ virtual ~ValgrindDlMallocSpace() {
+ VALGRIND_DESTROY_MEMPOOL(GetMspace());
+ }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(ValgrindDlMallocSpace);
+};
+
+size_t DlMallocSpace::bitmap_index_ = 0;
+
+DlMallocSpace::DlMallocSpace(const std::string& name, MemMap* mem_map, void* mspace, byte* begin,
+ byte* end, size_t growth_limit)
+ : MemMapSpace(name, mem_map, end - begin, kGcRetentionPolicyAlwaysCollect),
+ num_bytes_allocated_(0), num_objects_allocated_(0), total_bytes_allocated_(0),
+ total_objects_allocated_(0), lock_("allocation space lock", kAllocSpaceLock), mspace_(mspace),
+ growth_limit_(growth_limit) {
+ CHECK(mspace != NULL);
+
+ size_t bitmap_index = bitmap_index_++;
+
+ static const uintptr_t kGcCardSize = static_cast<uintptr_t>(accounting::CardTable::kCardSize);
+ CHECK(reinterpret_cast<uintptr_t>(mem_map->Begin()) % kGcCardSize == 0);
+ CHECK(reinterpret_cast<uintptr_t>(mem_map->End()) % kGcCardSize == 0);
+ live_bitmap_.reset(accounting::SpaceBitmap::Create(
+ StringPrintf("allocspace %s live-bitmap %d", name.c_str(), static_cast<int>(bitmap_index)),
+ Begin(), Capacity()));
+ DCHECK(live_bitmap_.get() != NULL) << "could not create allocspace live bitmap #" << bitmap_index;
+
+ mark_bitmap_.reset(accounting::SpaceBitmap::Create(
+ StringPrintf("allocspace %s mark-bitmap %d", name.c_str(), static_cast<int>(bitmap_index)),
+ Begin(), Capacity()));
+  DCHECK(mark_bitmap_.get() != NULL) << "could not create allocspace mark bitmap #" << bitmap_index;
+}
+
+DlMallocSpace* DlMallocSpace::Create(const std::string& name, size_t initial_size, size_t
+ growth_limit, size_t capacity, byte* requested_begin) {
+ // Memory we promise to dlmalloc before it asks for morecore.
+ // Note: making this value large means that large allocations are unlikely to succeed as dlmalloc
+ // will ask for this memory from sys_alloc which will fail as the footprint (this value plus the
+ // size of the large allocation) will be greater than the footprint limit.
+ size_t starting_size = kPageSize;
+ uint64_t start_time = 0;
+ if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
+ start_time = NanoTime();
+ VLOG(startup) << "Space::CreateAllocSpace entering " << name
+ << " initial_size=" << PrettySize(initial_size)
+ << " growth_limit=" << PrettySize(growth_limit)
+ << " capacity=" << PrettySize(capacity)
+ << " requested_begin=" << reinterpret_cast<void*>(requested_begin);
+ }
+
+ // Sanity check arguments
+ if (starting_size > initial_size) {
+ initial_size = starting_size;
+ }
+ if (initial_size > growth_limit) {
+ LOG(ERROR) << "Failed to create alloc space (" << name << ") where the initial size ("
+ << PrettySize(initial_size) << ") is larger than its capacity ("
+ << PrettySize(growth_limit) << ")";
+ return NULL;
+ }
+ if (growth_limit > capacity) {
+ LOG(ERROR) << "Failed to create alloc space (" << name << ") where the growth limit capacity ("
+ << PrettySize(growth_limit) << ") is larger than the capacity ("
+ << PrettySize(capacity) << ")";
+ return NULL;
+ }
+
+ // Page align growth limit and capacity which will be used to manage mmapped storage
+ growth_limit = RoundUp(growth_limit, kPageSize);
+ capacity = RoundUp(capacity, kPageSize);
+
+ UniquePtr<MemMap> mem_map(MemMap::MapAnonymous(name.c_str(), requested_begin,
+ capacity, PROT_READ | PROT_WRITE));
+ if (mem_map.get() == NULL) {
+ LOG(ERROR) << "Failed to allocate pages for alloc space (" << name << ") of size "
+ << PrettySize(capacity);
+ return NULL;
+ }
+
+ void* mspace = CreateMallocSpace(mem_map->Begin(), starting_size, initial_size);
+ if (mspace == NULL) {
+ LOG(ERROR) << "Failed to initialize mspace for alloc space (" << name << ")";
+ return NULL;
+ }
+
+ // Protect memory beyond the initial size.
+ byte* end = mem_map->Begin() + starting_size;
+ if (capacity - initial_size > 0) {
+ CHECK_MEMORY_CALL(mprotect, (end, capacity - initial_size, PROT_NONE), name);
+ }
+
+ // Everything is set so record in immutable structure and leave
+ MemMap* mem_map_ptr = mem_map.release();
+ DlMallocSpace* space;
+ if (RUNNING_ON_VALGRIND > 0) {
+ space = new ValgrindDlMallocSpace(name, mem_map_ptr, mspace, mem_map_ptr->Begin(), end,
+ growth_limit);
+ } else {
+ space = new DlMallocSpace(name, mem_map_ptr, mspace, mem_map_ptr->Begin(), end, growth_limit);
+ }
+ if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
+ LOG(INFO) << "Space::CreateAllocSpace exiting (" << PrettyDuration(NanoTime() - start_time)
+ << " ) " << *space;
+ }
+ return space;
+}
+
+void* DlMallocSpace::CreateMallocSpace(void* begin, size_t morecore_start, size_t initial_size) {
+ // clear errno to allow PLOG on error
+ errno = 0;
+  // Create mspace using our backing storage starting at begin and with a footprint of
+  // morecore_start. Don't use an internal dlmalloc lock (as we already hold the heap lock). When
+  // morecore_start bytes of memory are exhausted morecore will be called.
+ void* msp = create_mspace_with_base(begin, morecore_start, false /*locked*/);
+ if (msp != NULL) {
+ // Do not allow morecore requests to succeed beyond the initial size of the heap
+ mspace_set_footprint_limit(msp, initial_size);
+ } else {
+ PLOG(ERROR) << "create_mspace_with_base failed";
+ }
+ return msp;
+}
+
+void DlMallocSpace::SwapBitmaps() {
+ live_bitmap_.swap(mark_bitmap_);
+ // Swap names to get more descriptive diagnostics.
+ std::string temp_name(live_bitmap_->GetName());
+ live_bitmap_->SetName(mark_bitmap_->GetName());
+ mark_bitmap_->SetName(temp_name);
+}
+
+mirror::Object* DlMallocSpace::AllocWithoutGrowthLocked(size_t num_bytes) {
+ mirror::Object* result = reinterpret_cast<mirror::Object*>(mspace_calloc(mspace_, 1, num_bytes));
+ if (result != NULL) {
+ if (kDebugSpaces) {
+ CHECK(Contains(result)) << "Allocation (" << reinterpret_cast<void*>(result)
+ << ") not in bounds of allocation space " << *this;
+ }
+ size_t allocation_size = AllocationSize(result);
+ num_bytes_allocated_ += allocation_size;
+ total_bytes_allocated_ += allocation_size;
+ ++total_objects_allocated_;
+ ++num_objects_allocated_;
+ }
+ return result;
+}
+
+mirror::Object* DlMallocSpace::Alloc(Thread* self, size_t num_bytes) {
+ MutexLock mu(self, lock_);
+ return AllocWithoutGrowthLocked(num_bytes);
+}
+
+mirror::Object* DlMallocSpace::AllocWithGrowth(Thread* self, size_t num_bytes) {
+ MutexLock mu(self, lock_);
+ // Grow as much as possible within the mspace.
+ size_t max_allowed = Capacity();
+ mspace_set_footprint_limit(mspace_, max_allowed);
+ // Try the allocation.
+ mirror::Object* result = AllocWithoutGrowthLocked(num_bytes);
+ // Shrink back down as small as possible.
+ size_t footprint = mspace_footprint(mspace_);
+ mspace_set_footprint_limit(mspace_, footprint);
+ // Return the new allocation or NULL.
+ CHECK(!kDebugSpaces || result == NULL || Contains(result));
+ return result;
+}
+
+void DlMallocSpace::SetGrowthLimit(size_t growth_limit) {
+ growth_limit = RoundUp(growth_limit, kPageSize);
+ growth_limit_ = growth_limit;
+ if (Size() > growth_limit_) {
+ end_ = begin_ + growth_limit;
+ }
+}
+
+DlMallocSpace* DlMallocSpace::CreateZygoteSpace() {
+ end_ = reinterpret_cast<byte*>(RoundUp(reinterpret_cast<uintptr_t>(end_), kPageSize));
+ DCHECK(IsAligned<accounting::CardTable::kCardSize>(begin_));
+ DCHECK(IsAligned<accounting::CardTable::kCardSize>(end_));
+ DCHECK(IsAligned<kPageSize>(begin_));
+ DCHECK(IsAligned<kPageSize>(end_));
+ size_t size = RoundUp(Size(), kPageSize);
+ // Trim the heap so that we minimize the size of the Zygote space.
+ Trim();
+ // Trim our mem-map to free unused pages.
+ GetMemMap()->UnMapAtEnd(end_);
+ // TODO: Not hardcode these in?
+ const size_t starting_size = kPageSize;
+ const size_t initial_size = 2 * MB;
+ // Remaining size is for the new alloc space.
+ const size_t growth_limit = growth_limit_ - size;
+ const size_t capacity = Capacity() - size;
+ VLOG(heap) << "Begin " << reinterpret_cast<const void*>(begin_) << "\n"
+ << "End " << reinterpret_cast<const void*>(end_) << "\n"
+ << "Size " << size << "\n"
+ << "GrowthLimit " << growth_limit_ << "\n"
+ << "Capacity " << Capacity();
+ SetGrowthLimit(RoundUp(size, kPageSize));
+ SetFootprintLimit(RoundUp(size, kPageSize));
+ // FIXME: Do we need reference counted pointers here?
+ // Make the two spaces share the same mark bitmaps since the bitmaps span both of the spaces.
+ VLOG(heap) << "Creating new AllocSpace: ";
+ VLOG(heap) << "Size " << GetMemMap()->Size();
+ VLOG(heap) << "GrowthLimit " << PrettySize(growth_limit);
+ VLOG(heap) << "Capacity " << PrettySize(capacity);
+ UniquePtr<MemMap> mem_map(MemMap::MapAnonymous(GetName(), End(), capacity, PROT_READ | PROT_WRITE));
+ void* mspace = CreateMallocSpace(end_, starting_size, initial_size);
+ // Protect memory beyond the initial size.
+ byte* end = mem_map->Begin() + starting_size;
+ if (capacity - initial_size > 0) {
+ CHECK_MEMORY_CALL(mprotect, (end, capacity - initial_size, PROT_NONE), name_.c_str());
+ }
+ DlMallocSpace* alloc_space =
+ new DlMallocSpace(name_, mem_map.release(), mspace, end_, end, growth_limit);
+ live_bitmap_->SetHeapLimit(reinterpret_cast<uintptr_t>(End()));
+ CHECK_EQ(live_bitmap_->HeapLimit(), reinterpret_cast<uintptr_t>(End()));
+ mark_bitmap_->SetHeapLimit(reinterpret_cast<uintptr_t>(End()));
+ CHECK_EQ(mark_bitmap_->HeapLimit(), reinterpret_cast<uintptr_t>(End()));
+ name_ += "-zygote-transformed";
+ VLOG(heap) << "zygote space creation done";
+ return alloc_space;
+}
+
+size_t DlMallocSpace::Free(Thread* self, mirror::Object* ptr) {
+ MutexLock mu(self, lock_);
+ if (kDebugSpaces) {
+ CHECK(ptr != NULL);
+ CHECK(Contains(ptr)) << "Free (" << ptr << ") not in bounds of heap " << *this;
+ }
+ const size_t bytes_freed = InternalAllocationSize(ptr);
+ num_bytes_allocated_ -= bytes_freed;
+ --num_objects_allocated_;
+ mspace_free(mspace_, ptr);
+ return bytes_freed;
+}
+
+size_t DlMallocSpace::FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) {
+ DCHECK(ptrs != NULL);
+
+ // Don't need the lock to calculate the size of the freed pointers.
+ size_t bytes_freed = 0;
+ for (size_t i = 0; i < num_ptrs; i++) {
+ mirror::Object* ptr = ptrs[i];
+ const size_t look_ahead = 8;
+ if (kPrefetchDuringDlMallocFreeList && i + look_ahead < num_ptrs) {
+ // The head of chunk for the allocation is sizeof(size_t) behind the allocation.
+ __builtin_prefetch(reinterpret_cast<char*>(ptrs[i + look_ahead]) - sizeof(size_t));
+ }
+ bytes_freed += InternalAllocationSize(ptr);
+ }
+
+ if (kDebugSpaces) {
+ size_t num_broken_ptrs = 0;
+ for (size_t i = 0; i < num_ptrs; i++) {
+ if (!Contains(ptrs[i])) {
+ num_broken_ptrs++;
+ LOG(ERROR) << "FreeList[" << i << "] (" << ptrs[i] << ") not in bounds of heap " << *this;
+ } else {
+ size_t size = mspace_usable_size(ptrs[i]);
+ memset(ptrs[i], 0xEF, size);
+ }
+ }
+ CHECK_EQ(num_broken_ptrs, 0u);
+ }
+
+ {
+ MutexLock mu(self, lock_);
+ num_bytes_allocated_ -= bytes_freed;
+ num_objects_allocated_ -= num_ptrs;
+ mspace_bulk_free(mspace_, reinterpret_cast<void**>(ptrs), num_ptrs);
+ return bytes_freed;
+ }
+}
+
+// Callback from dlmalloc when it needs to increase the footprint
+extern "C" void* art_heap_morecore(void* mspace, intptr_t increment) {
+ Heap* heap = Runtime::Current()->GetHeap();
+ DCHECK_EQ(heap->GetAllocSpace()->GetMspace(), mspace);
+ return heap->GetAllocSpace()->MoreCore(increment);
+}
+
+void* DlMallocSpace::MoreCore(intptr_t increment) {
+ lock_.AssertHeld(Thread::Current());
+ byte* original_end = end_;
+ if (increment != 0) {
+ VLOG(heap) << "DlMallocSpace::MoreCore " << PrettySize(increment);
+ byte* new_end = original_end + increment;
+ if (increment > 0) {
+ // Should never be asked to increase the allocation beyond the capacity of the space. Enforced
+ // by mspace_set_footprint_limit.
+ CHECK_LE(new_end, Begin() + Capacity());
+ CHECK_MEMORY_CALL(mprotect, (original_end, increment, PROT_READ | PROT_WRITE), GetName());
+ } else {
+ // Should never be asked for negative footprint (ie before begin)
+ CHECK_GT(original_end + increment, Begin());
+ // Advise we don't need the pages and protect them
+      // TODO: by removing permissions to the pages we may be causing TLB shoot-downs which can be
+      // expensive (note the same isn't true for giving permissions to a page as the protected
+      // page shouldn't be in a TLB). We should investigate the performance impact of simply
+      // ignoring the memory protection change here and in Space::CreateAllocSpace; it's likely
+      // just a useful debug feature.
+ size_t size = -increment;
+ CHECK_MEMORY_CALL(madvise, (new_end, size, MADV_DONTNEED), GetName());
+ CHECK_MEMORY_CALL(mprotect, (new_end, size, PROT_NONE), GetName());
+ }
+ // Update end_
+ end_ = new_end;
+ }
+ return original_end;
+}
+
+// Virtual functions can't get inlined.
+inline size_t DlMallocSpace::InternalAllocationSize(const mirror::Object* obj) {
+ return mspace_usable_size(const_cast<void*>(reinterpret_cast<const void*>(obj))) +
+ kChunkOverhead;
+}
+
+size_t DlMallocSpace::AllocationSize(const mirror::Object* obj) {
+ return InternalAllocationSize(obj);
+}
+
+size_t DlMallocSpace::Trim() {
+ MutexLock mu(Thread::Current(), lock_);
+ // Trim to release memory at the end of the space.
+ mspace_trim(mspace_, 0);
+ // Visit space looking for page-sized holes to advise the kernel we don't need.
+ size_t reclaimed = 0;
+ mspace_inspect_all(mspace_, DlmallocMadviseCallback, &reclaimed);
+ return reclaimed;
+}
+
+void DlMallocSpace::Walk(void(*callback)(void *start, void *end, size_t num_bytes, void* callback_arg),
+ void* arg) {
+ MutexLock mu(Thread::Current(), lock_);
+ mspace_inspect_all(mspace_, callback, arg);
+ callback(NULL, NULL, 0, arg); // Indicate end of a space.
+}
+
+size_t DlMallocSpace::GetFootprintLimit() {
+ MutexLock mu(Thread::Current(), lock_);
+ return mspace_footprint_limit(mspace_);
+}
+
+void DlMallocSpace::SetFootprintLimit(size_t new_size) {
+ MutexLock mu(Thread::Current(), lock_);
+ VLOG(heap) << "DLMallocSpace::SetFootprintLimit " << PrettySize(new_size);
+ // Compare against the actual footprint, rather than the Size(), because the heap may not have
+ // grown all the way to the allowed size yet.
+ size_t current_space_size = mspace_footprint(mspace_);
+ if (new_size < current_space_size) {
+ // Don't let the space grow any more.
+ new_size = current_space_size;
+ }
+ mspace_set_footprint_limit(mspace_, new_size);
+}
+
+void DlMallocSpace::Dump(std::ostream& os) const {
+  os << GetType()
+      << " [begin=" << reinterpret_cast<void*>(Begin())
+      << ",end=" << reinterpret_cast<void*>(End())
+      << ",size=" << PrettySize(Size()) << ",capacity=" << PrettySize(Capacity())
+      << ",name=\"" << GetName() << "\"]";
+}
+
+} // namespace space
+} // namespace gc
+} // namespace art
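
An illustrative calling sequence for the space defined above (not taken from this change; the sizes, the space name, and the attached Thread are assumptions, and namespace qualifiers are abbreviated to space::). Create() maps the full capacity up front, builds the mspace over the first starting_size bytes and mprotects everything past the initial size; Alloc() then stays within the current footprint limit, while AllocWithGrowth() temporarily raises the limit to Capacity():

  Thread* self = Thread::Current();
  space::DlMallocSpace* space = space::DlMallocSpace::Create(
      "alloc space", 4 * MB /* initial_size */, 16 * MB /* growth_limit */,
      64 * MB /* capacity */, NULL /* requested_begin */);
  CHECK(space != NULL);
  mirror::Object* obj = space->Alloc(self, 128);   // Fails rather than growing the footprint.
  if (obj == NULL) {
    obj = space->AllocWithGrowth(self, 128);       // May grow the footprint up to Capacity().
  }
  VLOG(heap) << "usable size " << space->AllocationSize(obj);  // Includes kChunkOverhead.
  space->Free(self, obj);
  space->Trim();                                   // Hand unused pages back to the kernel.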
diff --git a/runtime/gc/space/dlmalloc_space.h b/runtime/gc/space/dlmalloc_space.h
new file mode 100644
index 0000000..00df0e6
--- /dev/null
+++ b/runtime/gc/space/dlmalloc_space.h
@@ -0,0 +1,185 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_GC_SPACE_DLMALLOC_SPACE_H_
+#define ART_SRC_GC_SPACE_DLMALLOC_SPACE_H_
+
+#include "gc/allocator/dlmalloc.h"
+#include "space.h"
+
+namespace art {
+namespace gc {
+
+namespace collector {
+ class MarkSweep;
+} // namespace collector
+
+namespace space {
+
+// An alloc space is a space where objects may be allocated and garbage collected.
+class DlMallocSpace : public MemMapSpace, public AllocSpace {
+ public:
+ typedef void(*WalkCallback)(void *start, void *end, size_t num_bytes, void* callback_arg);
+
+ SpaceType GetType() const {
+ if (GetGcRetentionPolicy() == kGcRetentionPolicyFullCollect) {
+ return kSpaceTypeZygoteSpace;
+ } else {
+ return kSpaceTypeAllocSpace;
+ }
+ }
+
+  // Create an AllocSpace with the requested sizes. The requested base address is not
+  // guaranteed to be granted; if it is required, the caller should call Begin on the
+  // returned space to confirm the request was granted.
+ static DlMallocSpace* Create(const std::string& name, size_t initial_size, size_t growth_limit,
+ size_t capacity, byte* requested_begin);
+
+  // Allocate num_bytes allowing the underlying mspace to grow.
+  virtual mirror::Object* AllocWithGrowth(Thread* self, size_t num_bytes);
+
+  // Allocate num_bytes without allowing the underlying mspace to grow.
+  virtual mirror::Object* Alloc(Thread* self, size_t num_bytes);
+
+ // Return the storage space required by obj.
+ virtual size_t AllocationSize(const mirror::Object* obj);
+ virtual size_t Free(Thread* self, mirror::Object* ptr);
+ virtual size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs);
+
+ void* MoreCore(intptr_t increment);
+
+ void* GetMspace() const {
+ return mspace_;
+ }
+
+ // Hands unused pages back to the system.
+ size_t Trim();
+
+ // Perform a mspace_inspect_all which calls back for each allocation chunk. The chunk may not be
+ // in use, indicated by num_bytes equaling zero.
+ void Walk(WalkCallback callback, void* arg);
+
+ // Returns the number of bytes that the heap is allowed to obtain from the system via MoreCore.
+ size_t GetFootprintLimit();
+
+ // Set the maximum number of bytes that the heap is allowed to obtain from the system via
+  // MoreCore. Note this is used to stop the mspace growing beyond the limit given by Capacity. When
+ // allocations fail we GC before increasing the footprint limit and allowing the mspace to grow.
+ void SetFootprintLimit(size_t limit);
+
+ // Removes the fork time growth limit on capacity, allowing the application to allocate up to the
+ // maximum reserved size of the heap.
+ void ClearGrowthLimit() {
+ growth_limit_ = NonGrowthLimitCapacity();
+ }
+
+ // Override capacity so that we only return the possibly limited capacity
+ size_t Capacity() const {
+ return growth_limit_;
+ }
+
+ // The total amount of memory reserved for the alloc space.
+ size_t NonGrowthLimitCapacity() const {
+ return GetMemMap()->Size();
+ }
+
+ accounting::SpaceBitmap* GetLiveBitmap() const {
+ return live_bitmap_.get();
+ }
+
+ accounting::SpaceBitmap* GetMarkBitmap() const {
+ return mark_bitmap_.get();
+ }
+
+ void Dump(std::ostream& os) const;
+
+ void SetGrowthLimit(size_t growth_limit);
+
+ // Swap the live and mark bitmaps of this space. This is used by the GC for concurrent sweeping.
+ void SwapBitmaps();
+
+ // Turn ourself into a zygote space and return a new alloc space which has our unused memory.
+ DlMallocSpace* CreateZygoteSpace();
+
+ uint64_t GetBytesAllocated() const {
+ return num_bytes_allocated_;
+ }
+
+ uint64_t GetObjectsAllocated() const {
+ return num_objects_allocated_;
+ }
+
+ uint64_t GetTotalBytesAllocated() const {
+ return total_bytes_allocated_;
+ }
+
+ uint64_t GetTotalObjectsAllocated() const {
+ return total_objects_allocated_;
+ }
+
+ protected:
+ DlMallocSpace(const std::string& name, MemMap* mem_map, void* mspace, byte* begin, byte* end,
+ size_t growth_limit);
+
+ private:
+ size_t InternalAllocationSize(const mirror::Object* obj);
+ mirror::Object* AllocWithoutGrowthLocked(size_t num_bytes) EXCLUSIVE_LOCKS_REQUIRED(lock_);
+
+ bool Init(size_t initial_size, size_t maximum_size, size_t growth_size, byte* requested_base);
+
+ static void* CreateMallocSpace(void* base, size_t morecore_start, size_t initial_size);
+
+ UniquePtr<accounting::SpaceBitmap> live_bitmap_;
+ UniquePtr<accounting::SpaceBitmap> mark_bitmap_;
+ UniquePtr<accounting::SpaceBitmap> temp_bitmap_;
+
+ // Approximate number of bytes which have been allocated into the space.
+ size_t num_bytes_allocated_;
+ size_t num_objects_allocated_;
+ size_t total_bytes_allocated_;
+ size_t total_objects_allocated_;
+
+ static size_t bitmap_index_;
+
+ // The boundary tag overhead.
+ static const size_t kChunkOverhead = kWordSize;
+
+ // Used to ensure mutual exclusion when the allocation spaces data structures are being modified.
+ Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+
+ // Underlying malloc space
+ void* const mspace_;
+
+ // The capacity of the alloc space until such time that ClearGrowthLimit is called.
+ // The underlying mem_map_ controls the maximum size we allow the heap to grow to. The growth
+ // limit is a value <= to the mem_map_ capacity used for ergonomic reasons because of the zygote.
+ // Prior to forking the zygote the heap will have a maximally sized mem_map_ but the growth_limit_
+ // will be set to a lower value. The growth_limit_ is used as the capacity of the alloc_space_,
+ // however, capacity normally can't vary. In the case of the growth_limit_ it can be cleared
+ // one time by a call to ClearGrowthLimit.
+ size_t growth_limit_;
+
+ friend class collector::MarkSweep;
+
+ DISALLOW_COPY_AND_ASSIGN(DlMallocSpace);
+};
+
+} // namespace space
+} // namespace gc
+} // namespace art
+
+#endif // ART_SRC_GC_SPACE_DLMALLOC_SPACE_H_
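
Walk() above is a thin wrapper over mspace_inspect_all: each chunk is reported with its byte count, chunks that are not in use arrive with num_bytes == 0, and the space appends a callback(NULL, NULL, 0, arg) sentinel (see dlmalloc_space.cc). A small sketch of a WalkCallback that tallies in-use bytes; "alloc_space" stands for any DlMallocSpace* obtained elsewhere:

  // Sum the bytes of chunks that are currently in use.
  static void CountUsedBytes(void* start, void* end, size_t num_bytes, void* arg) {
    if (start == NULL && end == NULL) {
      return;  // Sentinel appended by Walk() to mark the end of a space.
    }
    if (num_bytes != 0) {  // num_bytes == 0 means the chunk is not in use.
      *reinterpret_cast<size_t*>(arg) += num_bytes;
    }
  }

  size_t used_bytes = 0;
  alloc_space->Walk(CountUsedBytes, &used_bytes);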
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
new file mode 100644
index 0000000..46c3937
--- /dev/null
+++ b/runtime/gc/space/image_space.cc
@@ -0,0 +1,129 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "image_space.h"
+
+#include "base/unix_file/fd_file.h"
+#include "gc/accounting/space_bitmap-inl.h"
+#include "mirror/abstract_method.h"
+#include "mirror/class-inl.h"
+#include "mirror/object-inl.h"
+#include "os.h"
+#include "runtime.h"
+#include "space-inl.h"
+#include "utils.h"
+
+namespace art {
+namespace gc {
+namespace space {
+
+size_t ImageSpace::bitmap_index_ = 0;
+
+ImageSpace::ImageSpace(const std::string& name, MemMap* mem_map)
+: MemMapSpace(name, mem_map, mem_map->Size(), kGcRetentionPolicyNeverCollect) {
+ const size_t bitmap_index = bitmap_index_++;
+ live_bitmap_.reset(accounting::SpaceBitmap::Create(
+ StringPrintf("imagespace %s live-bitmap %d", name.c_str(), static_cast<int>(bitmap_index)),
+ Begin(), Capacity()));
+ DCHECK(live_bitmap_.get() != NULL) << "could not create imagespace live bitmap #" << bitmap_index;
+}
+
+ImageSpace* ImageSpace::Create(const std::string& image_file_name) {
+ CHECK(!image_file_name.empty());
+
+ uint64_t start_time = 0;
+ if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
+ start_time = NanoTime();
+ LOG(INFO) << "Space::CreateImageSpace entering" << " image_file_name=" << image_file_name;
+ }
+
+ UniquePtr<File> file(OS::OpenFile(image_file_name.c_str(), false));
+ if (file.get() == NULL) {
+ LOG(ERROR) << "Failed to open " << image_file_name;
+ return NULL;
+ }
+ ImageHeader image_header;
+ bool success = file->ReadFully(&image_header, sizeof(image_header));
+ if (!success || !image_header.IsValid()) {
+ LOG(ERROR) << "Invalid image header " << image_file_name;
+ return NULL;
+ }
+ UniquePtr<MemMap> map(MemMap::MapFileAtAddress(image_header.GetImageBegin(),
+ file->GetLength(),
+ PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_FIXED,
+ file->Fd(),
+ 0,
+ false));
+ if (map.get() == NULL) {
+ LOG(ERROR) << "Failed to map " << image_file_name;
+ return NULL;
+ }
+ CHECK_EQ(image_header.GetImageBegin(), map->Begin());
+ DCHECK_EQ(0, memcmp(&image_header, map->Begin(), sizeof(ImageHeader)));
+
+ Runtime* runtime = Runtime::Current();
+ mirror::Object* resolution_method = image_header.GetImageRoot(ImageHeader::kResolutionMethod);
+ runtime->SetResolutionMethod(down_cast<mirror::AbstractMethod*>(resolution_method));
+
+ mirror::Object* callee_save_method = image_header.GetImageRoot(ImageHeader::kCalleeSaveMethod);
+ runtime->SetCalleeSaveMethod(down_cast<mirror::AbstractMethod*>(callee_save_method), Runtime::kSaveAll);
+ callee_save_method = image_header.GetImageRoot(ImageHeader::kRefsOnlySaveMethod);
+ runtime->SetCalleeSaveMethod(down_cast<mirror::AbstractMethod*>(callee_save_method), Runtime::kRefsOnly);
+ callee_save_method = image_header.GetImageRoot(ImageHeader::kRefsAndArgsSaveMethod);
+ runtime->SetCalleeSaveMethod(down_cast<mirror::AbstractMethod*>(callee_save_method), Runtime::kRefsAndArgs);
+
+ ImageSpace* space = new ImageSpace(image_file_name, map.release());
+ if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
+ LOG(INFO) << "Space::CreateImageSpace exiting (" << PrettyDuration(NanoTime() - start_time)
+ << ") " << *space;
+ }
+ return space;
+}
+
+void ImageSpace::RecordImageAllocations(accounting::SpaceBitmap* live_bitmap) const {
+ uint64_t start_time = 0;
+ if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
+ LOG(INFO) << "ImageSpace::RecordImageAllocations entering";
+ start_time = NanoTime();
+ }
+ DCHECK(!Runtime::Current()->IsStarted());
+ CHECK(live_bitmap != NULL);
+ byte* current = Begin() + RoundUp(sizeof(ImageHeader), kObjectAlignment);
+ byte* end = End();
+ while (current < end) {
+ DCHECK_ALIGNED(current, kObjectAlignment);
+ const mirror::Object* obj = reinterpret_cast<const mirror::Object*>(current);
+ live_bitmap->Set(obj);
+ current += RoundUp(obj->SizeOf(), kObjectAlignment);
+ }
+ if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
+ LOG(INFO) << "ImageSpace::RecordImageAllocations exiting ("
+ << PrettyDuration(NanoTime() - start_time) << ")";
+ }
+}
+
+void ImageSpace::Dump(std::ostream& os) const {
+  os << GetType()
+      << " [begin=" << reinterpret_cast<void*>(Begin())
+ << ",end=" << reinterpret_cast<void*>(End())
+ << ",size=" << PrettySize(Size())
+ << ",name=\"" << GetName() << "\"]";
+}
+
+} // namespace space
+} // namespace gc
+} // namespace art
diff --git a/runtime/gc/space/image_space.h b/runtime/gc/space/image_space.h
new file mode 100644
index 0000000..afec5b7
--- /dev/null
+++ b/runtime/gc/space/image_space.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_GC_SPACE_IMAGE_SPACE_H_
+#define ART_SRC_GC_SPACE_IMAGE_SPACE_H_
+
+#include "space.h"
+
+namespace art {
+namespace gc {
+namespace space {
+
+// An image space is a space backed with a memory mapped image.
+class ImageSpace : public MemMapSpace {
+ public:
+ bool CanAllocateInto() const {
+ return false;
+ }
+
+ SpaceType GetType() const {
+ return kSpaceTypeImageSpace;
+ }
+
+  // Create a Space from an image file. The space cannot be allocated into and is never collected.
+ static ImageSpace* Create(const std::string& image)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ const ImageHeader& GetImageHeader() const {
+ return *reinterpret_cast<ImageHeader*>(Begin());
+ }
+
+ const std::string GetImageFilename() const {
+ return GetName();
+ }
+
+ // Mark the objects defined in this space in the given live bitmap
+ void RecordImageAllocations(accounting::SpaceBitmap* live_bitmap) const
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ accounting::SpaceBitmap* GetLiveBitmap() const {
+ return live_bitmap_.get();
+ }
+
+ accounting::SpaceBitmap* GetMarkBitmap() const {
+ // ImageSpaces have the same bitmap for both live and marked. This helps reduce the number of
+ // special cases to test against.
+ return live_bitmap_.get();
+ }
+
+ void Dump(std::ostream& os) const;
+
+ private:
+ friend class Space;
+
+ static size_t bitmap_index_;
+
+ UniquePtr<accounting::SpaceBitmap> live_bitmap_;
+
+ ImageSpace(const std::string& name, MemMap* mem_map);
+
+ DISALLOW_COPY_AND_ASSIGN(ImageSpace);
+};
+
+} // namespace space
+} // namespace gc
+} // namespace art
+
+#endif // ART_SRC_GC_SPACE_IMAGE_SPACE_H_
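
The image space is created once at startup: Create() maps the boot image at the address recorded in its ImageHeader and pulls the resolution and callee-save methods out of the image roots, and RecordImageAllocations() then marks every image object in the live bitmap, since the space is never collected. A sketch of that startup sequence, assuming the calling thread holds the mutator lock as the declarations above require; the image path is a placeholder:

  space::ImageSpace* image_space = space::ImageSpace::Create("/data/art-cache/boot.art");
  CHECK(image_space != NULL) << "Failed to load boot image";
  // Objects in the image are retained forever (kGcRetentionPolicyNeverCollect), so mark them
  // live once up front rather than tracing them on every GC.
  image_space->RecordImageAllocations(image_space->GetLiveBitmap());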
diff --git a/runtime/gc/space/large_object_space.cc b/runtime/gc/space/large_object_space.cc
new file mode 100644
index 0000000..3cee1b7
--- /dev/null
+++ b/runtime/gc/space/large_object_space.cc
@@ -0,0 +1,285 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "large_object_space.h"
+
+#include "base/logging.h"
+#include "base/stl_util.h"
+#include "UniquePtr.h"
+#include "image.h"
+#include "os.h"
+#include "thread.h"
+#include "utils.h"
+
+namespace art {
+namespace gc {
+namespace space {
+
+void LargeObjectSpace::SwapBitmaps() {
+ live_objects_.swap(mark_objects_);
+ // Swap names to get more descriptive diagnostics.
+ std::string temp_name = live_objects_->GetName();
+ live_objects_->SetName(mark_objects_->GetName());
+ mark_objects_->SetName(temp_name);
+}
+
+LargeObjectSpace::LargeObjectSpace(const std::string& name)
+ : DiscontinuousSpace(name, kGcRetentionPolicyAlwaysCollect),
+ num_bytes_allocated_(0), num_objects_allocated_(0), total_bytes_allocated_(0),
+ total_objects_allocated_(0) {
+}
+
+
+void LargeObjectSpace::CopyLiveToMarked() {
+ mark_objects_->CopyFrom(*live_objects_.get());
+}
+
+LargeObjectMapSpace::LargeObjectMapSpace(const std::string& name)
+    : LargeObjectSpace(name),
+      lock_("large object map space lock", kAllocSpaceLock) {
+}
+
+LargeObjectMapSpace* LargeObjectMapSpace::Create(const std::string& name) {
+ return new LargeObjectMapSpace(name);
+}
+
+mirror::Object* LargeObjectMapSpace::Alloc(Thread* self, size_t num_bytes) {
+ MemMap* mem_map = MemMap::MapAnonymous("large object space allocation", NULL, num_bytes,
+ PROT_READ | PROT_WRITE);
+ if (mem_map == NULL) {
+ return NULL;
+ }
+ MutexLock mu(self, lock_);
+ mirror::Object* obj = reinterpret_cast<mirror::Object*>(mem_map->Begin());
+ large_objects_.push_back(obj);
+ mem_maps_.Put(obj, mem_map);
+ size_t allocation_size = mem_map->Size();
+ num_bytes_allocated_ += allocation_size;
+ total_bytes_allocated_ += allocation_size;
+ ++num_objects_allocated_;
+ ++total_objects_allocated_;
+ return obj;
+}
+
+size_t LargeObjectMapSpace::Free(Thread* self, mirror::Object* ptr) {
+ MutexLock mu(self, lock_);
+ MemMaps::iterator found = mem_maps_.find(ptr);
+ CHECK(found != mem_maps_.end()) << "Attempted to free large object which was not live";
+ DCHECK_GE(num_bytes_allocated_, found->second->Size());
+ size_t allocation_size = found->second->Size();
+ num_bytes_allocated_ -= allocation_size;
+ --num_objects_allocated_;
+ delete found->second;
+ mem_maps_.erase(found);
+ return allocation_size;
+}
+
+size_t LargeObjectMapSpace::AllocationSize(const mirror::Object* obj) {
+ MutexLock mu(Thread::Current(), lock_);
+ MemMaps::iterator found = mem_maps_.find(const_cast<mirror::Object*>(obj));
+ CHECK(found != mem_maps_.end()) << "Attempted to get size of a large object which is not live";
+ return found->second->Size();
+}
+
+size_t LargeObjectSpace::FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) {
+ size_t total = 0;
+ for (size_t i = 0; i < num_ptrs; ++i) {
+ if (kDebugSpaces) {
+ CHECK(Contains(ptrs[i]));
+ }
+ total += Free(self, ptrs[i]);
+ }
+ return total;
+}
+
+void LargeObjectMapSpace::Walk(DlMallocSpace::WalkCallback callback, void* arg) {
+ MutexLock mu(Thread::Current(), lock_);
+ for (MemMaps::iterator it = mem_maps_.begin(); it != mem_maps_.end(); ++it) {
+ MemMap* mem_map = it->second;
+ callback(mem_map->Begin(), mem_map->End(), mem_map->Size(), arg);
+ callback(NULL, NULL, 0, arg);
+ }
+}
+
+bool LargeObjectMapSpace::Contains(const mirror::Object* obj) const {
+ Thread* self = Thread::Current();
+ if (lock_.IsExclusiveHeld(self)) {
+ // We hold lock_ so do the check.
+ return mem_maps_.find(const_cast<mirror::Object*>(obj)) != mem_maps_.end();
+ } else {
+ MutexLock mu(self, lock_);
+ return mem_maps_.find(const_cast<mirror::Object*>(obj)) != mem_maps_.end();
+ }
+}
+
+FreeListSpace* FreeListSpace::Create(const std::string& name, byte* requested_begin, size_t size) {
+ CHECK(size % kAlignment == 0);
+ MemMap* mem_map = MemMap::MapAnonymous(name.c_str(), requested_begin, size,
+ PROT_READ | PROT_WRITE);
+ CHECK(mem_map != NULL) << "Failed to allocate large object space mem map";
+ return new FreeListSpace(name, mem_map, mem_map->Begin(), mem_map->End());
+}
+
+FreeListSpace::FreeListSpace(const std::string& name, MemMap* mem_map, byte* begin, byte* end)
+ : LargeObjectSpace(name),
+ begin_(begin),
+ end_(end),
+ mem_map_(mem_map),
+ lock_("free list space lock", kAllocSpaceLock) {
+ chunks_.resize(Size() / kAlignment + 1);
+ // Add a dummy chunk so we don't need to handle chunks having no next chunk.
+ chunks_.back().SetSize(kAlignment, false);
+ // Start out with one large free chunk.
+ AddFreeChunk(begin_, end_ - begin_, NULL);
+}
+
+FreeListSpace::~FreeListSpace() {}
+
+void FreeListSpace::AddFreeChunk(void* address, size_t size, Chunk* previous) {
+ Chunk* chunk = ChunkFromAddr(address);
+ chunk->SetSize(size, true);
+ chunk->SetPrevious(previous);
+ Chunk* next_chunk = GetNextChunk(chunk);
+ next_chunk->SetPrevious(chunk);
+ free_chunks_.insert(chunk);
+}
+
+FreeListSpace::Chunk* FreeListSpace::ChunkFromAddr(void* address) {
+ size_t offset = reinterpret_cast<byte*>(address) - Begin();
+ DCHECK(IsAligned<kAlignment>(offset));
+ DCHECK_LT(offset, Size());
+ return &chunks_[offset / kAlignment];
+}
+
+void* FreeListSpace::AddrFromChunk(Chunk* chunk) {
+ return reinterpret_cast<void*>(Begin() + (chunk - &chunks_.front()) * kAlignment);
+}
+
+void FreeListSpace::RemoveFreeChunk(Chunk* chunk) {
+ // TODO: C++0x
+ // TODO: Improve performance, this might be slow.
+ std::pair<FreeChunks::iterator, FreeChunks::iterator> range = free_chunks_.equal_range(chunk);
+ for (FreeChunks::iterator it = range.first; it != range.second; ++it) {
+ if (*it == chunk) {
+ free_chunks_.erase(it);
+ return;
+ }
+ }
+}
+
+void FreeListSpace::Walk(DlMallocSpace::WalkCallback callback, void* arg) {
+ MutexLock mu(Thread::Current(), lock_);
+ for (Chunk* chunk = &chunks_.front(); chunk < &chunks_.back(); ) {
+ if (!chunk->IsFree()) {
+ size_t size = chunk->GetSize();
+ void* begin = AddrFromChunk(chunk);
+ void* end = reinterpret_cast<void*>(reinterpret_cast<byte*>(begin) + size);
+ callback(begin, end, size, arg);
+ callback(NULL, NULL, 0, arg);
+ }
+ chunk = GetNextChunk(chunk);
+ }
+}
+
+size_t FreeListSpace::Free(Thread* self, mirror::Object* obj) {
+ MutexLock mu(self, lock_);
+ CHECK(Contains(obj));
+ // Check adjacent chunks to see if we need to combine.
+ Chunk* chunk = ChunkFromAddr(obj);
+ CHECK(!chunk->IsFree());
+
+ size_t allocation_size = chunk->GetSize();
+ if (kIsDebugBuild) {
+ memset(obj, 0xEB, allocation_size);
+ }
+ madvise(obj, allocation_size, MADV_DONTNEED);
+ num_objects_allocated_--;
+ num_bytes_allocated_ -= allocation_size;
+ Chunk* prev = chunk->GetPrevious();
+ Chunk* next = GetNextChunk(chunk);
+
+ // Combine any adjacent free chunks
+ size_t extra_size = chunk->GetSize();
+ if (next->IsFree()) {
+ extra_size += next->GetSize();
+ RemoveFreeChunk(next);
+ }
+ if (prev != NULL && prev->IsFree()) {
+ RemoveFreeChunk(prev);
+ AddFreeChunk(AddrFromChunk(prev), prev->GetSize() + extra_size, prev->GetPrevious());
+ } else {
+ AddFreeChunk(AddrFromChunk(chunk), extra_size, prev);
+ }
+ return allocation_size;
+}
+
+bool FreeListSpace::Contains(const mirror::Object* obj) const {
+ return mem_map_->HasAddress(obj);
+}
+
+FreeListSpace::Chunk* FreeListSpace::GetNextChunk(Chunk* chunk) {
+ return chunk + chunk->GetSize() / kAlignment;
+}
+
+size_t FreeListSpace::AllocationSize(const mirror::Object* obj) {
+ Chunk* chunk = ChunkFromAddr(const_cast<mirror::Object*>(obj));
+ CHECK(!chunk->IsFree());
+ return chunk->GetSize();
+}
+
+mirror::Object* FreeListSpace::Alloc(Thread* self, size_t num_bytes) {
+ MutexLock mu(self, lock_);
+ num_bytes = RoundUp(num_bytes, kAlignment);
+ Chunk temp;
+ temp.SetSize(num_bytes);
+ // Find the smallest chunk at least num_bytes in size.
+ FreeChunks::iterator found = free_chunks_.lower_bound(&temp);
+ if (found == free_chunks_.end()) {
+ // Out of memory, or too much fragmentation.
+ return NULL;
+ }
+ Chunk* chunk = *found;
+ free_chunks_.erase(found);
+ CHECK(chunk->IsFree());
+ void* addr = AddrFromChunk(chunk);
+ size_t chunk_size = chunk->GetSize();
+ chunk->SetSize(num_bytes);
+ if (chunk_size > num_bytes) {
+ // Split the chunk into two chunks.
+ Chunk* new_chunk = GetNextChunk(chunk);
+ AddFreeChunk(AddrFromChunk(new_chunk), chunk_size - num_bytes, chunk);
+ }
+
+ num_objects_allocated_++;
+ total_objects_allocated_++;
+ num_bytes_allocated_ += num_bytes;
+ total_bytes_allocated_ += num_bytes;
+ return reinterpret_cast<mirror::Object*>(addr);
+}
+
+void FreeListSpace::Dump(std::ostream& os) const {
+ os << GetName() << " -"
+ << " begin: " << reinterpret_cast<void*>(Begin())
+ << " end: " << reinterpret_cast<void*>(End());
+}
+
+} // namespace space
+} // namespace gc
+} // namespace art
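
Two large-object strategies appear above: LargeObjectMapSpace backs every allocation with its own anonymous MemMap, while FreeListSpace carves one mapping into page-aligned chunks tracked by a free list. A sketch of the common calling pattern; the sizes and the space name are chosen only for illustration:

  Thread* self = Thread::Current();
  space::FreeListSpace* los =
      space::FreeListSpace::Create("large object space", NULL /* requested_begin */, 128 * MB);
  // Alloc rounds the request up to kAlignment (one page); 3 pages + 1 byte occupies 4 pages.
  mirror::Object* obj = los->Alloc(self, 3 * kPageSize + 1);
  CHECK(obj != NULL);
  // Free madvises the pages away and coalesces the chunk with any free neighbours.
  los->Free(self, obj);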
diff --git a/runtime/gc/space/large_object_space.h b/runtime/gc/space/large_object_space.h
new file mode 100644
index 0000000..197fad3
--- /dev/null
+++ b/runtime/gc/space/large_object_space.h
@@ -0,0 +1,193 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_GC_SPACE_LARGE_OBJECT_SPACE_H_
+#define ART_SRC_GC_SPACE_LARGE_OBJECT_SPACE_H_
+
+
+#include "dlmalloc_space.h"
+#include "safe_map.h"
+#include "space.h"
+
+#include <set>
+#include <vector>
+
+namespace art {
+namespace gc {
+namespace space {
+
+// Abstraction implemented by all large object spaces.
+class LargeObjectSpace : public DiscontinuousSpace, public AllocSpace {
+ public:
+ virtual SpaceType GetType() const {
+ return kSpaceTypeLargeObjectSpace;
+ }
+
+ virtual void SwapBitmaps();
+ virtual void CopyLiveToMarked();
+ virtual void Walk(DlMallocSpace::WalkCallback, void* arg) = 0;
+ virtual ~LargeObjectSpace() {}
+
+ uint64_t GetBytesAllocated() const {
+ return num_bytes_allocated_;
+ }
+
+ uint64_t GetObjectsAllocated() const {
+ return num_objects_allocated_;
+ }
+
+ uint64_t GetTotalBytesAllocated() const {
+ return total_bytes_allocated_;
+ }
+
+ uint64_t GetTotalObjectsAllocated() const {
+ return total_objects_allocated_;
+ }
+
+ size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs);
+
+ protected:
+
+ LargeObjectSpace(const std::string& name);
+
+ // Approximate number of bytes which have been allocated into the space.
+ size_t num_bytes_allocated_;
+ size_t num_objects_allocated_;
+ size_t total_bytes_allocated_;
+ size_t total_objects_allocated_;
+
+ friend class Space;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(LargeObjectSpace);
+};
+
+// A discontinuous large object space implemented by individual mmap/munmap calls.
+class LargeObjectMapSpace : public LargeObjectSpace {
+ public:
+ // Creates a large object space. Allocations into the large object space use memory maps instead
+ // of malloc.
+ static LargeObjectMapSpace* Create(const std::string& name);
+
+ // Return the storage space required by obj.
+ size_t AllocationSize(const mirror::Object* obj);
+ mirror::Object* Alloc(Thread* self, size_t num_bytes);
+ size_t Free(Thread* self, mirror::Object* ptr);
+ void Walk(DlMallocSpace::WalkCallback, void* arg);
+ // TODO: disabling thread safety analysis as this may be called when we already hold lock_.
+ bool Contains(const mirror::Object* obj) const NO_THREAD_SAFETY_ANALYSIS;
+
+ private:
+ LargeObjectMapSpace(const std::string& name);
+ virtual ~LargeObjectMapSpace() {}
+
+ // Used to ensure mutual exclusion when the allocation spaces data structures are being modified.
+ mutable Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+ std::vector<mirror::Object*> large_objects_ GUARDED_BY(lock_);
+ typedef SafeMap<mirror::Object*, MemMap*> MemMaps;
+ MemMaps mem_maps_ GUARDED_BY(lock_);
+};
+
+// A continuous large object space with a free-list to handle holes.
+// TODO: this implementation is buggy.
+class FreeListSpace : public LargeObjectSpace {
+ public:
+ virtual ~FreeListSpace();
+ static FreeListSpace* Create(const std::string& name, byte* requested_begin, size_t capacity);
+
+ size_t AllocationSize(const mirror::Object* obj) EXCLUSIVE_LOCKS_REQUIRED(lock_);
+ mirror::Object* Alloc(Thread* self, size_t num_bytes);
+ size_t Free(Thread* self, mirror::Object* obj);
+ bool Contains(const mirror::Object* obj) const;
+ void Walk(DlMallocSpace::WalkCallback callback, void* arg);
+
+ // Address at which the space begins.
+ byte* Begin() const {
+ return begin_;
+ }
+
+ // Address at which the space ends, which may vary as the space is filled.
+ byte* End() const {
+ return end_;
+ }
+
+ // Current size of space
+ size_t Size() const {
+ return End() - Begin();
+ }
+
+ void Dump(std::ostream& os) const;
+
+ private:
+ static const size_t kAlignment = kPageSize;
+
+ class Chunk {
+ public:
+ static const size_t kFreeFlag = 0x80000000;
+
+ struct SortBySize {
+ bool operator()(const Chunk* a, const Chunk* b) const {
+ return a->GetSize() < b->GetSize();
+ }
+ };
+
+ bool IsFree() const {
+ return (m_size & kFreeFlag) != 0;
+ }
+
+ void SetSize(size_t size, bool is_free = false) {
+ m_size = size | (is_free ? kFreeFlag : 0);
+ }
+
+ size_t GetSize() const {
+ return m_size & (kFreeFlag - 1);
+ }
+
+ Chunk* GetPrevious() {
+ return m_previous;
+ }
+
+ void SetPrevious(Chunk* previous) {
+ m_previous = previous;
+ DCHECK(m_previous == NULL ||
+ (m_previous != NULL && m_previous + m_previous->GetSize() / kAlignment == this));
+ }
+ private:
+ size_t m_size;
+ Chunk* m_previous;
+ };
+
+ FreeListSpace(const std::string& name, MemMap* mem_map, byte* begin, byte* end);
+ void AddFreeChunk(void* address, size_t size, Chunk* previous) EXCLUSIVE_LOCKS_REQUIRED(lock_);
+ Chunk* ChunkFromAddr(void* address) EXCLUSIVE_LOCKS_REQUIRED(lock_);
+ void* AddrFromChunk(Chunk* chunk) EXCLUSIVE_LOCKS_REQUIRED(lock_);
+ void RemoveFreeChunk(Chunk* chunk) EXCLUSIVE_LOCKS_REQUIRED(lock_);
+ Chunk* GetNextChunk(Chunk* chunk);
+
+ typedef std::multiset<Chunk*, Chunk::SortBySize> FreeChunks;
+ byte* const begin_;
+ byte* const end_;
+ UniquePtr<MemMap> mem_map_;
+ Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+ std::vector<Chunk> chunks_ GUARDED_BY(lock_);
+ FreeChunks free_chunks_ GUARDED_BY(lock_);
+};
+
+} // namespace space
+} // namespace gc
+} // namespace art
+
+#endif // ART_SRC_GC_SPACE_LARGE_OBJECT_SPACE_H_
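
The FreeListSpace bookkeeping above keeps one Chunk entry per kAlignment bytes of the space, so address-to-chunk conversion is plain index arithmetic and GetNextChunk simply steps size / kAlignment entries forward. A worked example of that arithmetic with invented values (kAlignment of one 4 KiB page, space beginning at 0x40000000); this is only an illustration of the formulas, not code from the patch:

  const size_t kAlignment = 4096;
  const uintptr_t begin = 0x40000000;
  uintptr_t addr = 0x40003000;
  size_t index = (addr - begin) / kAlignment;            // ChunkFromAddr: chunks_[3]
  size_t chunk_size = 2 * kAlignment;                     // this chunk spans 2 pages
  size_t next_index = index + chunk_size / kAlignment;    // GetNextChunk: chunks_[5]
  uintptr_t next_addr = begin + next_index * kAlignment;  // AddrFromChunk: 0x40005000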
diff --git a/runtime/gc/space/space-inl.h b/runtime/gc/space/space-inl.h
new file mode 100644
index 0000000..54bf604
--- /dev/null
+++ b/runtime/gc/space/space-inl.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_GC_SPACE_SPACE_INL_H_
+#define ART_SRC_GC_SPACE_SPACE_INL_H_
+
+#include "space.h"
+
+#include "dlmalloc_space.h"
+#include "image_space.h"
+
+namespace art {
+namespace gc {
+namespace space {
+
+inline ImageSpace* Space::AsImageSpace() {
+ DCHECK_EQ(GetType(), kSpaceTypeImageSpace);
+ return down_cast<ImageSpace*>(down_cast<MemMapSpace*>(this));
+}
+
+inline DlMallocSpace* Space::AsDlMallocSpace() {
+ DCHECK(GetType() == kSpaceTypeAllocSpace || GetType() == kSpaceTypeZygoteSpace);
+ return down_cast<DlMallocSpace*>(down_cast<MemMapSpace*>(this));
+}
+
+inline LargeObjectSpace* Space::AsLargeObjectSpace() {
+ DCHECK_EQ(GetType(), kSpaceTypeLargeObjectSpace);
+ return reinterpret_cast<LargeObjectSpace*>(this);
+}
+
+} // namespace space
+} // namespace gc
+} // namespace art
+
+#endif // ART_SRC_GC_SPACE_SPACE_INL_H_
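
space-inl.h provides the checked downcasts used throughout the collector; the usual pattern is to test the space's type with the Is*Space() predicates declared in space.h and only then call the matching As*Space() accessor. A hypothetical helper, with names invented for illustration:

  void DumpSpaceKind(space::Space* space) {
    if (space->IsImageSpace()) {
      LOG(INFO) << "image space: " << space->AsImageSpace()->GetImageFilename();
    } else if (space->IsDlMallocSpace()) {
      LOG(INFO) << "alloc space: " << space->AsDlMallocSpace()->GetBytesAllocated()
                << " bytes allocated";
    } else if (space->IsLargeObjectSpace()) {
      LOG(INFO) << "large object space: " << space->AsLargeObjectSpace()->GetObjectsAllocated()
                << " objects";
    }
  }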
diff --git a/runtime/gc/space/space.cc b/runtime/gc/space/space.cc
new file mode 100644
index 0000000..eae281a
--- /dev/null
+++ b/runtime/gc/space/space.cc
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "space.h"
+
+#include "base/logging.h"
+
+namespace art {
+namespace gc {
+namespace space {
+
+Space::Space(const std::string& name, GcRetentionPolicy gc_retention_policy) :
+ name_(name), gc_retention_policy_(gc_retention_policy) { }
+
+void Space::Dump(std::ostream& os) const {
+ os << GetName() << ":" << GetGcRetentionPolicy();
+}
+
+std::ostream& operator<<(std::ostream& os, const Space& space) {
+ space.Dump(os);
+ return os;
+}
+
+
+DiscontinuousSpace::DiscontinuousSpace(const std::string& name,
+ GcRetentionPolicy gc_retention_policy) :
+ Space(name, gc_retention_policy),
+ live_objects_(new accounting::SpaceSetMap("large live objects")),
+ mark_objects_(new accounting::SpaceSetMap("large marked objects")) {
+}
+
+} // namespace space
+} // namespace gc
+} // namespace art
diff --git a/runtime/gc/space/space.h b/runtime/gc/space/space.h
new file mode 100644
index 0000000..ca01c55
--- /dev/null
+++ b/runtime/gc/space/space.h
@@ -0,0 +1,295 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_GC_SPACE_SPACE_H_
+#define ART_SRC_GC_SPACE_SPACE_H_
+
+#include <string>
+
+#include "UniquePtr.h"
+#include "base/macros.h"
+#include "base/mutex.h"
+#include "gc/accounting/space_bitmap.h"
+#include "globals.h"
+#include "image.h"
+#include "mem_map.h"
+
+namespace art {
+namespace mirror {
+ class Object;
+} // namespace mirror
+
+namespace gc {
+
+namespace accounting {
+ class SpaceBitmap;
+} // namespace accounting
+
+class Heap;
+
+namespace space {
+
+class DlMallocSpace;
+class ImageSpace;
+class LargeObjectSpace;
+
+static const bool kDebugSpaces = kIsDebugBuild;
+
+// See Space::GetGcRetentionPolicy.
+enum GcRetentionPolicy {
+ // Objects are retained forever with this policy for a space.
+ kGcRetentionPolicyNeverCollect,
+ // Every GC cycle will attempt to collect objects in this space.
+ kGcRetentionPolicyAlwaysCollect,
+ // Objects will be considered for collection only in "full" GC cycles, ie faster partial
+ // collections won't scan these areas such as the Zygote.
+ kGcRetentionPolicyFullCollect,
+};
+std::ostream& operator<<(std::ostream& os, const GcRetentionPolicy& policy);
+
+enum SpaceType {
+ kSpaceTypeImageSpace,
+ kSpaceTypeAllocSpace,
+ kSpaceTypeZygoteSpace,
+ kSpaceTypeLargeObjectSpace,
+};
+std::ostream& operator<<(std::ostream& os, const SpaceType& space_type);
+
+// A space contains memory allocated for managed objects.
+class Space {
+ public:
+ // Dump space. Also key method for C++ vtables.
+ virtual void Dump(std::ostream& os) const;
+
+ // Name of the space. May vary, for example before/after the Zygote fork.
+ const char* GetName() const {
+ return name_.c_str();
+ }
+
+ // The policy of when objects are collected associated with this space.
+ GcRetentionPolicy GetGcRetentionPolicy() const {
+ return gc_retention_policy_;
+ }
+
+ // Does the space support allocation?
+ virtual bool CanAllocateInto() const {
+ return true;
+ }
+
+ // Is the given object contained within this space?
+ virtual bool Contains(const mirror::Object* obj) const = 0;
+
+  // The kind of space this is: image, alloc, zygote, or large object.
+ virtual SpaceType GetType() const = 0;
+
+ // Is this an image space, ie one backed by a memory mapped image file.
+ bool IsImageSpace() const {
+ return GetType() == kSpaceTypeImageSpace;
+ }
+ ImageSpace* AsImageSpace();
+
+ // Is this a dlmalloc backed allocation space?
+ bool IsDlMallocSpace() const {
+ SpaceType type = GetType();
+ return type == kSpaceTypeAllocSpace || type == kSpaceTypeZygoteSpace;
+ }
+ DlMallocSpace* AsDlMallocSpace();
+
+  // Is this the space allocated into by the Zygote and no longer in use?
+ bool IsZygoteSpace() const {
+ return GetType() == kSpaceTypeZygoteSpace;
+ }
+ DlMallocSpace* AsZygoteSpace();
+
+ // Does this space hold large objects and implement the large object space abstraction?
+ bool IsLargeObjectSpace() const {
+ return GetType() == kSpaceTypeLargeObjectSpace;
+ }
+ LargeObjectSpace* AsLargeObjectSpace();
+
+ virtual ~Space() {}
+
+ protected:
+ Space(const std::string& name, GcRetentionPolicy gc_retention_policy);
+
+ void SetGcRetentionPolicy(GcRetentionPolicy gc_retention_policy) {
+ gc_retention_policy_ = gc_retention_policy;
+ }
+
+ // Name of the space that may vary due to the Zygote fork.
+ std::string name_;
+
+ private:
+ // When should objects within this space be reclaimed? Not constant as we vary it in the case
+ // of Zygote forking.
+ GcRetentionPolicy gc_retention_policy_;
+
+ friend class art::gc::Heap;
+
+ DISALLOW_COPY_AND_ASSIGN(Space);
+};
+std::ostream& operator<<(std::ostream& os, const Space& space);
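+
+// Illustrative usage (not part of the API contract): callers typically check the space type
+// before downcasting, e.g.
+//   if (space->IsDlMallocSpace()) {
+//     DlMallocSpace* alloc_space = space->AsDlMallocSpace();
+//     // ... use the allocation space ...
+//   }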
+
+// AllocSpace interface.
+class AllocSpace {
+ public:
+ // Number of bytes currently allocated.
+ virtual uint64_t GetBytesAllocated() const = 0;
+ // Number of objects currently allocated.
+ virtual uint64_t GetObjectsAllocated() const = 0;
+ // Number of bytes allocated since the space was created.
+ virtual uint64_t GetTotalBytesAllocated() const = 0;
+ // Number of objects allocated since the space was created.
+ virtual uint64_t GetTotalObjectsAllocated() const = 0;
+
+ // Allocate num_bytes without allowing growth.
+ virtual mirror::Object* Alloc(Thread* self, size_t num_bytes) = 0;
+
+ // Return the storage space required by obj.
+ virtual size_t AllocationSize(const mirror::Object* obj) = 0;
+
+ // Returns how many bytes were freed.
+ virtual size_t Free(Thread* self, mirror::Object* ptr) = 0;
+
+  // Frees num_ptrs objects pointed to by ptrs; returns how many bytes were freed in total.
+ virtual size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) = 0;
+
+ protected:
+ AllocSpace() {}
+ virtual ~AllocSpace() {}
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(AllocSpace);
+};
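+
+// Illustrative usage of the AllocSpace interface (a sketch mirroring space_test.cc):
+//   mirror::Object* obj = alloc_space->Alloc(self, num_bytes);
+//   size_t alloc_size = alloc_space->AllocationSize(obj);  // bytes actually reserved for obj
+//   size_t freed = alloc_space->Free(self, obj);           // returns bytes freed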
+
+// Continuous spaces have bitmaps and an address range. Although not required, objects within
+// continuous spaces can be marked in the card table.
+class ContinuousSpace : public Space {
+ public:
+  // Address at which the space begins.
+ byte* Begin() const {
+ return begin_;
+ }
+
+ // Address at which the space ends, which may vary as the space is filled.
+ byte* End() const {
+ return end_;
+ }
+
+  // Current size of the space.
+ size_t Size() const {
+ return End() - Begin();
+ }
+
+ virtual accounting::SpaceBitmap* GetLiveBitmap() const = 0;
+ virtual accounting::SpaceBitmap* GetMarkBitmap() const = 0;
+
+ // Is object within this space? We check to see if the pointer is beyond the end first as
+ // continuous spaces are iterated over from low to high.
+ bool HasAddress(const mirror::Object* obj) const {
+ const byte* byte_ptr = reinterpret_cast<const byte*>(obj);
+ return byte_ptr < End() && byte_ptr >= Begin();
+ }
+
+ bool Contains(const mirror::Object* obj) const {
+ return HasAddress(obj);
+ }
+
+ virtual ~ContinuousSpace() {}
+
+ protected:
+ ContinuousSpace(const std::string& name, GcRetentionPolicy gc_retention_policy,
+ byte* begin, byte* end) :
+ Space(name, gc_retention_policy), begin_(begin), end_(end) {
+ }
+
+ // The beginning of the storage for fast access.
+ byte* const begin_;
+
+ // Current end of the space.
+ byte* end_;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(ContinuousSpace);
+};
+
+// A space where objects may be allocated higgledy-piggledy throughout virtual memory. Currently
+// the card table can't cover these objects and so the write barrier shouldn't be triggered. This
+// is suitable for use for large primitive arrays.
+class DiscontinuousSpace : public Space {
+ public:
+ accounting::SpaceSetMap* GetLiveObjects() const {
+ return live_objects_.get();
+ }
+
+ accounting::SpaceSetMap* GetMarkObjects() const {
+ return mark_objects_.get();
+ }
+
+ virtual ~DiscontinuousSpace() {}
+
+ protected:
+ DiscontinuousSpace(const std::string& name, GcRetentionPolicy gc_retention_policy);
+
+ UniquePtr<accounting::SpaceSetMap> live_objects_;
+ UniquePtr<accounting::SpaceSetMap> mark_objects_;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(DiscontinuousSpace);
+};
+
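+// A continuous space whose backing storage is a MemMap (a memory mapping); its capacity is the
+// size of the mapping.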
+class MemMapSpace : public ContinuousSpace {
+ public:
+  // Maximum size to which the mapped space can grow.
+ virtual size_t Capacity() const {
+ return mem_map_->Size();
+ }
+
+ // Size of the space without a limit on its growth. By default this is just the Capacity, but
+ // for the allocation space we support starting with a small heap and then extending it.
+ virtual size_t NonGrowthLimitCapacity() const {
+ return Capacity();
+ }
+
+ protected:
+ MemMapSpace(const std::string& name, MemMap* mem_map, size_t initial_size,
+ GcRetentionPolicy gc_retention_policy)
+ : ContinuousSpace(name, gc_retention_policy,
+ mem_map->Begin(), mem_map->Begin() + initial_size),
+ mem_map_(mem_map) {
+ }
+
+ MemMap* GetMemMap() {
+ return mem_map_.get();
+ }
+
+ const MemMap* GetMemMap() const {
+ return mem_map_.get();
+ }
+
+ private:
+  // Underlying storage of the space.
+ UniquePtr<MemMap> mem_map_;
+
+ DISALLOW_COPY_AND_ASSIGN(MemMapSpace);
+};
+
+} // namespace space
+} // namespace gc
+} // namespace art
+
+#endif // ART_SRC_GC_SPACE_SPACE_H_
diff --git a/runtime/gc/space/space_test.cc b/runtime/gc/space/space_test.cc
new file mode 100644
index 0000000..08ae894
--- /dev/null
+++ b/runtime/gc/space/space_test.cc
@@ -0,0 +1,429 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "dlmalloc_space.h"
+
+#include "common_test.h"
+#include "globals.h"
+#include "UniquePtr.h"
+
+#include <stdint.h>
+
+namespace art {
+namespace gc {
+namespace space {
+
+class SpaceTest : public CommonTest {
+ public:
+ void SizeFootPrintGrowthLimitAndTrimBody(DlMallocSpace* space, intptr_t object_size,
+ int round, size_t growth_limit);
+ void SizeFootPrintGrowthLimitAndTrimDriver(size_t object_size);
+
+ void AddContinuousSpace(ContinuousSpace* space) {
+ Runtime::Current()->GetHeap()->AddContinuousSpace(space);
+ }
+};
+
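+// DlMallocSpace::Create is invoked as (name, initial size, growth limit, capacity, requested
+// begin). Creation is expected to fail when the growth limit is smaller than the initial size
+// or larger than the capacity.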
+TEST_F(SpaceTest, Init) {
+ {
+ // Init < max == growth
+ UniquePtr<Space> space(DlMallocSpace::Create("test", 16 * MB, 32 * MB, 32 * MB, NULL));
+ EXPECT_TRUE(space.get() != NULL);
+ }
+ {
+ // Init == max == growth
+ UniquePtr<Space> space(DlMallocSpace::Create("test", 16 * MB, 16 * MB, 16 * MB, NULL));
+ EXPECT_TRUE(space.get() != NULL);
+ }
+ {
+ // Init > max == growth
+ UniquePtr<Space> space(DlMallocSpace::Create("test", 32 * MB, 16 * MB, 16 * MB, NULL));
+ EXPECT_TRUE(space.get() == NULL);
+ }
+ {
+ // Growth == init < max
+ UniquePtr<Space> space(DlMallocSpace::Create("test", 16 * MB, 16 * MB, 32 * MB, NULL));
+ EXPECT_TRUE(space.get() != NULL);
+ }
+ {
+ // Growth < init < max
+ UniquePtr<Space> space(DlMallocSpace::Create("test", 16 * MB, 8 * MB, 32 * MB, NULL));
+ EXPECT_TRUE(space.get() == NULL);
+ }
+ {
+ // Init < growth < max
+ UniquePtr<Space> space(DlMallocSpace::Create("test", 8 * MB, 16 * MB, 32 * MB, NULL));
+ EXPECT_TRUE(space.get() != NULL);
+ }
+ {
+ // Init < max < growth
+ UniquePtr<Space> space(DlMallocSpace::Create("test", 8 * MB, 32 * MB, 16 * MB, NULL));
+ EXPECT_TRUE(space.get() == NULL);
+ }
+}
+
+// TODO: This test is not very good, we should improve it.
+// The test should do more allocations before the creation of the ZygoteSpace, and then do
+// allocations after the ZygoteSpace is created. The test should also do some GCs to ensure that
+// the GC works with the ZygoteSpace.
+TEST_F(SpaceTest, ZygoteSpace) {
+ DlMallocSpace* space(DlMallocSpace::Create("test", 4 * MB, 16 * MB, 16 * MB, NULL));
+ ASSERT_TRUE(space != NULL);
+
+  // Make the space findable to the heap; it will also be deleted when the runtime is cleaned up.
+ AddContinuousSpace(space);
+ Thread* self = Thread::Current();
+
+ // Succeeds, fits without adjusting the footprint limit.
+ mirror::Object* ptr1 = space->Alloc(self, 1 * MB);
+ EXPECT_TRUE(ptr1 != NULL);
+
+ // Fails, requires a higher footprint limit.
+ mirror::Object* ptr2 = space->Alloc(self, 8 * MB);
+ EXPECT_TRUE(ptr2 == NULL);
+
+ // Succeeds, adjusts the footprint.
+ mirror::Object* ptr3 = space->AllocWithGrowth(self, 8 * MB);
+ EXPECT_TRUE(ptr3 != NULL);
+
+ // Fails, requires a higher footprint limit.
+ mirror::Object* ptr4 = space->Alloc(self, 8 * MB);
+ EXPECT_TRUE(ptr4 == NULL);
+
+ // Also fails, requires a higher allowed footprint.
+ mirror::Object* ptr5 = space->AllocWithGrowth(self, 8 * MB);
+ EXPECT_TRUE(ptr5 == NULL);
+
+ // Release some memory.
+ size_t free3 = space->AllocationSize(ptr3);
+ EXPECT_EQ(free3, space->Free(self, ptr3));
+ EXPECT_LE(8U * MB, free3);
+
+ // Succeeds, now that memory has been freed.
+ void* ptr6 = space->AllocWithGrowth(self, 9 * MB);
+ EXPECT_TRUE(ptr6 != NULL);
+
+ // Final clean up.
+ size_t free1 = space->AllocationSize(ptr1);
+ space->Free(self, ptr1);
+ EXPECT_LE(1U * MB, free1);
+
+ // Make sure that the zygote space isn't directly at the start of the space.
+ space->Alloc(self, 1U * MB);
+ space = space->CreateZygoteSpace();
+
+  // Make the space findable to the heap; it will also be deleted when the runtime is cleaned up.
+ AddContinuousSpace(space);
+
+ // Succeeds, fits without adjusting the footprint limit.
+ ptr1 = space->Alloc(self, 1 * MB);
+ EXPECT_TRUE(ptr1 != NULL);
+
+ // Fails, requires a higher footprint limit.
+ ptr2 = space->Alloc(self, 8 * MB);
+ EXPECT_TRUE(ptr2 == NULL);
+
+ // Succeeds, adjusts the footprint.
+ ptr3 = space->AllocWithGrowth(self, 2 * MB);
+ EXPECT_TRUE(ptr3 != NULL);
+ space->Free(self, ptr3);
+
+ // Final clean up.
+ free1 = space->AllocationSize(ptr1);
+ space->Free(self, ptr1);
+ EXPECT_LE(1U * MB, free1);
+}
+
+TEST_F(SpaceTest, AllocAndFree) {
+ DlMallocSpace* space(DlMallocSpace::Create("test", 4 * MB, 16 * MB, 16 * MB, NULL));
+ ASSERT_TRUE(space != NULL);
+ Thread* self = Thread::Current();
+
+  // Make the space findable to the heap; it will also be deleted when the runtime is cleaned up.
+ AddContinuousSpace(space);
+
+ // Succeeds, fits without adjusting the footprint limit.
+ mirror::Object* ptr1 = space->Alloc(self, 1 * MB);
+ EXPECT_TRUE(ptr1 != NULL);
+
+ // Fails, requires a higher footprint limit.
+ mirror::Object* ptr2 = space->Alloc(self, 8 * MB);
+ EXPECT_TRUE(ptr2 == NULL);
+
+ // Succeeds, adjusts the footprint.
+ mirror::Object* ptr3 = space->AllocWithGrowth(self, 8 * MB);
+ EXPECT_TRUE(ptr3 != NULL);
+
+ // Fails, requires a higher footprint limit.
+ mirror::Object* ptr4 = space->Alloc(self, 8 * MB);
+ EXPECT_TRUE(ptr4 == NULL);
+
+ // Also fails, requires a higher allowed footprint.
+ mirror::Object* ptr5 = space->AllocWithGrowth(self, 8 * MB);
+ EXPECT_TRUE(ptr5 == NULL);
+
+ // Release some memory.
+ size_t free3 = space->AllocationSize(ptr3);
+ space->Free(self, ptr3);
+ EXPECT_LE(8U * MB, free3);
+
+ // Succeeds, now that memory has been freed.
+ void* ptr6 = space->AllocWithGrowth(self, 9 * MB);
+ EXPECT_TRUE(ptr6 != NULL);
+
+ // Final clean up.
+ size_t free1 = space->AllocationSize(ptr1);
+ space->Free(self, ptr1);
+ EXPECT_LE(1U * MB, free1);
+}
+
+TEST_F(SpaceTest, AllocAndFreeList) {
+ DlMallocSpace* space(DlMallocSpace::Create("test", 4 * MB, 16 * MB, 16 * MB, NULL));
+ ASSERT_TRUE(space != NULL);
+
+  // Make the space findable to the heap; it will also be deleted when the runtime is cleaned up.
+ AddContinuousSpace(space);
+ Thread* self = Thread::Current();
+
+ // Succeeds, fits without adjusting the max allowed footprint.
+ mirror::Object* lots_of_objects[1024];
+ for (size_t i = 0; i < arraysize(lots_of_objects); i++) {
+ lots_of_objects[i] = space->Alloc(self, 16);
+ EXPECT_TRUE(lots_of_objects[i] != NULL);
+ }
+
+ // Release memory and check pointers are NULL
+ space->FreeList(self, arraysize(lots_of_objects), lots_of_objects);
+ for (size_t i = 0; i < arraysize(lots_of_objects); i++) {
+ EXPECT_TRUE(lots_of_objects[i] == NULL);
+ }
+
+ // Succeeds, fits by adjusting the max allowed footprint.
+ for (size_t i = 0; i < arraysize(lots_of_objects); i++) {
+ lots_of_objects[i] = space->AllocWithGrowth(self, 1024);
+ EXPECT_TRUE(lots_of_objects[i] != NULL);
+ }
+
+ // Release memory and check pointers are NULL
+ space->FreeList(self, arraysize(lots_of_objects), lots_of_objects);
+ for (size_t i = 0; i < arraysize(lots_of_objects); i++) {
+ EXPECT_TRUE(lots_of_objects[i] == NULL);
+ }
+}
+
+static size_t test_rand() {
+ // TODO: replace this with something random yet deterministic
+ return rand();
+}
+
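+// Exercises Alloc/AllocWithGrowth, AllocationSize, Free and Trim on a DlMallocSpace. A positive
+// object_size requests fixed-size objects; a negative object_size requests random sizes of at
+// most -object_size bytes (minimum 8). Rounds <= 1 use Alloc, later rounds use AllocWithGrowth.
+// growth_limit is the bound against which the footprint and space size are checked.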
+void SpaceTest::SizeFootPrintGrowthLimitAndTrimBody(DlMallocSpace* space, intptr_t object_size,
+ int round, size_t growth_limit) {
+  if ((object_size > 0 && object_size >= static_cast<intptr_t>(growth_limit)) ||
+      (object_size < 0 && -object_size >= static_cast<intptr_t>(growth_limit))) {
+ // No allocation can succeed
+ return;
+ }
+ // Mspace for raw dlmalloc operations
+ void* mspace = space->GetMspace();
+
+  // The mspace's footprint equals the amount of memory requested from the system.
+  size_t footprint = mspace_footprint(mspace);
+
+  // The mspace must at least have its bookkeeping allocated.
+  EXPECT_GT(footprint, 0u);
+
+  // But the footprint shouldn't exceed the given growth limit.
+  EXPECT_LE(footprint, growth_limit);
+
+  // The space's size shouldn't exceed the growth limit either.
+  EXPECT_LE(space->Size(), growth_limit);
+
+  // This invariant should always hold, otherwise the mspace has grown larger than the space
+  // believes its size to be (which would break other invariants).
+ EXPECT_GE(space->Size(), footprint);
+
+ // Fill the space with lots of small objects up to the growth limit
+ size_t max_objects = (growth_limit / (object_size > 0 ? object_size : 8)) + 1;
+ UniquePtr<mirror::Object*[]> lots_of_objects(new mirror::Object*[max_objects]);
+  size_t last_object = 0; // index at which allocation first failed (== number of objects allocated)
+ size_t amount_allocated = 0; // amount of space allocated
+ Thread* self = Thread::Current();
+ for (size_t i = 0; i < max_objects; i++) {
+ size_t alloc_fails = 0; // number of failed allocations
+ size_t max_fails = 30; // number of times we fail allocation before giving up
+ for (; alloc_fails < max_fails; alloc_fails++) {
+ size_t alloc_size;
+ if (object_size > 0) {
+ alloc_size = object_size;
+ } else {
+ alloc_size = test_rand() % static_cast<size_t>(-object_size);
+ if (alloc_size < 8) {
+ alloc_size = 8;
+ }
+ }
+ mirror::Object* object;
+ if (round <= 1) {
+ object = space->Alloc(self, alloc_size);
+ } else {
+ object = space->AllocWithGrowth(self, alloc_size);
+ }
+ footprint = mspace_footprint(mspace);
+ EXPECT_GE(space->Size(), footprint); // invariant
+ if (object != NULL) { // allocation succeeded
+ lots_of_objects.get()[i] = object;
+ size_t allocation_size = space->AllocationSize(object);
+ if (object_size > 0) {
+ EXPECT_GE(allocation_size, static_cast<size_t>(object_size));
+ } else {
+ EXPECT_GE(allocation_size, 8u);
+ }
+ amount_allocated += allocation_size;
+ break;
+ }
+ }
+ if (alloc_fails == max_fails) {
+ last_object = i;
+ break;
+ }
+ }
+ CHECK_NE(last_object, 0u); // we should have filled the space
+ EXPECT_GT(amount_allocated, 0u);
+
+ // We shouldn't have gone past the growth_limit
+ EXPECT_LE(amount_allocated, growth_limit);
+ EXPECT_LE(footprint, growth_limit);
+ EXPECT_LE(space->Size(), growth_limit);
+
+ // footprint and size should agree with amount allocated
+ EXPECT_GE(footprint, amount_allocated);
+ EXPECT_GE(space->Size(), amount_allocated);
+
+  // Release storage in a semi-ad-hoc manner.
+ size_t free_increment = 96;
+ while (true) {
+ // Give the space a haircut
+ space->Trim();
+
+ // Bounds sanity
+ footprint = mspace_footprint(mspace);
+ EXPECT_LE(amount_allocated, growth_limit);
+ EXPECT_GE(footprint, amount_allocated);
+ EXPECT_LE(footprint, growth_limit);
+ EXPECT_GE(space->Size(), amount_allocated);
+ EXPECT_LE(space->Size(), growth_limit);
+
+ if (free_increment == 0) {
+ break;
+ }
+
+ // Free some objects
+ for (size_t i = 0; i < last_object; i += free_increment) {
+ mirror::Object* object = lots_of_objects.get()[i];
+ if (object == NULL) {
+ continue;
+ }
+ size_t allocation_size = space->AllocationSize(object);
+ if (object_size > 0) {
+ EXPECT_GE(allocation_size, static_cast<size_t>(object_size));
+ } else {
+ EXPECT_GE(allocation_size, 8u);
+ }
+ space->Free(self, object);
+ lots_of_objects.get()[i] = NULL;
+ amount_allocated -= allocation_size;
+ footprint = mspace_footprint(mspace);
+ EXPECT_GE(space->Size(), footprint); // invariant
+ }
+
+ free_increment >>= 1;
+ }
+
+  // All memory was released; try a large allocation to check that freed memory is being coalesced.
+ mirror::Object* large_object;
+ size_t three_quarters_space = (growth_limit / 2) + (growth_limit / 4);
+ if (round <= 1) {
+ large_object = space->Alloc(self, three_quarters_space);
+ } else {
+ large_object = space->AllocWithGrowth(self, three_quarters_space);
+ }
+ EXPECT_TRUE(large_object != NULL);
+
+ // Sanity check footprint
+ footprint = mspace_footprint(mspace);
+ EXPECT_LE(footprint, growth_limit);
+ EXPECT_GE(space->Size(), footprint);
+ EXPECT_LE(space->Size(), growth_limit);
+
+ // Clean up
+ space->Free(self, large_object);
+
+ // Sanity check footprint
+ footprint = mspace_footprint(mspace);
+ EXPECT_LE(footprint, growth_limit);
+ EXPECT_GE(space->Size(), footprint);
+ EXPECT_LE(space->Size(), growth_limit);
+}
+
+void SpaceTest::SizeFootPrintGrowthLimitAndTrimDriver(size_t object_size) {
+ size_t initial_size = 4 * MB;
+ size_t growth_limit = 8 * MB;
+ size_t capacity = 16 * MB;
+ DlMallocSpace* space(DlMallocSpace::Create("test", initial_size, growth_limit, capacity, NULL));
+ ASSERT_TRUE(space != NULL);
+
+ // Basic sanity
+ EXPECT_EQ(space->Capacity(), growth_limit);
+ EXPECT_EQ(space->NonGrowthLimitCapacity(), capacity);
+
+  // Make the space findable to the heap; it will also be deleted when the runtime is cleaned up.
+ AddContinuousSpace(space);
+
+  // In round 1 we don't allocate with growth and therefore can't grow past the initial size.
+  // This effectively makes the growth limit the initial size, so pass initial_size as the limit.
+ SizeFootPrintGrowthLimitAndTrimBody(space, object_size, 1, initial_size);
+ SizeFootPrintGrowthLimitAndTrimBody(space, object_size, 2, growth_limit);
+ // Remove growth limit
+ space->ClearGrowthLimit();
+ EXPECT_EQ(space->Capacity(), capacity);
+ SizeFootPrintGrowthLimitAndTrimBody(space, object_size, 3, capacity);
+}
+
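+// Generates a pair of tests for a given allocation size: one performing fixed-size allocations
+// and one performing random-size allocations of at most that size (selected by negating the
+// argument passed to the driver).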
+#define TEST_SizeFootPrintGrowthLimitAndTrim(name, size) \
+ TEST_F(SpaceTest, SizeFootPrintGrowthLimitAndTrim_AllocationsOf_##name) { \
+ SizeFootPrintGrowthLimitAndTrimDriver(size); \
+ } \
+ TEST_F(SpaceTest, SizeFootPrintGrowthLimitAndTrim_RandomAllocationsWithMax_##name) { \
+ SizeFootPrintGrowthLimitAndTrimDriver(-size); \
+ }
+
+// Each size test is its own test so that we get a fresh heap each time
+TEST_F(SpaceTest, SizeFootPrintGrowthLimitAndTrim_AllocationsOf_8B) {
+ SizeFootPrintGrowthLimitAndTrimDriver(8);
+}
+TEST_SizeFootPrintGrowthLimitAndTrim(16B, 16)
+TEST_SizeFootPrintGrowthLimitAndTrim(24B, 24)
+TEST_SizeFootPrintGrowthLimitAndTrim(32B, 32)
+TEST_SizeFootPrintGrowthLimitAndTrim(64B, 64)
+TEST_SizeFootPrintGrowthLimitAndTrim(128B, 128)
+TEST_SizeFootPrintGrowthLimitAndTrim(1KB, 1 * KB)
+TEST_SizeFootPrintGrowthLimitAndTrim(4KB, 4 * KB)
+TEST_SizeFootPrintGrowthLimitAndTrim(1MB, 1 * MB)
+TEST_SizeFootPrintGrowthLimitAndTrim(4MB, 4 * MB)
+TEST_SizeFootPrintGrowthLimitAndTrim(8MB, 8 * MB)
+
+} // namespace space
+} // namespace gc
+} // namespace art