author    Mathieu Chartier <mathieuc@google.com>  2014-04-11 17:53:48 -0700
committer Mathieu Chartier <mathieuc@google.com>  2014-04-22 10:57:22 -0700
commit    8585bad7be19ee4901333f7d02d1d4d3f04877d4 (patch)
tree      be30d58fc4fd71a4fcbdffa0d19a4bf9aaaa4c50 /runtime/gc
parent    82b1a81890970a8b07f9132aeae537a6c43df6b0 (diff)
Return bytes freed from RosAlloc.
There was a problem with how RosAlloc space sweeping worked: it computed the number of bytes freed from the object size in the FreeList call. This won't work well with class unloading, since the object's class may be freed before the object itself. Instead, have RosAlloc report the bytes it freed from its own bookkeeping.

Bug: 13989231
Change-Id: I3df439c312310720fd34249334dec85030166fe9
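The gist of the change, as a minimal sketch rather than the actual ART code (the simplified types and the fixed bracket size below are illustrative assumptions): the space must not derive the freed-byte count from the objects being swept, because reading an object's size dereferences its class; the allocator's own bookkeeping (bracket sizes, page counts) is the safe source of that number.

    #include <cstddef>

    // Illustrative stand-ins for the real ART types; not the actual definitions.
    struct Class { size_t object_size_; };
    struct Object {
      Class* klass_;
      size_t SizeOf() const { return klass_->object_size_; }  // touches the class
    };

    struct SimpleRosAlloc {
      static constexpr size_t kBracketSize = 16;  // pretend every slot is 16 bytes
      // After this change: the allocator reports how many bytes it reclaimed from
      // its own run/page bookkeeping, never by inspecting the freed objects.
      size_t BulkFree(Object** /*ptrs*/, size_t num_ptrs) {
        return kBracketSize * num_ptrs;
      }
    };

    // Before: the caller summed object sizes itself. If class unloading already
    // freed ptrs[i]->klass_, SizeOf() reads freed memory during sweeping.
    size_t FreeListUnsafe(SimpleRosAlloc* alloc, Object** ptrs, size_t num_ptrs) {
      size_t bytes_freed = 0;
      for (size_t i = 0; i < num_ptrs; ++i) {
        bytes_freed += ptrs[i]->SizeOf();  // may dereference a freed Class
      }
      alloc->BulkFree(ptrs, num_ptrs);
      return bytes_freed;
    }

    // After: trust the allocator's accounting instead of the objects'.
    size_t FreeListSafe(SimpleRosAlloc* alloc, Object** ptrs, size_t num_ptrs) {
      return alloc->BulkFree(ptrs, num_ptrs);
    }

In the actual patch below, RosAlloc::BulkFree accumulates IndexToBracketSize(run->size_bracket_idx_) for run slots and FreePages(...) * kPageSize for large objects, and RosAllocSpace::FreeList simply returns that total, cross-checking it against the old per-object sum only when kVerifyFreedBytes is enabled.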
Diffstat (limited to 'runtime/gc')
-rw-r--r--  runtime/gc/allocator/rosalloc.cc    48
-rw-r--r--  runtime/gc/allocator/rosalloc.h      9
-rw-r--r--  runtime/gc/space/rosalloc_space.cc  28
3 files changed, 46 insertions(+), 39 deletions(-)
diff --git a/runtime/gc/allocator/rosalloc.cc b/runtime/gc/allocator/rosalloc.cc
index cbefa6a..0f2d6a9 100644
--- a/runtime/gc/allocator/rosalloc.cc
+++ b/runtime/gc/allocator/rosalloc.cc
@@ -279,7 +279,7 @@ void* RosAlloc::AllocPages(Thread* self, size_t num_pages, byte page_map_type) {
return nullptr;
}
-void RosAlloc::FreePages(Thread* self, void* ptr) {
+size_t RosAlloc::FreePages(Thread* self, void* ptr) {
lock_.AssertHeld(self);
size_t pm_idx = ToPageMapIndex(ptr);
DCHECK_LT(pm_idx, page_map_size_);
@@ -298,7 +298,7 @@ void RosAlloc::FreePages(Thread* self, void* ptr) {
LOG(FATAL) << "Unreachable - RosAlloc::FreePages() : " << "pm_idx=" << pm_idx << ", pm_type="
<< static_cast<int>(pm_type) << ", ptr=" << std::hex
<< reinterpret_cast<intptr_t>(ptr);
- return;
+ return 0;
}
// Update the page map and count the number of pages.
size_t num_pages = 1;
@@ -422,6 +422,7 @@ void RosAlloc::FreePages(Thread* self, void* ptr) {
LOG(INFO) << "RosAlloc::FreePages() : Inserted run 0x" << std::hex << reinterpret_cast<intptr_t>(fpr)
<< " into free_page_runs_";
}
+ return num_pages;
}
void* RosAlloc::AllocLargeObject(Thread* self, size_t size, size_t* bytes_allocated) {
@@ -460,12 +461,11 @@ void* RosAlloc::AllocLargeObject(Thread* self, size_t size, size_t* bytes_alloca
return r;
}
-void RosAlloc::FreeInternal(Thread* self, void* ptr) {
+size_t RosAlloc::FreeInternal(Thread* self, void* ptr) {
DCHECK_LE(base_, ptr);
DCHECK_LT(ptr, base_ + footprint_);
size_t pm_idx = RoundDownToPageMapIndex(ptr);
- bool free_from_run = false;
- Run* run = NULL;
+ Run* run = nullptr;
{
MutexLock mu(self, lock_);
DCHECK_LT(pm_idx, page_map_size_);
@@ -477,16 +477,14 @@ void RosAlloc::FreeInternal(Thread* self, void* ptr) {
switch (page_map_[pm_idx]) {
case kPageMapEmpty:
LOG(FATAL) << "Unreachable - page map type: " << page_map_[pm_idx];
- return;
+ return 0;
case kPageMapLargeObject:
- FreePages(self, ptr);
- return;
+ return FreePages(self, ptr) * kPageSize;
case kPageMapLargeObjectPart:
LOG(FATAL) << "Unreachable - page map type: " << page_map_[pm_idx];
- return;
+ return 0;
case kPageMapRun:
case kPageMapRunPart: {
- free_from_run = true;
size_t pi = pm_idx;
DCHECK(page_map_[pi] == kPageMapRun || page_map_[pi] == kPageMapRunPart);
// Find the beginning of the run.
@@ -501,18 +499,18 @@ void RosAlloc::FreeInternal(Thread* self, void* ptr) {
}
default:
LOG(FATAL) << "Unreachable - page map type: " << page_map_[pm_idx];
- return;
+ return 0;
}
}
- if (LIKELY(free_from_run)) {
- DCHECK(run != NULL);
- FreeFromRun(self, ptr, run);
- }
+ DCHECK(run != nullptr);
+ const size_t size = IndexToBracketSize(run->size_bracket_idx_);
+ FreeFromRun(self, ptr, run);
+ return size;
}
-void RosAlloc::Free(Thread* self, void* ptr) {
+size_t RosAlloc::Free(Thread* self, void* ptr) {
ReaderMutexLock rmu(self, bulk_free_lock_);
- FreeInternal(self, ptr);
+ return FreeInternal(self, ptr);
}
RosAlloc::Run* RosAlloc::RefillRun(Thread* self, size_t idx) {
@@ -1077,13 +1075,14 @@ void RosAlloc::Run::InspectAllSlots(void (*handler)(void* start, void* end, size
// the page map entry won't change. Disabled for now.
static constexpr bool kReadPageMapEntryWithoutLockInBulkFree = false;
-void RosAlloc::BulkFree(Thread* self, void** ptrs, size_t num_ptrs) {
+size_t RosAlloc::BulkFree(Thread* self, void** ptrs, size_t num_ptrs) {
+ size_t freed_bytes = 0;
if (false) {
// Used only to test Free() as GC uses only BulkFree().
for (size_t i = 0; i < num_ptrs; ++i) {
- FreeInternal(self, ptrs[i]);
+ freed_bytes += FreeInternal(self, ptrs[i]);
}
- return;
+ return freed_bytes;
}
WriterMutexLock wmu(self, bulk_free_lock_);
@@ -1126,14 +1125,15 @@ void RosAlloc::BulkFree(Thread* self, void** ptrs, size_t num_ptrs) {
DCHECK_EQ(run->magic_num_, kMagicNum);
} else if (page_map_entry == kPageMapLargeObject) {
MutexLock mu(self, lock_);
- FreePages(self, ptr);
+ freed_bytes += FreePages(self, ptr) * kPageSize;
continue;
} else {
LOG(FATAL) << "Unreachable - page map type: " << page_map_entry;
}
- DCHECK(run != NULL);
+ DCHECK(run != nullptr);
// Set the bit in the bulk free bit map.
run->MarkBulkFreeBitMap(ptr);
+ freed_bytes += IndexToBracketSize(run->size_bracket_idx_);
#ifdef HAVE_ANDROID_OS
if (!run->to_be_bulk_freed_) {
run->to_be_bulk_freed_ = true;
@@ -1171,7 +1171,7 @@ void RosAlloc::BulkFree(Thread* self, void** ptrs, size_t num_ptrs) {
run = reinterpret_cast<Run*>(base_ + pi * kPageSize);
DCHECK_EQ(run->magic_num_, kMagicNum);
} else if (page_map_entry == kPageMapLargeObject) {
- FreePages(self, ptr);
+ freed_bytes += FreePages(self, ptr) * kPageSize;
} else {
LOG(FATAL) << "Unreachable - page map type: " << page_map_entry;
}
@@ -1180,6 +1180,7 @@ void RosAlloc::BulkFree(Thread* self, void** ptrs, size_t num_ptrs) {
DCHECK(run != NULL);
// Set the bit in the bulk free bit map.
run->MarkBulkFreeBitMap(ptr);
+ freed_bytes += IndexToBracketSize(run->size_bracket_idx_);
#ifdef HAVE_ANDROID_OS
if (!run->to_be_bulk_freed_) {
run->to_be_bulk_freed_ = true;
@@ -1306,6 +1307,7 @@ void RosAlloc::BulkFree(Thread* self, void** ptrs, size_t num_ptrs) {
}
}
}
+ return freed_bytes;
}
std::string RosAlloc::DumpPageMap() {
diff --git a/runtime/gc/allocator/rosalloc.h b/runtime/gc/allocator/rosalloc.h
index 5d9d75c..0c508b7 100644
--- a/runtime/gc/allocator/rosalloc.h
+++ b/runtime/gc/allocator/rosalloc.h
@@ -493,7 +493,8 @@ class RosAlloc {
// Page-granularity alloc/free
void* AllocPages(Thread* self, size_t num_pages, byte page_map_type)
EXCLUSIVE_LOCKS_REQUIRED(lock_);
- void FreePages(Thread* self, void* ptr) EXCLUSIVE_LOCKS_REQUIRED(lock_);
+ // Returns how many pages were freed.
+ size_t FreePages(Thread* self, void* ptr) EXCLUSIVE_LOCKS_REQUIRED(lock_);
// Allocate/free a run slot.
void* AllocFromRun(Thread* self, size_t size, size_t* bytes_allocated)
@@ -506,7 +507,7 @@ class RosAlloc {
Run* RefillRun(Thread* self, size_t idx) LOCKS_EXCLUDED(lock_);
// The internal of non-bulk Free().
- void FreeInternal(Thread* self, void* ptr) LOCKS_EXCLUDED(lock_);
+ size_t FreeInternal(Thread* self, void* ptr) LOCKS_EXCLUDED(lock_);
// Allocates large objects.
void* AllocLargeObject(Thread* self, size_t size, size_t* bytes_allocated) LOCKS_EXCLUDED(lock_);
@@ -518,9 +519,9 @@ class RosAlloc {
~RosAlloc();
void* Alloc(Thread* self, size_t size, size_t* bytes_allocated)
LOCKS_EXCLUDED(lock_);
- void Free(Thread* self, void* ptr)
+ size_t Free(Thread* self, void* ptr)
LOCKS_EXCLUDED(bulk_free_lock_);
- void BulkFree(Thread* self, void** ptrs, size_t num_ptrs)
+ size_t BulkFree(Thread* self, void** ptrs, size_t num_ptrs)
LOCKS_EXCLUDED(bulk_free_lock_);
// Returns the size of the allocated slot for a given allocated memory chunk.
size_t UsableSize(void* ptr);
diff --git a/runtime/gc/space/rosalloc_space.cc b/runtime/gc/space/rosalloc_space.cc
index 5a7d941..a5a6da0 100644
--- a/runtime/gc/space/rosalloc_space.cc
+++ b/runtime/gc/space/rosalloc_space.cc
@@ -33,6 +33,10 @@ namespace gc {
namespace space {
static constexpr bool kPrefetchDuringRosAllocFreeList = true;
+static constexpr size_t kPrefetchLookAhead = 8;
+// Use this only for verification, it is not safe to use since the class of the object may have
+// been freed.
+static constexpr bool kVerifyFreedBytes = false;
// TODO: Fix
// template class ValgrindMallocSpace<RosAllocSpace, allocator::RosAlloc*>;
@@ -172,27 +176,24 @@ size_t RosAllocSpace::Free(Thread* self, mirror::Object* ptr) {
CHECK(ptr != NULL);
CHECK(Contains(ptr)) << "Free (" << ptr << ") not in bounds of heap " << *this;
}
- const size_t bytes_freed = AllocationSizeNonvirtual(ptr, nullptr);
if (kRecentFreeCount > 0) {
MutexLock mu(self, lock_);
RegisterRecentFree(ptr);
}
- rosalloc_->Free(self, ptr);
- return bytes_freed;
+ return rosalloc_->Free(self, ptr);
}
size_t RosAllocSpace::FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) {
- DCHECK(ptrs != NULL);
+ DCHECK(ptrs != nullptr);
- // Don't need the lock to calculate the size of the freed pointers.
- size_t bytes_freed = 0;
+ size_t verify_bytes = 0;
for (size_t i = 0; i < num_ptrs; i++) {
- mirror::Object* ptr = ptrs[i];
- const size_t look_ahead = 8;
- if (kPrefetchDuringRosAllocFreeList && i + look_ahead < num_ptrs) {
- __builtin_prefetch(reinterpret_cast<char*>(ptrs[i + look_ahead]));
+ if (kPrefetchDuringRosAllocFreeList && i + kPrefetchLookAhead < num_ptrs) {
+ __builtin_prefetch(reinterpret_cast<char*>(ptrs[i + kPrefetchLookAhead]));
+ }
+ if (kVerifyFreedBytes) {
+ verify_bytes += AllocationSizeNonvirtual(ptrs[i], nullptr);
}
- bytes_freed += AllocationSizeNonvirtual(ptr, nullptr);
}
if (kRecentFreeCount > 0) {
@@ -216,7 +217,10 @@ size_t RosAllocSpace::FreeList(Thread* self, size_t num_ptrs, mirror::Object** p
CHECK_EQ(num_broken_ptrs, 0u);
}
- rosalloc_->BulkFree(self, reinterpret_cast<void**>(ptrs), num_ptrs);
+ const size_t bytes_freed = rosalloc_->BulkFree(self, reinterpret_cast<void**>(ptrs), num_ptrs);
+ if (kVerifyFreedBytes) {
+ CHECK_EQ(verify_bytes, bytes_freed);
+ }
return bytes_freed;
}