author     Mathieu Chartier <mathieuc@google.com>                     2015-01-26 19:19:26 +0000
committer  Gerrit Code Review <noreply-gerritcodereview@google.com>  2015-01-26 19:19:26 +0000
commit     2ca19c23b9bc64c743a4cdbe9282ad4d995b398d (patch)
tree       a0f2c6bdc9464f9c03424e6fc9db70e0cd26a96a /runtime/gc/collector
parent     cfc56e066d20a1ec31ce95be621c1aaf5565c954 (diff)
parent     cb535da36915f9d10bec3880b46f1de1f7a69f22 (diff)
Merge "Change AtomicStack to use StackReference"
Diffstat (limited to 'runtime/gc/collector')
-rw-r--r--  runtime/gc/collector/concurrent_copying.cc  21
-rw-r--r--  runtime/gc/collector/concurrent_copying.h    2
-rw-r--r--  runtime/gc/collector/mark_compact.cc          6
-rw-r--r--  runtime/gc/collector/mark_compact.h           6
-rw-r--r--  runtime/gc/collector/mark_sweep.cc           42
-rw-r--r--  runtime/gc/collector/mark_sweep.h            23
-rw-r--r--  runtime/gc/collector/semi_space.cc            6
-rw-r--r--  runtime/gc/collector/semi_space.h             6
8 files changed, 57 insertions(+), 55 deletions(-)
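
The change replaces the raw-pointer element type of the collector-side ObjectStack: accounting::AtomicStack<mirror::Object*> becomes accounting::AtomicStack<mirror::Object>, so each slot now holds a StackReference<mirror::Object> and callers read and write it through AsMirrorPtr() and Assign(). As a rough mental model, a StackReference is a compressed (32-bit) reference rather than a full pointer. A minimal sketch of that idea, not part of the commit, assuming the managed heap sits below 4 GB as ART's real CompressedReference does; the class and field names below are illustrative only:

// Sketch only (not part of this commit): a compressed reference stored as a
// 32-bit value instead of a full pointer.
#include <cstdint>

namespace mirror { class Object; }  // Forward declaration for illustration.

template <typename MirrorType>
class StackReferenceSketch {
 public:
  MirrorType* AsMirrorPtr() const {
    return reinterpret_cast<MirrorType*>(static_cast<uintptr_t>(reference_));
  }
  void Assign(MirrorType* other) {
    reference_ = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(other));
  }
 private:
  uint32_t reference_ = 0u;  // 4 bytes per slot instead of 8 on a 64-bit build.
};
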
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index 5fa3c8b..754e217 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -607,9 +607,9 @@ void ConcurrentCopying::VerifyNoFromSpaceReferences() {
// The alloc stack.
{
ConcurrentCopyingVerifyNoFromSpaceRefsVisitor ref_visitor(this);
- for (mirror::Object** it = heap_->allocation_stack_->Begin(),
- **end = heap_->allocation_stack_->End(); it < end; ++it) {
- mirror::Object* obj = *it;
+ for (auto* it = heap_->allocation_stack_->Begin(), *end = heap_->allocation_stack_->End();
+ it < end; ++it) {
+ mirror::Object* const obj = it->AsMirrorPtr();
if (obj != nullptr && obj->GetClass() != nullptr) {
// TODO: need to call this only if obj is alive?
ref_visitor(obj);
@@ -845,14 +845,14 @@ void ConcurrentCopying::ClearBlackPtrs() {
// Objects on the allocation stack?
if (ReadBarrier::kEnableReadBarrierInvariantChecks || kIsDebugBuild) {
size_t count = GetAllocationStack()->Size();
- mirror::Object** it = GetAllocationStack()->Begin();
- mirror::Object** end = GetAllocationStack()->End();
+ auto* it = GetAllocationStack()->Begin();
+ auto* end = GetAllocationStack()->End();
for (size_t i = 0; i < count; ++i, ++it) {
- CHECK(it < end);
- mirror::Object* obj = *it;
+ CHECK_LT(it, end);
+ mirror::Object* obj = it->AsMirrorPtr();
if (obj != nullptr) {
// Must have been cleared above.
- CHECK(obj->GetReadBarrierPointer() == ReadBarrier::WhitePtr()) << obj;
+ CHECK_EQ(obj->GetReadBarrierPointer(), ReadBarrier::WhitePtr()) << obj;
}
}
}
@@ -1446,10 +1446,7 @@ mirror::Object* ConcurrentCopying::IsMarked(mirror::Object* from_ref) {
bool ConcurrentCopying::IsOnAllocStack(mirror::Object* ref) {
QuasiAtomic::ThreadFenceAcquire();
accounting::ObjectStack* alloc_stack = GetAllocationStack();
- mirror::Object** begin = alloc_stack->Begin();
- // Important to read end once as it could be concurrently updated and screw up std::find().
- mirror::Object** end = alloc_stack->End();
- return std::find(begin, end, ref) != end;
+ return alloc_stack->Contains(ref);
}
mirror::Object* ConcurrentCopying::Mark(mirror::Object* from_ref) {
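
IsOnAllocStack used to open-code std::find over Begin()/End(); the removed comment records why End() had to be read exactly once (the allocation stack can be pushed to concurrently). Moving that logic behind alloc_stack->Contains(ref) keeps the invariant in one place. A hedged sketch of what such a membership test looks like, reusing the illustrative StackReferenceSketch from the note above; the real helper is AtomicStack::Contains in runtime/gc/accounting/atomic_stack.h, and the member layout here is a stand-in:

// Sketch: membership test over a lock-free bump-pointer stack of compressed
// references.  'begin' and 'back_index' stand in for the real AtomicStack
// internals; the key point is snapshotting the end exactly once.
#include <atomic>
#include <cstddef>

template <typename T>
bool ContainsSketch(StackReferenceSketch<T>* begin,
                    const std::atomic<size_t>& back_index,
                    const T* value) {
  // Read the end once: concurrent pushes after this point are ignored, and a
  // moving end iterator would otherwise break the scan.
  StackReferenceSketch<T>* const end =
      begin + back_index.load(std::memory_order_relaxed);
  for (StackReferenceSketch<T>* it = begin; it != end; ++it) {
    if (it->AsMirrorPtr() == value) {
      return true;
    }
  }
  return false;
}
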
diff --git a/runtime/gc/collector/concurrent_copying.h b/runtime/gc/collector/concurrent_copying.h
index 43f520a..d0e0446 100644
--- a/runtime/gc/collector/concurrent_copying.h
+++ b/runtime/gc/collector/concurrent_copying.h
@@ -233,7 +233,7 @@ class ConcurrentCopying : public GarbageCollector {
void SetFwdPtr(mirror::Object* from_ref, mirror::Object* to_ref)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void FlipThreadRoots() LOCKS_EXCLUDED(Locks::mutator_lock_);;
- void SwapStacks(Thread* self);
+ void SwapStacks(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void RecordLiveStackFreezeSize(Thread* self);
void ComputeUnevacFromSpaceLiveRatio();
diff --git a/runtime/gc/collector/mark_compact.cc b/runtime/gc/collector/mark_compact.cc
index 18af005..ff3c893 100644
--- a/runtime/gc/collector/mark_compact.cc
+++ b/runtime/gc/collector/mark_compact.cc
@@ -274,11 +274,11 @@ void MarkCompact::ReclaimPhase() {
}
void MarkCompact::ResizeMarkStack(size_t new_size) {
- std::vector<Object*> temp(mark_stack_->Begin(), mark_stack_->End());
+ std::vector<StackReference<Object>> temp(mark_stack_->Begin(), mark_stack_->End());
CHECK_LE(mark_stack_->Size(), new_size);
mark_stack_->Resize(new_size);
- for (const auto& obj : temp) {
- mark_stack_->PushBack(obj);
+ for (auto& obj : temp) {
+ mark_stack_->PushBack(obj.AsMirrorPtr());
}
}
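
ResizeMarkStack here (and the identical helpers in mark_sweep.cc and semi_space.cc further down) now snapshots StackReference<Object> elements rather than raw Object* values before growing the stack. One likely payoff of the new element type is footprint: on a 64-bit build each mark-stack or allocation-stack slot shrinks from an 8-byte pointer to a 4-byte compressed reference. A tiny illustrative check, reusing the hypothetical StackReferenceSketch from the first note:

// Sketch: the slot-size difference the StackReference element type buys on a
// 64-bit build (uses StackReferenceSketch and the mirror::Object forward
// declaration from the first note).
#include <cstdio>

int main() {
  std::printf("raw pointer slot:          %zu bytes\n", sizeof(mirror::Object*));
  std::printf("compressed reference slot: %zu bytes\n",
              sizeof(StackReferenceSketch<mirror::Object>));
  return 0;
}
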
diff --git a/runtime/gc/collector/mark_compact.h b/runtime/gc/collector/mark_compact.h
index f6d473d..06304bf 100644
--- a/runtime/gc/collector/mark_compact.h
+++ b/runtime/gc/collector/mark_compact.h
@@ -46,7 +46,7 @@ class Heap;
namespace accounting {
template <typename T> class AtomicStack;
- typedef AtomicStack<mirror::Object*> ObjectStack;
+ typedef AtomicStack<mirror::Object> ObjectStack;
} // namespace accounting
namespace space {
@@ -156,13 +156,13 @@ class MarkCompact : public GarbageCollector {
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Expand mark stack to 2x its current size.
- void ResizeMarkStack(size_t new_size);
+ void ResizeMarkStack(size_t new_size) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Returns true if we should sweep the space.
bool ShouldSweepSpace(space::ContinuousSpace* space) const;
// Push an object onto the mark stack.
- void MarkStackPush(mirror::Object* obj);
+ void MarkStackPush(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void UpdateAndMarkModUnion()
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 04fb694..1959c09 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -330,11 +330,11 @@ void MarkSweep::ResizeMarkStack(size_t new_size) {
// Someone else acquired the lock and expanded the mark stack before us.
return;
}
- std::vector<Object*> temp(mark_stack_->Begin(), mark_stack_->End());
+ std::vector<StackReference<Object>> temp(mark_stack_->Begin(), mark_stack_->End());
CHECK_LE(mark_stack_->Size(), new_size);
mark_stack_->Resize(new_size);
- for (const auto& obj : temp) {
- mark_stack_->PushBack(obj);
+ for (auto& obj : temp) {
+ mark_stack_->PushBack(obj.AsMirrorPtr());
}
}
@@ -554,7 +554,7 @@ template <bool kUseFinger = false>
class MarkStackTask : public Task {
public:
MarkStackTask(ThreadPool* thread_pool, MarkSweep* mark_sweep, size_t mark_stack_size,
- Object** mark_stack)
+ StackReference<Object>* mark_stack)
: mark_sweep_(mark_sweep),
thread_pool_(thread_pool),
mark_stack_pos_(mark_stack_size) {
@@ -627,11 +627,11 @@ class MarkStackTask : public Task {
MarkSweep* const mark_sweep_;
ThreadPool* const thread_pool_;
// Thread local mark stack for this task.
- Object* mark_stack_[kMaxSize];
+ StackReference<Object> mark_stack_[kMaxSize];
// Mark stack position.
size_t mark_stack_pos_;
- void MarkStackPush(Object* obj) ALWAYS_INLINE {
+ ALWAYS_INLINE void MarkStackPush(Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
if (UNLIKELY(mark_stack_pos_ == kMaxSize)) {
// Mark stack overflow, give 1/2 the stack to the thread pool as a new work task.
mark_stack_pos_ /= 2;
@@ -641,7 +641,7 @@ class MarkStackTask : public Task {
}
DCHECK(obj != nullptr);
DCHECK_LT(mark_stack_pos_, kMaxSize);
- mark_stack_[mark_stack_pos_++] = obj;
+ mark_stack_[mark_stack_pos_++].Assign(obj);
}
virtual void Finalize() {
@@ -660,7 +660,7 @@ class MarkStackTask : public Task {
Object* obj = nullptr;
if (kUseMarkStackPrefetch) {
while (mark_stack_pos_ != 0 && prefetch_fifo.size() < kFifoSize) {
- Object* mark_stack_obj = mark_stack_[--mark_stack_pos_];
+ Object* const mark_stack_obj = mark_stack_[--mark_stack_pos_].AsMirrorPtr();
DCHECK(mark_stack_obj != nullptr);
__builtin_prefetch(mark_stack_obj);
prefetch_fifo.push_back(mark_stack_obj);
@@ -674,7 +674,7 @@ class MarkStackTask : public Task {
if (UNLIKELY(mark_stack_pos_ == 0)) {
break;
}
- obj = mark_stack_[--mark_stack_pos_];
+ obj = mark_stack_[--mark_stack_pos_].AsMirrorPtr();
}
DCHECK(obj != nullptr);
visitor(obj);
@@ -687,7 +687,7 @@ class CardScanTask : public MarkStackTask<false> {
CardScanTask(ThreadPool* thread_pool, MarkSweep* mark_sweep,
accounting::ContinuousSpaceBitmap* bitmap,
uint8_t* begin, uint8_t* end, uint8_t minimum_age, size_t mark_stack_size,
- Object** mark_stack_obj, bool clear_card)
+ StackReference<Object>* mark_stack_obj, bool clear_card)
: MarkStackTask<false>(thread_pool, mark_sweep, mark_stack_size, mark_stack_obj),
bitmap_(bitmap),
begin_(begin),
@@ -742,8 +742,8 @@ void MarkSweep::ScanGrayObjects(bool paused, uint8_t minimum_age) {
TimingLogger::ScopedTiming t(paused ? "(Paused)ScanGrayObjects" : __FUNCTION__,
GetTimings());
// Try to take some of the mark stack since we can pass this off to the worker tasks.
- Object** mark_stack_begin = mark_stack_->Begin();
- Object** mark_stack_end = mark_stack_->End();
+ StackReference<Object>* mark_stack_begin = mark_stack_->Begin();
+ StackReference<Object>* mark_stack_end = mark_stack_->End();
const size_t mark_stack_size = mark_stack_end - mark_stack_begin;
// Estimated number of work tasks we will create.
const size_t mark_stack_tasks = GetHeap()->GetContinuousSpaces().size() * thread_count;
@@ -954,9 +954,9 @@ mirror::Object* MarkSweep::VerifySystemWeakIsLiveCallback(Object* obj, void* arg
void MarkSweep::VerifyIsLive(const Object* obj) {
if (!heap_->GetLiveBitmap()->Test(obj)) {
- accounting::ObjectStack* allocation_stack = heap_->allocation_stack_.get();
- CHECK(std::find(allocation_stack->Begin(), allocation_stack->End(), obj) !=
- allocation_stack->End()) << "Found dead object " << obj << "\n" << heap_->DumpSpaces();
+ // TODO: Consider live stack? Has this code bitrotted?
+ CHECK(!heap_->allocation_stack_->Contains(obj))
+ << "Found dead object " << obj << "\n" << heap_->DumpSpaces();
}
}
@@ -1025,7 +1025,7 @@ void MarkSweep::SweepArray(accounting::ObjectStack* allocations, bool swap_bitma
ObjectBytePair freed;
ObjectBytePair freed_los;
// How many objects are left in the array, modified after each space is swept.
- Object** objects = allocations->Begin();
+ StackReference<Object>* objects = allocations->Begin();
size_t count = allocations->Size();
// Change the order to ensure that the non-moving space last swept as an optimization.
std::vector<space::ContinuousSpace*> sweep_spaces;
@@ -1053,9 +1053,9 @@ void MarkSweep::SweepArray(accounting::ObjectStack* allocations, bool swap_bitma
if (swap_bitmaps) {
std::swap(live_bitmap, mark_bitmap);
}
- Object** out = objects;
+ StackReference<Object>* out = objects;
for (size_t i = 0; i < count; ++i) {
- Object* obj = objects[i];
+ Object* const obj = objects[i].AsMirrorPtr();
if (kUseThreadLocalAllocationStack && obj == nullptr) {
continue;
}
@@ -1072,7 +1072,7 @@ void MarkSweep::SweepArray(accounting::ObjectStack* allocations, bool swap_bitma
chunk_free_buffer[chunk_free_pos++] = obj;
}
} else {
- *(out++) = obj;
+ (out++)->Assign(obj);
}
}
if (chunk_free_pos > 0) {
@@ -1094,7 +1094,7 @@ void MarkSweep::SweepArray(accounting::ObjectStack* allocations, bool swap_bitma
std::swap(large_live_objects, large_mark_objects);
}
for (size_t i = 0; i < count; ++i) {
- Object* obj = objects[i];
+ Object* const obj = objects[i].AsMirrorPtr();
// Handle large objects.
if (kUseThreadLocalAllocationStack && obj == nullptr) {
continue;
@@ -1195,7 +1195,7 @@ void MarkSweep::ProcessMarkStackParallel(size_t thread_count) {
static_cast<size_t>(MarkStackTask<false>::kMaxSize));
CHECK_GT(chunk_size, 0U);
// Split the current mark stack up into work tasks.
- for (mirror::Object **it = mark_stack_->Begin(), **end = mark_stack_->End(); it < end; ) {
+ for (auto* it = mark_stack_->Begin(), *end = mark_stack_->End(); it < end; ) {
const size_t delta = std::min(static_cast<size_t>(end - it), chunk_size);
thread_pool->AddTask(self, new MarkStackTask<false>(thread_pool, this, delta, it));
it += delta;
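
SweepArray keeps its in-place compaction idiom: walk the allocation-stack snapshot, free unmarked objects, and Assign() survivors back over the front of the same array, where the old code stored raw pointers with *(out++) = obj. The idiom in isolation, as a hedged sketch with hypothetical names (is_marked and free_object stand in for the bitmap test and the space's free call):

// Sketch: walk a StackReference<T> array, free unmarked objects, and compact
// the survivors to the front of the same array; returns the new element count.
#include <cstddef>

template <typename T, typename IsMarkedFn, typename FreeFn>
size_t CompactSurvivorsSketch(StackReferenceSketch<T>* objects, size_t count,
                              IsMarkedFn is_marked, FreeFn free_object) {
  StackReferenceSketch<T>* out = objects;
  for (size_t i = 0; i < count; ++i) {
    T* const obj = objects[i].AsMirrorPtr();
    if (obj == nullptr) {
      continue;  // Thread-local allocation stacks may leave null padding slots.
    }
    if (!is_marked(obj)) {
      free_object(obj);      // Dead: reclaim it.
    } else {
      (out++)->Assign(obj);  // Live: keep it for the next pass.
    }
  }
  return static_cast<size_t>(out - objects);
}
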
diff --git a/runtime/gc/collector/mark_sweep.h b/runtime/gc/collector/mark_sweep.h
index b787327..3f99e21 100644
--- a/runtime/gc/collector/mark_sweep.h
+++ b/runtime/gc/collector/mark_sweep.h
@@ -47,7 +47,7 @@ class Heap;
namespace accounting {
template<typename T> class AtomicStack;
- typedef AtomicStack<mirror::Object*> ObjectStack;
+ typedef AtomicStack<mirror::Object> ObjectStack;
} // namespace accounting
namespace collector {
@@ -136,7 +136,8 @@ class MarkSweep : public GarbageCollector {
// Sweeps unmarked objects to complete the garbage collection. Virtual as by default it sweeps
// all allocation spaces. Partial and sticky GCs want to just sweep a subset of the heap.
- virtual void Sweep(bool swap_bitmaps) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+ virtual void Sweep(bool swap_bitmaps) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Sweeps unmarked objects to complete the garbage collection.
void SweepLargeObjects(bool swap_bitmaps) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
@@ -162,13 +163,14 @@ class MarkSweep : public GarbageCollector {
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
static mirror::Object* VerifySystemWeakIsLiveCallback(mirror::Object* obj, void* arg)
- SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+ SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
void VerifySystemWeaks()
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
// Verify that an object is live, either in a live bitmap or in the allocation stack.
void VerifyIsLive(const mirror::Object* obj)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
static mirror::Object* MarkObjectCallback(mirror::Object* obj, void* arg)
@@ -223,11 +225,12 @@ class MarkSweep : public GarbageCollector {
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
void MarkObjectNonNull(mirror::Object* obj)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
// Marks an object atomically, safe to use from multiple threads.
- void MarkObjectNonNullParallel(mirror::Object* obj);
+ void MarkObjectNonNullParallel(mirror::Object* obj)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Returns true if we need to add obj to a mark stack.
bool MarkObjectParallel(const mirror::Object* obj) NO_THREAD_SAFETY_ANALYSIS;
@@ -238,8 +241,10 @@ class MarkSweep : public GarbageCollector {
NO_THREAD_SAFETY_ANALYSIS;
// Expand mark stack to 2x its current size.
- void ExpandMarkStack() EXCLUSIVE_LOCKS_REQUIRED(mark_stack_lock_);
- void ResizeMarkStack(size_t new_size) EXCLUSIVE_LOCKS_REQUIRED(mark_stack_lock_);
+ void ExpandMarkStack() EXCLUSIVE_LOCKS_REQUIRED(mark_stack_lock_)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void ResizeMarkStack(size_t new_size) EXCLUSIVE_LOCKS_REQUIRED(mark_stack_lock_)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Returns how many threads we should use for the current GC phase based on if we are paused,
// whether or not we care about pauses.
@@ -250,7 +255,7 @@ class MarkSweep : public GarbageCollector {
void VerifyRoot(const mirror::Object* root, const RootInfo& root_info) NO_THREAD_SAFETY_ANALYSIS;
// Push a single reference on a mark stack.
- void PushOnMarkStack(mirror::Object* obj);
+ void PushOnMarkStack(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Blackens objects grayed during a garbage collection.
void ScanGrayObjects(bool paused, uint8_t minimum_age)
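
Most of the header changes add SHARED_LOCKS_REQUIRED(Locks::mutator_lock_): functions that decode StackReference slots into mirror pointers now declare that the mutator lock must be held at least shared, so Clang's -Wthread-safety analysis can check callers. A standalone sketch of the underlying annotations; ART's macros wrap Clang attributes roughly like this, and every name below is illustrative rather than taken from the tree:

// Sketch: Clang -Wthread-safety annotations of the kind ART's
// SHARED_LOCKS_REQUIRED / EXCLUSIVE_LOCKS_REQUIRED macros expand to.
class __attribute__((lockable)) MutatorLockSketch {
 public:
  void SharedLock() __attribute__((shared_lock_function));
  void SharedUnlock() __attribute__((unlock_function));
};

MutatorLockSketch mutator_lock_sketch;

// With -Wthread-safety, Clang warns at call sites that do not hold
// mutator_lock_sketch at least shared.
void MarkStackPushSketch(void* obj)
    __attribute__((shared_locks_required(mutator_lock_sketch)));
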
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index fcc601f..8660eff 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -421,11 +421,11 @@ void SemiSpace::ReclaimPhase() {
}
void SemiSpace::ResizeMarkStack(size_t new_size) {
- std::vector<Object*> temp(mark_stack_->Begin(), mark_stack_->End());
+ std::vector<StackReference<Object>> temp(mark_stack_->Begin(), mark_stack_->End());
CHECK_LE(mark_stack_->Size(), new_size);
mark_stack_->Resize(new_size);
- for (const auto& obj : temp) {
- mark_stack_->PushBack(obj);
+ for (auto& obj : temp) {
+ mark_stack_->PushBack(obj.AsMirrorPtr());
}
}
diff --git a/runtime/gc/collector/semi_space.h b/runtime/gc/collector/semi_space.h
index f8fced8..192fb14 100644
--- a/runtime/gc/collector/semi_space.h
+++ b/runtime/gc/collector/semi_space.h
@@ -45,7 +45,7 @@ class Heap;
namespace accounting {
template <typename T> class AtomicStack;
- typedef AtomicStack<mirror::Object*> ObjectStack;
+ typedef AtomicStack<mirror::Object> ObjectStack;
} // namespace accounting
namespace space {
@@ -178,13 +178,13 @@ class SemiSpace : public GarbageCollector {
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Expand mark stack to 2x its current size.
- void ResizeMarkStack(size_t new_size);
+ void ResizeMarkStack(size_t new_size) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Returns true if we should sweep the space.
virtual bool ShouldSweepSpace(space::ContinuousSpace* space) const;
// Push an object onto the mark stack.
- void MarkStackPush(mirror::Object* obj);
+ void MarkStackPush(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void UpdateAndMarkModUnion()
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)