Diffstat (limited to 'runtime/gc')
-rw-r--r--  runtime/gc/heap-inl.h  8
-rw-r--r--  runtime/gc/heap.h      3
2 files changed, 7 insertions, 4 deletions
diff --git a/runtime/gc/heap-inl.h b/runtime/gc/heap-inl.h
index 89ded0b..3d2f7ea 100644
--- a/runtime/gc/heap-inl.h
+++ b/runtime/gc/heap-inl.h
@@ -107,7 +107,7 @@ inline mirror::Object* Heap::AllocObjectWithAllocator(Thread* self, mirror::Clas
   // optimized out. And for the other allocators, AllocatorMayHaveConcurrentGC is a constant since
   // the allocator_type should be constant propagated.
   if (AllocatorMayHaveConcurrentGC(allocator) && concurrent_gc_) {
-    CheckConcurrentGC(self, new_num_bytes_allocated, obj);
+    CheckConcurrentGC(self, new_num_bytes_allocated, &obj);
   }
   VerifyObject(obj);
   self->VerifyStack();
@@ -280,11 +280,13 @@ inline bool Heap::IsOutOfMemoryOnAllocation(AllocatorType allocator_type, size_t
 }
 
 inline void Heap::CheckConcurrentGC(Thread* self, size_t new_num_bytes_allocated,
-                                    mirror::Object* obj) {
+                                    mirror::Object** obj) {
   if (UNLIKELY(new_num_bytes_allocated >= concurrent_start_bytes_)) {
     // The SirtRef is necessary since the calls in RequestConcurrentGC are a safepoint.
-    SirtRef<mirror::Object> ref(self, obj);
+    SirtRef<mirror::Object> ref(self, *obj);
     RequestConcurrentGC(self);
+    // Restore obj in case it moved.
+    *obj = ref.get();
   }
 }
 
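The heap-inl.h change is the substance of the fix: RequestConcurrentGC can bring the thread to a safepoint, and with a moving collector the object may be relocated while the thread is suspended, so the raw pointer the caller still holds could be stale afterwards. Rooting the object in a SirtRef (a stack indirect reference table entry the collector knows about) keeps the reference visible across the collection, and taking mirror::Object** lets CheckConcurrentGC write the possibly-updated address back before the caller goes on to use obj in VerifyObject. A minimal sketch of the pattern, using hypothetical stand-in types rather than ART's SirtRef and mirror::Object:

// Sketch only (not ART code): why the callee must write the pointer back
// after a safepoint when the collector may move objects.
struct Object {};                      // stand-in for mirror::Object

// Hypothetical GC-visible handle, similar in spirit to SirtRef: the collector
// updates obj_ if it relocates the referenced object.
class GcRootedHandle {
 public:
  explicit GcRootedHandle(Object* obj) : obj_(obj) {}  // would register a GC root
  ~GcRootedHandle() {}                                 // would unregister the root
  Object* Get() const { return obj_; }
 private:
  Object* obj_;                        // rewritten by the GC on a move
};

void MaybeSuspendForGc() {}            // hypothetical safepoint; objects may move here

void CheckAtSafepoint(Object** obj) {
  GcRootedHandle ref(*obj);            // root the object before the safepoint
  MaybeSuspendForGc();                 // the caller's raw pointer may now be stale
  *obj = ref.Get();                    // restore the (possibly moved) address
}

Passing &obj at the call site, as the first hunk does, is what keeps the caller's local pointer valid after a potential collection.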
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index a90af27..92da7e9 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -575,7 +575,8 @@ class Heap {
   bool ShouldAllocLargeObject(mirror::Class* c, size_t byte_count) const
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   ALWAYS_INLINE void CheckConcurrentGC(Thread* self, size_t new_num_bytes_allocated,
-                                       mirror::Object* obj);
+                                       mirror::Object** obj)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // We don't force this to be inlined since it is a slow path.
   template <bool kInstrumented, typename PreFenceVisitor>
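The heap.h hunk updates the declaration to match and adds a SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) annotation, since the function now handles a managed object reference and must be called with the mutator lock held in shared mode. The annotation follows the naming of Clang's thread-safety-analysis attributes; a minimal sketch of that general mechanism, with an illustrative macro definition and lock that are not ART's actual ones:

// Sketch of the Clang thread-safety-analysis pattern behind annotations like
// SHARED_LOCKS_REQUIRED; the macros and lock here are illustrative, not ART's
// actual definitions.  Compile with: clang++ -Wthread-safety -c sketch.cc
#define SHARED_LOCKABLE            __attribute__((lockable))
#define SHARED_LOCK_FUNCTION(...)  __attribute__((shared_lock_function(__VA_ARGS__)))
#define UNLOCK_FUNCTION(...)       __attribute__((unlock_function(__VA_ARGS__)))
#define SHARED_LOCKS_REQUIRED(...) __attribute__((shared_locks_required(__VA_ARGS__)))

class SHARED_LOCKABLE ReaderWriterMutex {
 public:
  void SharedLock()   SHARED_LOCK_FUNCTION() {}
  void SharedUnlock() UNLOCK_FUNCTION() {}
};

ReaderWriterMutex mutator_lock;  // stand-in for Locks::mutator_lock_

// The analysis warns at compile time if a caller reaches this function
// without holding mutator_lock in shared mode.
void TouchManagedObject() SHARED_LOCKS_REQUIRED(mutator_lock) {}

void Caller() {
  mutator_lock.SharedLock();
  TouchManagedObject();          // ok: shared lock is held
  mutator_lock.SharedUnlock();
}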