summary | refs | log | tree | commit | diff | stats
path: root/runtime/gc
diff options
context:
space:
mode:
author	Ian Rogers <irogers@google.com>	2013-11-19 18:00:50 -0800
committer	Ian Rogers <irogers@google.com>	2013-12-20 08:01:57 -0800
commitb122a4bbed34ab22b4c1541ee25e5cf22f12a926 (patch)
tree624f16271f4481a8fd5aa2f607385f490dc7b3ae /runtime/gc
parente40687d053b89c495b6fbeb7a766b01c9c7e039c (diff)
downloadart-b122a4bbed34ab22b4c1541ee25e5cf22f12a926.zip
art-b122a4bbed34ab22b4c1541ee25e5cf22f12a926.tar.gz
art-b122a4bbed34ab22b4c1541ee25e5cf22f12a926.tar.bz2
Tidy up memory barriers.
Change-Id: I937ea93e6df1835ecfe2d4bb7d84c24fe7fc097b
Diffstat (limited to 'runtime/gc')
-rw-r--r--runtime/gc/accounting/atomic_stack.h14
-rw-r--r--runtime/gc/collector/mark_sweep.cc16
-rw-r--r--runtime/gc/collector/semi_space.cc14
-rw-r--r--runtime/gc/heap-inl.h4
-rw-r--r--runtime/gc/heap.cc15
-rw-r--r--runtime/gc/space/bump_pointer_space-inl.h4
-rw-r--r--runtime/gc/space/bump_pointer_space.cc8
-rw-r--r--runtime/gc/space/image_space.cc2
8 files changed, 39 insertions, 38 deletions
diff --git a/runtime/gc/accounting/atomic_stack.h b/runtime/gc/accounting/atomic_stack.h
index 8fa5b86..02e01b8 100644
--- a/runtime/gc/accounting/atomic_stack.h
+++ b/runtime/gc/accounting/atomic_stack.h
@@ -68,7 +68,7 @@ class AtomicStack {
// Stack overflow.
return false;
}
- } while (!back_index_.compare_and_swap(index, index + 1));
+ } while (!back_index_.CompareAndSwap(index, index + 1));
begin_[index] = value;
return true;
}
@@ -93,7 +93,7 @@ class AtomicStack {
// Take an item from the front of the stack.
T PopFront() {
int32_t index = front_index_;
- DCHECK_LT(index, back_index_.load());
+ DCHECK_LT(index, back_index_.Load());
front_index_ = front_index_ + 1;
return begin_[index];
}
@@ -101,7 +101,7 @@ class AtomicStack {
// Pop a number of elements.
void PopBackCount(int32_t n) {
DCHECK_GE(Size(), static_cast<size_t>(n));
- back_index_.fetch_sub(n);
+ back_index_.FetchAndSub(n);
}
bool IsEmpty() const {
@@ -132,11 +132,11 @@ class AtomicStack {
}
void Sort() {
- int32_t start_back_index = back_index_.load();
- int32_t start_front_index = front_index_.load();
+ int32_t start_back_index = back_index_.Load();
+ int32_t start_front_index = front_index_.Load();
std::sort(Begin(), End());
- CHECK_EQ(start_back_index, back_index_.load());
- CHECK_EQ(start_front_index, front_index_.load());
+ CHECK_EQ(start_back_index, back_index_.Load());
+ CHECK_EQ(start_front_index, front_index_.Load());
if (kIsDebugBuild) {
debug_is_sorted_ = true;
}
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 28cc510..cae2a54 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -1109,8 +1109,8 @@ void MarkSweep::SweepCallback(size_t num_ptrs, Object** ptrs, void* arg) {
// AllocSpace::FreeList clears the value in ptrs, so perform after clearing the live bit
size_t freed_bytes = space->FreeList(self, num_ptrs, ptrs);
heap->RecordFree(freed_objects, freed_bytes);
- mark_sweep->freed_objects_.fetch_add(freed_objects);
- mark_sweep->freed_bytes_.fetch_add(freed_bytes);
+ mark_sweep->freed_objects_.FetchAndAdd(freed_objects);
+ mark_sweep->freed_bytes_.FetchAndAdd(freed_bytes);
}
void MarkSweep::ZygoteSweepCallback(size_t num_ptrs, Object** ptrs, void* arg) {
@@ -1192,10 +1192,10 @@ void MarkSweep::SweepArray(accounting::ObjectStack* allocations, bool swap_bitma
VLOG(heap) << "Freed " << freed_objects << "/" << count
<< " objects with size " << PrettySize(freed_bytes);
heap_->RecordFree(freed_objects + freed_large_objects, freed_bytes + freed_large_object_bytes);
- freed_objects_.fetch_add(freed_objects);
- freed_large_objects_.fetch_add(freed_large_objects);
- freed_bytes_.fetch_add(freed_bytes);
- freed_large_object_bytes_.fetch_add(freed_large_object_bytes);
+ freed_objects_.FetchAndAdd(freed_objects);
+ freed_large_objects_.FetchAndAdd(freed_large_objects);
+ freed_bytes_.FetchAndAdd(freed_bytes);
+ freed_large_object_bytes_.FetchAndAdd(freed_large_object_bytes);
timings_.EndSplit();
timings_.StartSplit("ResetStack");
@@ -1267,8 +1267,8 @@ void MarkSweep::SweepLargeObjects(bool swap_bitmaps) {
++freed_objects;
}
}
- freed_large_objects_.fetch_add(freed_objects);
- freed_large_object_bytes_.fetch_add(freed_bytes);
+ freed_large_objects_.FetchAndAdd(freed_objects);
+ freed_large_object_bytes_.FetchAndAdd(freed_bytes);
GetHeap()->RecordFree(freed_objects, freed_bytes);
}
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index f29eadb..a4f7121 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -236,8 +236,8 @@ void SemiSpace::ReclaimPhase() {
int freed_bytes = from_bytes - to_bytes;
int freed_objects = from_objects - to_objects;
CHECK_GE(freed_bytes, 0);
- freed_bytes_.fetch_add(freed_bytes);
- freed_objects_.fetch_add(freed_objects);
+ freed_bytes_.FetchAndAdd(freed_bytes);
+ freed_objects_.FetchAndAdd(freed_objects);
heap_->RecordFree(static_cast<size_t>(freed_objects), static_cast<size_t>(freed_bytes));
timings_.StartSplit("PreSweepingGcVerification");
@@ -332,7 +332,7 @@ Object* SemiSpace::MarkObject(Object* obj) {
// If out of space, fall back to the to-space.
forward_address = to_space_->Alloc(self_, object_size, &bytes_allocated);
} else {
- GetHeap()->num_bytes_allocated_.fetch_add(bytes_promoted);
+ GetHeap()->num_bytes_allocated_.FetchAndAdd(bytes_promoted);
bytes_promoted_ += bytes_promoted;
// Mark forward_address on the live bit map.
accounting::SpaceBitmap* live_bitmap = non_moving_space->GetLiveBitmap();
@@ -446,8 +446,8 @@ void SemiSpace::SweepCallback(size_t num_ptrs, Object** ptrs, void* arg) {
Locks::heap_bitmap_lock_->AssertExclusiveHeld(self);
size_t freed_bytes = space->FreeList(self, num_ptrs, ptrs);
heap->RecordFree(num_ptrs, freed_bytes);
- gc->freed_objects_.fetch_add(num_ptrs);
- gc->freed_bytes_.fetch_add(freed_bytes);
+ gc->freed_objects_.FetchAndAdd(num_ptrs);
+ gc->freed_bytes_.FetchAndAdd(freed_bytes);
}
void SemiSpace::ZygoteSweepCallback(size_t num_ptrs, Object** ptrs, void* arg) {
@@ -526,8 +526,8 @@ void SemiSpace::SweepLargeObjects(bool swap_bitmaps) {
++freed_objects;
}
}
- freed_large_objects_.fetch_add(freed_objects);
- freed_large_object_bytes_.fetch_add(freed_bytes);
+ freed_large_objects_.FetchAndAdd(freed_objects);
+ freed_large_object_bytes_.FetchAndAdd(freed_bytes);
GetHeap()->RecordFree(freed_objects, freed_bytes);
}
diff --git a/runtime/gc/heap-inl.h b/runtime/gc/heap-inl.h
index 9fb5760..af1b26b 100644
--- a/runtime/gc/heap-inl.h
+++ b/runtime/gc/heap-inl.h
@@ -61,7 +61,7 @@ inline mirror::Object* Heap::AllocObjectWithAllocator(Thread* self, mirror::Clas
pre_fence_visitor(obj);
DCHECK_GT(bytes_allocated, 0u);
const size_t new_num_bytes_allocated =
- static_cast<size_t>(num_bytes_allocated_.fetch_add(bytes_allocated)) + bytes_allocated;
+ static_cast<size_t>(num_bytes_allocated_.FetchAndAdd(bytes_allocated)) + bytes_allocated;
// TODO: Deprecate.
if (kInstrumented) {
if (Runtime::Current()->HasStatsEnabled()) {
@@ -200,7 +200,7 @@ inline Heap::AllocationTimer::~AllocationTimer() {
// Only if the allocation succeeded, record the time.
if (allocated_obj != nullptr) {
uint64_t allocation_end_time = NanoTime() / kTimeAdjust;
- heap_->total_allocation_time_.fetch_add(allocation_end_time - allocation_start_time_);
+ heap_->total_allocation_time_.FetchAndAdd(allocation_end_time - allocation_start_time_);
}
}
};
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 61c66e7..e08106b 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -413,13 +413,13 @@ void Heap::AddSpace(space::Space* space) {
void Heap::RegisterGCAllocation(size_t bytes) {
if (this != nullptr) {
- gc_memory_overhead_.fetch_add(bytes);
+ gc_memory_overhead_.FetchAndAdd(bytes);
}
}
void Heap::RegisterGCDeAllocation(size_t bytes) {
if (this != nullptr) {
- gc_memory_overhead_.fetch_sub(bytes);
+ gc_memory_overhead_.FetchAndSub(bytes);
}
}
@@ -802,7 +802,7 @@ void Heap::DumpSpaces(std::ostream& stream) {
void Heap::VerifyObjectBody(const mirror::Object* obj) {
CHECK(IsAligned<kObjectAlignment>(obj)) << "Object isn't aligned: " << obj;
// Ignore early dawn of the universe verifications.
- if (UNLIKELY(static_cast<size_t>(num_bytes_allocated_.load()) < 10 * KB)) {
+ if (UNLIKELY(static_cast<size_t>(num_bytes_allocated_.Load()) < 10 * KB)) {
return;
}
const byte* raw_addr = reinterpret_cast<const byte*>(obj) +
@@ -847,7 +847,8 @@ void Heap::VerifyHeap() {
void Heap::RecordFree(size_t freed_objects, size_t freed_bytes) {
DCHECK_LE(freed_bytes, static_cast<size_t>(num_bytes_allocated_));
- num_bytes_allocated_.fetch_sub(freed_bytes);
+ num_bytes_allocated_.FetchAndSub(freed_bytes);
+
if (Runtime::Current()->HasStatsEnabled()) {
RuntimeStats* thread_stats = Thread::Current()->GetStats();
thread_stats->freed_objects += freed_objects;
@@ -2082,7 +2083,7 @@ void Heap::RegisterNativeAllocation(JNIEnv* env, int bytes) {
native_need_to_run_finalization_ = false;
}
// Total number of native bytes allocated.
- native_bytes_allocated_.fetch_add(bytes);
+ native_bytes_allocated_.FetchAndAdd(bytes);
if (static_cast<size_t>(native_bytes_allocated_) > native_footprint_gc_watermark_) {
collector::GcType gc_type = have_zygote_space_ ? collector::kGcTypePartial :
collector::kGcTypeFull;
@@ -2118,7 +2119,7 @@ void Heap::RegisterNativeAllocation(JNIEnv* env, int bytes) {
void Heap::RegisterNativeFree(JNIEnv* env, int bytes) {
int expected_size, new_size;
do {
- expected_size = native_bytes_allocated_.load();
+ expected_size = native_bytes_allocated_.Load();
new_size = expected_size - bytes;
if (UNLIKELY(new_size < 0)) {
ScopedObjectAccess soa(env);
@@ -2127,7 +2128,7 @@ void Heap::RegisterNativeFree(JNIEnv* env, int bytes) {
"registered as allocated", bytes, expected_size).c_str());
break;
}
- } while (!native_bytes_allocated_.compare_and_swap(expected_size, new_size));
+ } while (!native_bytes_allocated_.CompareAndSwap(expected_size, new_size));
}
int64_t Heap::GetTotalMemory() const {
diff --git a/runtime/gc/space/bump_pointer_space-inl.h b/runtime/gc/space/bump_pointer_space-inl.h
index 82e96a4..ac20972 100644
--- a/runtime/gc/space/bump_pointer_space-inl.h
+++ b/runtime/gc/space/bump_pointer_space-inl.h
@@ -44,8 +44,8 @@ inline mirror::Object* BumpPointerSpace::AllocNonvirtualWithoutAccounting(size_t
inline mirror::Object* BumpPointerSpace::AllocNonvirtual(size_t num_bytes) {
mirror::Object* ret = AllocNonvirtualWithoutAccounting(num_bytes);
if (ret != nullptr) {
- objects_allocated_.fetch_add(1);
- bytes_allocated_.fetch_add(num_bytes);
+ objects_allocated_.FetchAndAdd(1);
+ bytes_allocated_.FetchAndAdd(num_bytes);
}
return ret;
}
diff --git a/runtime/gc/space/bump_pointer_space.cc b/runtime/gc/space/bump_pointer_space.cc
index 7ea202c..d5bc667 100644
--- a/runtime/gc/space/bump_pointer_space.cc
+++ b/runtime/gc/space/bump_pointer_space.cc
@@ -172,7 +172,7 @@ bool BumpPointerSpace::IsEmpty() const {
uint64_t BumpPointerSpace::GetBytesAllocated() {
// Start out pre-determined amount (blocks which are not being allocated into).
- uint64_t total = static_cast<uint64_t>(bytes_allocated_.load());
+ uint64_t total = static_cast<uint64_t>(bytes_allocated_.Load());
Thread* self = Thread::Current();
MutexLock mu(self, *Locks::runtime_shutdown_lock_);
MutexLock mu2(self, *Locks::thread_list_lock_);
@@ -190,7 +190,7 @@ uint64_t BumpPointerSpace::GetBytesAllocated() {
uint64_t BumpPointerSpace::GetObjectsAllocated() {
// Start out pre-determined amount (blocks which are not being allocated into).
- uint64_t total = static_cast<uint64_t>(objects_allocated_.load());
+ uint64_t total = static_cast<uint64_t>(objects_allocated_.Load());
Thread* self = Thread::Current();
MutexLock mu(self, *Locks::runtime_shutdown_lock_);
MutexLock mu2(self, *Locks::thread_list_lock_);
@@ -207,8 +207,8 @@ uint64_t BumpPointerSpace::GetObjectsAllocated() {
}
void BumpPointerSpace::RevokeThreadLocalBuffersLocked(Thread* thread) {
- objects_allocated_.fetch_add(thread->thread_local_objects_);
- bytes_allocated_.fetch_add(thread->thread_local_pos_ - thread->thread_local_start_);
+ objects_allocated_.FetchAndAdd(thread->thread_local_objects_);
+ bytes_allocated_.FetchAndAdd(thread->thread_local_pos_ - thread->thread_local_start_);
thread->SetTLAB(nullptr, nullptr);
}
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index c6177bd..4777cc6 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -227,7 +227,7 @@ ImageSpace* ImageSpace::Init(const char* image_file_name, bool validate_oat_file
*error_msg = StringPrintf("Failed to map image bitmap: %s", error_msg->c_str());
return nullptr;
}
- size_t bitmap_index = bitmap_index_.fetch_add(1);
+ size_t bitmap_index = bitmap_index_.FetchAndAdd(1);
std::string bitmap_name(StringPrintf("imagespace %s live-bitmap %u", image_file_name,
bitmap_index));
UniquePtr<accounting::SpaceBitmap> bitmap(