author     Ian Rogers <irogers@google.com>  2014-05-20 16:40:37 -0700
committer  Ian Rogers <irogers@google.com>  2014-05-20 21:17:03 -0700
commit     3e5cf305db800b2989ad57b7cde8fb3cc9fa1b9e
tree       52a737323ebd505cf37ca0e4b2dcee6524fba07f
parent     27a2b70f612af9afc0fb5392fb10059f6a0a3569
Begin migration of art::Atomic to std::atomic.
Change-Id: I4858d9cbed95e5ca560956b9dabd976cebe68333
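
For context, art::Atomic is ART's wrapper around raw atomic values; this change starts renaming its implicitly sequentially consistent operations to names that spell the ordering out, matching std::atomic's explicit memory_order arguments. Below is a minimal sketch of what such a wrapper might look like, assuming it simply delegates to std::atomic; the method names mirror those in the diff, but this is an illustration, not the actual art::Atomic class.

    // Sketch only: a thin wrapper over std::atomic with explicit-ordering
    // method names. Not the actual ART implementation.
    #include <atomic>

    template <typename T>
    class Atomic {
     public:
      explicit Atomic(T value = T()) : value_(value) {}

      // Atomically adds `delta` with sequentially consistent ordering and
      // returns the value held *before* the addition.
      T FetchAndAddSequentiallyConsistent(T delta) {
        return value_.fetch_add(delta, std::memory_order_seq_cst);
      }

      // Sequentially consistent load of the current value.
      T LoadSequentiallyConsistent() const {
        return value_.load(std::memory_order_seq_cst);
      }

     private:
      std::atomic<T> value_;
    };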
Diffstat (limited to 'runtime/gc/heap-inl.h')
-rw-r--r--  runtime/gc/heap-inl.h | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/runtime/gc/heap-inl.h b/runtime/gc/heap-inl.h
index 7cee5a0..03b72b6 100644
--- a/runtime/gc/heap-inl.h
+++ b/runtime/gc/heap-inl.h
@@ -96,7 +96,7 @@ inline mirror::Object* Heap::AllocObjectWithAllocator(Thread* self, mirror::Clas
CHECK_LE(obj->SizeOf(), usable_size);
}
const size_t new_num_bytes_allocated =
- static_cast<size_t>(num_bytes_allocated_.FetchAndAdd(bytes_allocated)) + bytes_allocated;
+ static_cast<size_t>(num_bytes_allocated_.FetchAndAddSequentiallyConsistent(bytes_allocated)) + bytes_allocated;
// TODO: Deprecate.
if (kInstrumented) {
if (Runtime::Current()->HasStatsEnabled()) {
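
Note how the hunk above re-adds bytes_allocated after the fetch-and-add: the operation returns the counter's value from *before* the addition, so the caller must reconstruct the post-add total itself. A small standalone illustration of that idiom, using hypothetical values rather than ART code:

    #include <atomic>
    #include <cassert>
    #include <cstddef>

    int main() {
      std::atomic<size_t> num_bytes_allocated{100};  // hypothetical starting value
      const size_t bytes_allocated = 8;
      // fetch_add returns the old value (100), so the delta must be
      // re-added to obtain the new total (108).
      const size_t new_num_bytes_allocated =
          num_bytes_allocated.fetch_add(bytes_allocated, std::memory_order_seq_cst) +
          bytes_allocated;
      assert(new_num_bytes_allocated == 108);
      return 0;
    }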
@@ -264,7 +264,7 @@ inline Heap::AllocationTimer::~AllocationTimer() {
// Only if the allocation succeeded, record the time.
if (allocated_obj != nullptr) {
uint64_t allocation_end_time = NanoTime() / kTimeAdjust;
- heap_->total_allocation_time_.FetchAndAdd(allocation_end_time - allocation_start_time_);
+ heap_->total_allocation_time_.FetchAndAddSequentiallyConsistent(allocation_end_time - allocation_start_time_);
}
}
};
@@ -279,7 +279,7 @@ inline bool Heap::ShouldAllocLargeObject(mirror::Class* c, size_t byte_count) co
template <bool kGrow>
inline bool Heap::IsOutOfMemoryOnAllocation(AllocatorType allocator_type, size_t alloc_size) {
- size_t new_footprint = num_bytes_allocated_ + alloc_size;
+ size_t new_footprint = num_bytes_allocated_.LoadSequentiallyConsistent() + alloc_size;
if (UNLIKELY(new_footprint > max_allowed_footprint_)) {
if (UNLIKELY(new_footprint > growth_limit_)) {
return true;