author    Mathieu Chartier <mathieuc@google.com>  2015-04-21 16:50:40 -0700
committer Mathieu Chartier <mathieuc@google.com>  2015-04-22 12:44:27 -0700
commit    2cebb24bfc3247d3e9be138a3350106737455918 (patch)
tree      d04d27d21b3c7733d784e303f01f873bb99e7770 /runtime/gc
parent    1f02f1a7b3073b8fef07770a67fbf94afad317f0 (diff)
Replace NULL with nullptr
Also fixed some lines that were too long, and a few other minor details.

Change-Id: I6efba5fb6e03eb5d0a300fddb2a75bf8e2f175cb
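For context: nullptr (C++11) has type std::nullptr_t, which converts only to pointer types, whereas the NULL macro is an integral constant. A minimal illustration of why the replacement is safer (these overloads are hypothetical, not from ART):

    void Log(int flags);
    void Log(const char* msg);

    Log(NULL);     // picks Log(int), or is ambiguous, depending on how NULL is defined
    Log(nullptr);  // unambiguously picks Log(const char*)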
Diffstat (limited to 'runtime/gc')
-rw-r--r--  runtime/gc/accounting/atomic_stack.h       |  4
-rw-r--r--  runtime/gc/accounting/card_table.cc        |  8
-rw-r--r--  runtime/gc/accounting/card_table.h         |  2
-rw-r--r--  runtime/gc/accounting/space_bitmap-inl.h   |  2
-rw-r--r--  runtime/gc/accounting/space_bitmap.cc      | 10
-rw-r--r--  runtime/gc/accounting/space_bitmap_test.cc |  4
-rw-r--r--  runtime/gc/allocator/rosalloc.cc           | 30
-rw-r--r--  runtime/gc/collector/mark_sweep.cc         |  8
-rw-r--r--  runtime/gc/collector/semi_space.cc         |  2
-rw-r--r--  runtime/gc/heap-inl.h                      |  2
-rw-r--r--  runtime/gc/heap.cc                         | 12
-rw-r--r--  runtime/gc/heap.h                          |  4
-rw-r--r--  runtime/gc/reference_queue.cc              |  4
-rw-r--r--  runtime/gc/space/bump_pointer_space.h      |  2
-rw-r--r--  runtime/gc/space/dlmalloc_space-inl.h      |  6
-rw-r--r--  runtime/gc/space/dlmalloc_space.cc         |  6
-rw-r--r--  runtime/gc/space/image_space.cc            | 10
-rw-r--r--  runtime/gc/space/image_space.h             |  4
-rw-r--r--  runtime/gc/space/large_object_space.cc     |  8
-rw-r--r--  runtime/gc/space/malloc_space.cc           |  4
-rw-r--r--  runtime/gc/space/malloc_space.h            |  2
-rw-r--r--  runtime/gc/space/region_space.h            |  2
-rw-r--r--  runtime/gc/space/rosalloc_space-inl.h      |  6
-rw-r--r--  runtime/gc/space/rosalloc_space.cc         | 18
24 files changed, 80 insertions, 80 deletions
diff --git a/runtime/gc/accounting/atomic_stack.h b/runtime/gc/accounting/atomic_stack.h
index 5224d64..399832a 100644
--- a/runtime/gc/accounting/atomic_stack.h
+++ b/runtime/gc/accounting/atomic_stack.h
@@ -238,9 +238,9 @@ class AtomicStack {
std::string error_msg;
mem_map_.reset(MemMap::MapAnonymous(name_.c_str(), nullptr, capacity_ * sizeof(begin_[0]),
PROT_READ | PROT_WRITE, false, false, &error_msg));
- CHECK(mem_map_.get() != NULL) << "couldn't allocate mark stack.\n" << error_msg;
+ CHECK(mem_map_.get() != nullptr) << "couldn't allocate mark stack.\n" << error_msg;
uint8_t* addr = mem_map_->Begin();
- CHECK(addr != NULL);
+ CHECK(addr != nullptr);
debug_is_sorted_ = true;
begin_ = reinterpret_cast<StackReference<T>*>(addr);
Reset();
diff --git a/runtime/gc/accounting/card_table.cc b/runtime/gc/accounting/card_table.cc
index 7879632..1a7b1a3 100644
--- a/runtime/gc/accounting/card_table.cc
+++ b/runtime/gc/accounting/card_table.cc
@@ -36,7 +36,7 @@ constexpr uint8_t CardTable::kCardDirty;
/*
* Maintain a card table from the write barrier. All writes of
- * non-NULL values to heap addresses should go through an entry in
+ * non-null values to heap addresses should go through an entry in
* WriteBarrier, and from there to here.
*
* The heap is divided into "cards" of GC_CARD_SIZE bytes, as
@@ -44,7 +44,7 @@ constexpr uint8_t CardTable::kCardDirty;
* data per card, to be used by the GC. The value of the byte will be
* one of GC_CARD_CLEAN or GC_CARD_DIRTY.
*
- * After any store of a non-NULL object pointer into a heap object,
+ * After any store of a non-null object pointer into a heap object,
* code is obliged to mark the card dirty. The setters in
* object.h [such as SetFieldObject] do this for you. The
* compiler also contains code to mark cards as dirty.
@@ -64,13 +64,13 @@ CardTable* CardTable::Create(const uint8_t* heap_begin, size_t heap_capacity) {
std::unique_ptr<MemMap> mem_map(
MemMap::MapAnonymous("card table", nullptr, capacity + 256, PROT_READ | PROT_WRITE,
false, false, &error_msg));
- CHECK(mem_map.get() != NULL) << "couldn't allocate card table: " << error_msg;
+ CHECK(mem_map.get() != nullptr) << "couldn't allocate card table: " << error_msg;
// All zeros is the correct initial value; all clean. Anonymous mmaps are initialized to zero, we
// don't clear the card table to avoid unnecessary pages being allocated
static_assert(kCardClean == 0, "kCardClean must be 0");
uint8_t* cardtable_begin = mem_map->Begin();
- CHECK(cardtable_begin != NULL);
+ CHECK(cardtable_begin != nullptr);
// We allocated up to a byte's worth of extra space to allow biased_begin's byte value to equal
// kCardDirty, compute an offset value to make this the case
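The barrier described in the comments above comes down to a single unconditional byte store. A minimal sketch, assuming 128-byte cards and a dirty value of 0x70 (illustrative constants, not necessarily ART's):

    #include <cstddef>
    #include <cstdint>

    // Hypothetical write-barrier mark; kCardShift and kCardDirty are assumptions.
    static constexpr size_t kCardShift = 7;      // assumed 128-byte cards
    static constexpr uint8_t kCardDirty = 0x70;  // assumed dirty byte value

    inline void MarkCard(uint8_t* biased_begin, const void* heap_addr) {
      // biased_begin is pre-offset so that indexing by (address >> kCardShift)
      // selects the card entry covering heap_addr.
      biased_begin[reinterpret_cast<uintptr_t>(heap_addr) >> kCardShift] = kCardDirty;
    }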
diff --git a/runtime/gc/accounting/card_table.h b/runtime/gc/accounting/card_table.h
index 896cce5..75ef58a 100644
--- a/runtime/gc/accounting/card_table.h
+++ b/runtime/gc/accounting/card_table.h
@@ -43,7 +43,7 @@ namespace accounting {
template<size_t kAlignment> class SpaceBitmap;
// Maintain a card table from the write barrier. All writes of
-// non-NULL values to heap addresses should go through an entry in
+// non-null values to heap addresses should go through an entry in
// WriteBarrier, and from there to here.
class CardTable {
public:
diff --git a/runtime/gc/accounting/space_bitmap-inl.h b/runtime/gc/accounting/space_bitmap-inl.h
index 11347a5..ae91200 100644
--- a/runtime/gc/accounting/space_bitmap-inl.h
+++ b/runtime/gc/accounting/space_bitmap-inl.h
@@ -55,7 +55,7 @@ template<size_t kAlignment>
inline bool SpaceBitmap<kAlignment>::Test(const mirror::Object* obj) const {
uintptr_t addr = reinterpret_cast<uintptr_t>(obj);
DCHECK(HasAddress(obj)) << obj;
- DCHECK(bitmap_begin_ != NULL);
+ DCHECK(bitmap_begin_ != nullptr);
DCHECK_GE(addr, heap_begin_);
const uintptr_t offset = addr - heap_begin_;
return (bitmap_begin_[OffsetToIndex(offset)] & OffsetToMask(offset)) != 0;
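Test() above checks one bit per kAlignment bytes of heap, packed into uintptr_t words. A plausible sketch of the two helpers it calls, assuming low-bit-first packing (ART's actual bit order may differ):

    #include <cstddef>
    #include <cstdint>

    constexpr size_t kBitsPerIntPtrT = sizeof(uintptr_t) * 8;

    template <size_t kAlignment>
    constexpr size_t OffsetToIndex(uintptr_t offset) {
      return offset / (kAlignment * kBitsPerIntPtrT);  // which word holds the bit
    }

    template <size_t kAlignment>
    constexpr uintptr_t OffsetToMask(uintptr_t offset) {
      return uintptr_t{1} << ((offset / kAlignment) % kBitsPerIntPtrT);  // bit within the word
    }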
diff --git a/runtime/gc/accounting/space_bitmap.cc b/runtime/gc/accounting/space_bitmap.cc
index 2da8325..84dadea 100644
--- a/runtime/gc/accounting/space_bitmap.cc
+++ b/runtime/gc/accounting/space_bitmap.cc
@@ -104,8 +104,8 @@ void SpaceBitmap<kAlignment>::CopyFrom(SpaceBitmap* source_bitmap) {
template<size_t kAlignment>
void SpaceBitmap<kAlignment>::Walk(ObjectCallback* callback, void* arg) {
- CHECK(bitmap_begin_ != NULL);
- CHECK(callback != NULL);
+ CHECK(bitmap_begin_ != nullptr);
+ CHECK(callback != nullptr);
uintptr_t end = OffsetToIndex(HeapLimit() - heap_begin_ - 1);
uintptr_t* bitmap_begin = bitmap_begin_;
@@ -132,7 +132,7 @@ void SpaceBitmap<kAlignment>::SweepWalk(const SpaceBitmap<kAlignment>& live_bitm
CHECK(mark_bitmap.bitmap_begin_ != nullptr);
CHECK_EQ(live_bitmap.heap_begin_, mark_bitmap.heap_begin_);
CHECK_EQ(live_bitmap.bitmap_size_, mark_bitmap.bitmap_size_);
- CHECK(callback != NULL);
+ CHECK(callback != nullptr);
CHECK_LE(sweep_begin, sweep_end);
CHECK_GE(sweep_begin, live_bitmap.heap_begin_);
@@ -186,7 +186,7 @@ void SpaceBitmap<kAlignment>::WalkInstanceFields(SpaceBitmap<kAlignment>* visite
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// Visit fields of parent classes first.
mirror::Class* super = klass->GetSuperClass();
- if (super != NULL) {
+ if (super != nullptr) {
WalkInstanceFields(visited, callback, obj, super, arg);
}
// Walk instance fields
@@ -233,7 +233,7 @@ void SpaceBitmap<kAlignment>::WalkFieldsInOrder(SpaceBitmap<kAlignment>* visited
int32_t length = obj_array->GetLength();
for (int32_t i = 0; i < length; i++) {
mirror::Object* value = obj_array->Get(i);
- if (value != NULL) {
+ if (value != nullptr) {
WalkFieldsInOrder(visited, callback, value, arg);
}
}
diff --git a/runtime/gc/accounting/space_bitmap_test.cc b/runtime/gc/accounting/space_bitmap_test.cc
index 850325a..edb08ef 100644
--- a/runtime/gc/accounting/space_bitmap_test.cc
+++ b/runtime/gc/accounting/space_bitmap_test.cc
@@ -34,7 +34,7 @@ TEST_F(SpaceBitmapTest, Init) {
size_t heap_capacity = 16 * MB;
std::unique_ptr<ContinuousSpaceBitmap> space_bitmap(
ContinuousSpaceBitmap::Create("test bitmap", heap_begin, heap_capacity));
- EXPECT_TRUE(space_bitmap.get() != NULL);
+ EXPECT_TRUE(space_bitmap.get() != nullptr);
}
class BitmapVerify {
@@ -62,7 +62,7 @@ TEST_F(SpaceBitmapTest, ScanRange) {
std::unique_ptr<ContinuousSpaceBitmap> space_bitmap(
ContinuousSpaceBitmap::Create("test bitmap", heap_begin, heap_capacity));
- EXPECT_TRUE(space_bitmap.get() != NULL);
+ EXPECT_TRUE(space_bitmap.get() != nullptr);
// Set all the odd bits in the first BitsPerIntPtrT * 3 to one.
for (size_t j = 0; j < kBitsPerIntPtrT * 3; ++j) {
diff --git a/runtime/gc/allocator/rosalloc.cc b/runtime/gc/allocator/rosalloc.cc
index 515f124..85234dc 100644
--- a/runtime/gc/allocator/rosalloc.cc
+++ b/runtime/gc/allocator/rosalloc.cc
@@ -115,7 +115,7 @@ RosAlloc::~RosAlloc() {
void* RosAlloc::AllocPages(Thread* self, size_t num_pages, uint8_t page_map_type) {
lock_.AssertHeld(self);
DCHECK(page_map_type == kPageMapRun || page_map_type == kPageMapLargeObject);
- FreePageRun* res = NULL;
+ FreePageRun* res = nullptr;
const size_t req_byte_size = num_pages * kPageSize;
// Find the lowest address free page run that's large enough.
for (auto it = free_page_runs_.begin(); it != free_page_runs_.end(); ) {
@@ -157,8 +157,8 @@ void* RosAlloc::AllocPages(Thread* self, size_t num_pages, uint8_t page_map_type
}
// Failed to allocate pages. Grow the footprint, if possible.
- if (UNLIKELY(res == NULL && capacity_ > footprint_)) {
- FreePageRun* last_free_page_run = NULL;
+ if (UNLIKELY(res == nullptr && capacity_ > footprint_)) {
+ FreePageRun* last_free_page_run = nullptr;
size_t last_free_page_run_size;
auto it = free_page_runs_.rbegin();
if (it != free_page_runs_.rend() && (last_free_page_run = *it)->End(this) == base_ + footprint_) {
@@ -218,7 +218,7 @@ void* RosAlloc::AllocPages(Thread* self, size_t num_pages, uint8_t page_map_type
DCHECK(it != free_page_runs_.rend());
FreePageRun* fpr = *it;
if (kIsDebugBuild && last_free_page_run_size > 0) {
- DCHECK(last_free_page_run != NULL);
+ DCHECK(last_free_page_run != nullptr);
DCHECK_EQ(last_free_page_run, fpr);
}
size_t fpr_byte_size = fpr->ByteSize(this);
@@ -249,7 +249,7 @@ void* RosAlloc::AllocPages(Thread* self, size_t num_pages, uint8_t page_map_type
res = fpr;
}
}
- if (LIKELY(res != NULL)) {
+ if (LIKELY(res != nullptr)) {
// Update the page map.
size_t page_map_idx = ToPageMapIndex(res);
for (size_t i = 0; i < num_pages; i++) {
@@ -286,7 +286,7 @@ void* RosAlloc::AllocPages(Thread* self, size_t num_pages, uint8_t page_map_type
// Fail.
if (kTraceRosAlloc) {
- LOG(INFO) << "RosAlloc::AllocPages() : NULL";
+ LOG(INFO) << "RosAlloc::AllocPages() : nullptr";
}
return nullptr;
}
@@ -468,7 +468,7 @@ void* RosAlloc::AllocLargeObject(Thread* self, size_t size, size_t* bytes_alloca
}
if (UNLIKELY(r == nullptr)) {
if (kTraceRosAlloc) {
- LOG(INFO) << "RosAlloc::AllocLargeObject() : NULL";
+ LOG(INFO) << "RosAlloc::AllocLargeObject() : nullptr";
}
return nullptr;
}
@@ -824,7 +824,7 @@ size_t RosAlloc::FreeFromRun(Thread* self, void* ptr, Run* run) {
// already in the non-full run set (i.e., it was full) insert it
// into the non-full run set.
if (run != current_runs_[idx]) {
- auto* full_runs = kIsDebugBuild ? &full_runs_[idx] : NULL;
+ auto* full_runs = kIsDebugBuild ? &full_runs_[idx] : nullptr;
auto pos = non_full_runs->find(run);
if (pos == non_full_runs->end()) {
DCHECK(run_was_full);
@@ -1275,7 +1275,7 @@ size_t RosAlloc::BulkFree(Thread* self, void** ptrs, size_t num_ptrs) {
// Check if the run should be moved to non_full_runs_ or
// free_page_runs_.
auto* non_full_runs = &non_full_runs_[idx];
- auto* full_runs = kIsDebugBuild ? &full_runs_[idx] : NULL;
+ auto* full_runs = kIsDebugBuild ? &full_runs_[idx] : nullptr;
if (run->IsAllFree()) {
// It has just become completely free. Free the pages of the
// run.
@@ -1358,7 +1358,7 @@ std::string RosAlloc::DumpPageMap() {
stream << "RosAlloc PageMap: " << std::endl;
lock_.AssertHeld(Thread::Current());
size_t end = page_map_size_;
- FreePageRun* curr_fpr = NULL;
+ FreePageRun* curr_fpr = nullptr;
size_t curr_fpr_size = 0;
size_t remaining_curr_fpr_size = 0;
size_t num_running_empty_pages = 0;
@@ -1373,7 +1373,7 @@ std::string RosAlloc::DumpPageMap() {
// Encountered a fresh free page run.
DCHECK_EQ(remaining_curr_fpr_size, static_cast<size_t>(0));
DCHECK(fpr->IsFree());
- DCHECK(curr_fpr == NULL);
+ DCHECK(curr_fpr == nullptr);
DCHECK_EQ(curr_fpr_size, static_cast<size_t>(0));
curr_fpr = fpr;
curr_fpr_size = fpr->ByteSize(this);
@@ -1384,7 +1384,7 @@ std::string RosAlloc::DumpPageMap() {
<< " remaining_fpr_size=" << remaining_curr_fpr_size << std::endl;
if (remaining_curr_fpr_size == 0) {
// Reset at the end of the current free page run.
- curr_fpr = NULL;
+ curr_fpr = nullptr;
curr_fpr_size = 0;
}
stream << "curr_fpr=0x" << std::hex << reinterpret_cast<intptr_t>(curr_fpr) << std::endl;
@@ -1392,7 +1392,7 @@ std::string RosAlloc::DumpPageMap() {
} else {
// Still part of the current free page run.
DCHECK_NE(num_running_empty_pages, static_cast<size_t>(0));
- DCHECK(curr_fpr != NULL && curr_fpr_size > 0 && remaining_curr_fpr_size > 0);
+ DCHECK(curr_fpr != nullptr && curr_fpr_size > 0 && remaining_curr_fpr_size > 0);
DCHECK_EQ(remaining_curr_fpr_size % kPageSize, static_cast<size_t>(0));
DCHECK_GE(remaining_curr_fpr_size, static_cast<size_t>(kPageSize));
remaining_curr_fpr_size -= kPageSize;
@@ -1400,7 +1400,7 @@ std::string RosAlloc::DumpPageMap() {
<< " remaining_fpr_size=" << remaining_curr_fpr_size << std::endl;
if (remaining_curr_fpr_size == 0) {
// Reset at the end of the current free page run.
- curr_fpr = NULL;
+ curr_fpr = nullptr;
curr_fpr_size = 0;
}
}
@@ -1546,7 +1546,7 @@ bool RosAlloc::Trim() {
void RosAlloc::InspectAll(void (*handler)(void* start, void* end, size_t used_bytes, void* callback_arg),
void* arg) {
// Note: no need to use this to release pages as we already do so in FreePages().
- if (handler == NULL) {
+ if (handler == nullptr) {
return;
}
MutexLock mu(Thread::Current(), lock_);
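The AllocPages() hunks above are a first-fit search over an address-ordered set of free page runs, splitting the chosen run and growing the footprint when nothing fits. A condensed, self-contained sketch of that pattern (FreeRun and the splitting logic are simplifications, not RosAlloc's real structures):

    #include <cstddef>
    #include <set>

    struct FreeRun {
      char* begin;
      size_t bytes;
      bool operator<(const FreeRun& other) const { return begin < other.begin; }
    };

    // First-fit over runs kept sorted by address; returns null when the caller
    // should try to grow the footprint instead.
    char* AllocPages(std::set<FreeRun>& free_runs, size_t req_bytes) {
      for (auto it = free_runs.begin(); it != free_runs.end(); ++it) {
        if (it->bytes >= req_bytes) {
          FreeRun run = *it;
          free_runs.erase(it);
          if (run.bytes > req_bytes) {
            // Return the unused tail of the run to the free set.
            free_runs.insert({run.begin + req_bytes, run.bytes - req_bytes});
          }
          return run.begin;  // lowest-address run that was large enough
        }
      }
      return nullptr;
    }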
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 14eb80b..f0e8d14 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -578,7 +578,7 @@ class MarkStackTask : public Task {
mark_stack_pos_(mark_stack_size) {
// We may have to copy part of an existing mark stack when another mark stack overflows.
if (mark_stack_size != 0) {
- DCHECK(mark_stack != NULL);
+ DCHECK(mark_stack != nullptr);
// TODO: Check performance?
std::copy(mark_stack, mark_stack + mark_stack_size, mark_stack_);
}
@@ -850,7 +850,7 @@ class RecursiveMarkTask : public MarkStackTask<false> {
public:
RecursiveMarkTask(ThreadPool* thread_pool, MarkSweep* mark_sweep,
accounting::ContinuousSpaceBitmap* bitmap, uintptr_t begin, uintptr_t end)
- : MarkStackTask<false>(thread_pool, mark_sweep, 0, NULL), bitmap_(bitmap), begin_(begin),
+ : MarkStackTask<false>(thread_pool, mark_sweep, 0, nullptr), bitmap_(bitmap), begin_(begin),
end_(end) {
}
@@ -1260,11 +1260,11 @@ void MarkSweep::ProcessMarkStack(bool paused) {
static const size_t kFifoSize = 4;
BoundedFifoPowerOfTwo<Object*, kFifoSize> prefetch_fifo;
for (;;) {
- Object* obj = NULL;
+ Object* obj = nullptr;
if (kUseMarkStackPrefetch) {
while (!mark_stack_->IsEmpty() && prefetch_fifo.size() < kFifoSize) {
Object* mark_stack_obj = mark_stack_->PopBack();
- DCHECK(mark_stack_obj != NULL);
+ DCHECK(mark_stack_obj != nullptr);
__builtin_prefetch(mark_stack_obj);
prefetch_fifo.push_back(mark_stack_obj);
}
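The ProcessMarkStack() hunk above shows the prefetch pattern: objects are popped a few entries ahead of use into a small FIFO and prefetched, so cache misses overlap with scanning the current object. A sketch of the idea, with a std::deque standing in for ART's BoundedFifoPowerOfTwo:

    #include <cstddef>
    #include <deque>

    template <typename Object, typename Stack>
    void DrainWithPrefetch(Stack& mark_stack, void (*scan)(Object*)) {
      constexpr size_t kFifoSize = 4;
      std::deque<Object*> prefetch_fifo;
      for (;;) {
        // Refill the FIFO, issuing a prefetch for each object pulled off the stack.
        while (!mark_stack.empty() && prefetch_fifo.size() < kFifoSize) {
          Object* mark_stack_obj = mark_stack.back();
          mark_stack.pop_back();
          __builtin_prefetch(mark_stack_obj);  // GCC/Clang builtin
          prefetch_fifo.push_back(mark_stack_obj);
        }
        if (prefetch_fifo.empty()) {
          break;
        }
        Object* obj = prefetch_fifo.front();  // hopefully in cache by now
        prefetch_fifo.pop_front();
        scan(obj);
      }
    }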
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index dbf01d8..82d02e7 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -750,7 +750,7 @@ inline Object* SemiSpace::GetMarkedForwardAddress(mirror::Object* obj) const
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
// All immune objects are assumed marked.
if (from_space_->HasAddress(obj)) {
- // Returns either the forwarding address or nullptr.
+ // Returns either the forwarding address or null.
return GetForwardingAddressInFromSpace(obj);
} else if (collect_from_space_only_ || immune_region_.ContainsObject(obj) ||
to_space_->HasAddress(obj)) {
diff --git a/runtime/gc/heap-inl.h b/runtime/gc/heap-inl.h
index b770096..3e56205 100644
--- a/runtime/gc/heap-inl.h
+++ b/runtime/gc/heap-inl.h
@@ -293,7 +293,7 @@ inline mirror::Object* Heap::TryToAllocate(Thread* self, AllocatorType allocator
return nullptr;
}
// Try allocating a new thread local buffer, if the allocation fails the space must be
- // full so return nullptr.
+ // full so return null.
if (!bump_pointer_space_->AllocNewTlab(self, new_tlab_size)) {
return nullptr;
}
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index beaf067..b80c4b6 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -412,7 +412,7 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max
}
// Allocate the card table.
card_table_.reset(accounting::CardTable::Create(heap_begin, heap_capacity));
- CHECK(card_table_.get() != NULL) << "Failed to create card table";
+ CHECK(card_table_.get() != nullptr) << "Failed to create card table";
if (foreground_collector_type_ == kCollectorTypeCC && kUseTableLookupReadBarrier) {
rb_table_.reset(new accounting::ReadBarrierTable());
@@ -1052,7 +1052,7 @@ space::ContinuousSpace* Heap::FindContinuousSpaceFromObject(const mirror::Object
if (!fail_ok) {
LOG(FATAL) << "object " << reinterpret_cast<const void*>(obj) << " not inside any spaces!";
}
- return NULL;
+ return nullptr;
}
space::DiscontinuousSpace* Heap::FindDiscontinuousSpaceFromObject(const mirror::Object* obj,
@@ -1065,12 +1065,12 @@ space::DiscontinuousSpace* Heap::FindDiscontinuousSpaceFromObject(const mirror::
if (!fail_ok) {
LOG(FATAL) << "object " << reinterpret_cast<const void*>(obj) << " not inside any spaces!";
}
- return NULL;
+ return nullptr;
}
space::Space* Heap::FindSpaceFromObject(const mirror::Object* obj, bool fail_ok) const {
space::Space* result = FindContinuousSpaceFromObject(obj, true);
- if (result != NULL) {
+ if (result != nullptr) {
return result;
}
return FindDiscontinuousSpaceFromObject(obj, fail_ok);
@@ -1082,7 +1082,7 @@ space::ImageSpace* Heap::GetImageSpace() const {
return space->AsImageSpace();
}
}
- return NULL;
+ return nullptr;
}
void Heap::ThrowOutOfMemoryError(Thread* self, size_t byte_count, AllocatorType allocator_type) {
@@ -2204,7 +2204,7 @@ void Heap::PreZygoteFork() {
// Turn the current alloc space into a zygote space and obtain the new alloc space composed of
// the remaining available space.
// Remove the old space before creating the zygote space since creating the zygote space sets
- // the old alloc space's bitmaps to nullptr.
+ // the old alloc space's bitmaps to null.
RemoveSpace(old_alloc_space);
if (collector::SemiSpace::kUseRememberedSet) {
// Sanity bound check.
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 066b4c5..565687c 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -396,7 +396,7 @@ class Heap {
void RecordFreeRevoke();
// Must be called if a field of an Object in the heap changes, and before any GC safe-point.
- // The call is not needed if NULL is stored in the field.
+ // The call is not needed if null is stored in the field.
ALWAYS_INLINE void WriteBarrierField(const mirror::Object* dst, MemberOffset /*offset*/,
const mirror::Object* /*new_value*/) {
card_table_->MarkCard(dst);
@@ -991,7 +991,7 @@ class Heap {
// programs it is "cleared" making it the same as capacity.
size_t growth_limit_;
- // When the number of bytes allocated exceeds the footprint TryAllocate returns NULL indicating
+ // When the number of bytes allocated exceeds the footprint TryAllocate returns null indicating
// a GC should be triggered.
size_t max_allowed_footprint_;
diff --git a/runtime/gc/reference_queue.cc b/runtime/gc/reference_queue.cc
index 7be0704..4c93a4c 100644
--- a/runtime/gc/reference_queue.cc
+++ b/runtime/gc/reference_queue.cc
@@ -30,7 +30,7 @@ ReferenceQueue::ReferenceQueue(Mutex* lock) : lock_(lock), list_(nullptr) {
}
void ReferenceQueue::AtomicEnqueueIfNotEnqueued(Thread* self, mirror::Reference* ref) {
- DCHECK(ref != NULL);
+ DCHECK(ref != nullptr);
MutexLock mu(self, *lock_);
if (!ref->IsEnqueued()) {
EnqueuePendingReference(ref);
@@ -43,7 +43,7 @@ void ReferenceQueue::EnqueueReference(mirror::Reference* ref) {
}
void ReferenceQueue::EnqueuePendingReference(mirror::Reference* ref) {
- DCHECK(ref != NULL);
+ DCHECK(ref != nullptr);
if (IsEmpty()) {
// 1 element cyclic queue, ie: Reference ref = ..; ref.pendingNext = ref;
list_ = ref;
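The EnqueuePendingReference() hunk above relies on the pending list being a cyclic singly-linked list: an enqueued reference always has a non-null pendingNext, and a single element points at itself. A simplified sketch of the invariant (field and type names are stand-ins for ART's mirror::Reference):

    struct Reference {
      Reference* pending_next = nullptr;  // null means "not enqueued"
    };

    struct ReferenceQueue {
      Reference* list_ = nullptr;  // tail of the cycle; tail->pending_next is the head

      void EnqueuePendingReference(Reference* ref) {
        if (list_ == nullptr) {
          ref->pending_next = ref;  // 1-element cyclic queue: ref.pendingNext = ref
          list_ = ref;
        } else {
          Reference* head = list_->pending_next;
          ref->pending_next = head;   // new tail points at the head...
          list_->pending_next = ref;  // ...and the old tail points at it
          list_ = ref;
        }
      }
    };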
diff --git a/runtime/gc/space/bump_pointer_space.h b/runtime/gc/space/bump_pointer_space.h
index c496a42..df43606 100644
--- a/runtime/gc/space/bump_pointer_space.h
+++ b/runtime/gc/space/bump_pointer_space.h
@@ -45,7 +45,7 @@ class BumpPointerSpace FINAL : public ContinuousMemMapAllocSpace {
static BumpPointerSpace* Create(const std::string& name, size_t capacity, uint8_t* requested_begin);
static BumpPointerSpace* CreateFromMemMap(const std::string& name, MemMap* mem_map);
- // Allocate num_bytes, returns nullptr if the space is full.
+ // Allocate num_bytes, returns null if the space is full.
mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
size_t* usable_size, size_t* bytes_tl_bulk_allocated) OVERRIDE;
// Thread-unsafe allocation for when mutators are suspended, used by the semispace collector.
diff --git a/runtime/gc/space/dlmalloc_space-inl.h b/runtime/gc/space/dlmalloc_space-inl.h
index 9eace89..4fc4ada 100644
--- a/runtime/gc/space/dlmalloc_space-inl.h
+++ b/runtime/gc/space/dlmalloc_space-inl.h
@@ -35,7 +35,7 @@ inline mirror::Object* DlMallocSpace::AllocNonvirtual(Thread* self, size_t num_b
obj = AllocWithoutGrowthLocked(self, num_bytes, bytes_allocated, usable_size,
bytes_tl_bulk_allocated);
}
- if (LIKELY(obj != NULL)) {
+ if (LIKELY(obj != nullptr)) {
// Zero freshly allocated memory, done while not holding the space's lock.
memset(obj, 0, num_bytes);
}
@@ -57,13 +57,13 @@ inline mirror::Object* DlMallocSpace::AllocWithoutGrowthLocked(
size_t* usable_size,
size_t* bytes_tl_bulk_allocated) {
mirror::Object* result = reinterpret_cast<mirror::Object*>(mspace_malloc(mspace_, num_bytes));
- if (LIKELY(result != NULL)) {
+ if (LIKELY(result != nullptr)) {
if (kDebugSpaces) {
CHECK(Contains(result)) << "Allocation (" << reinterpret_cast<void*>(result)
<< ") not in bounds of allocation space " << *this;
}
size_t allocation_size = AllocationSizeNonvirtual(result, usable_size);
- DCHECK(bytes_allocated != NULL);
+ DCHECK(bytes_allocated != nullptr);
*bytes_allocated = allocation_size;
*bytes_tl_bulk_allocated = allocation_size;
}
diff --git a/runtime/gc/space/dlmalloc_space.cc b/runtime/gc/space/dlmalloc_space.cc
index 225861d..7b1a421 100644
--- a/runtime/gc/space/dlmalloc_space.cc
+++ b/runtime/gc/space/dlmalloc_space.cc
@@ -39,7 +39,7 @@ DlMallocSpace::DlMallocSpace(MemMap* mem_map, size_t initial_size, const std::st
: MallocSpace(name, mem_map, begin, end, limit, growth_limit, true, can_move_objects,
starting_size, initial_size),
mspace_(mspace) {
- CHECK(mspace != NULL);
+ CHECK(mspace != nullptr);
}
DlMallocSpace* DlMallocSpace::CreateFromMemMap(MemMap* mem_map, const std::string& name,
@@ -176,7 +176,7 @@ size_t DlMallocSpace::Free(Thread* self, mirror::Object* ptr) {
}
size_t DlMallocSpace::FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) {
- DCHECK(ptrs != NULL);
+ DCHECK(ptrs != nullptr);
// Don't need the lock to calculate the size of the freed pointers.
size_t bytes_freed = 0;
@@ -232,7 +232,7 @@ void DlMallocSpace::Walk(void(*callback)(void *start, void *end, size_t num_byte
void* arg) {
MutexLock mu(Thread::Current(), lock_);
mspace_inspect_all(mspace_, callback, arg);
- callback(NULL, NULL, 0, arg); // Indicate end of a space.
+ callback(nullptr, nullptr, 0, arg); // Indicate end of a space.
}
size_t DlMallocSpace::GetFootprint() {
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index e28e8d7..f350038 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -666,7 +666,7 @@ ImageSpace* ImageSpace::Init(const char* image_filename, const char* image_locat
}
std::unique_ptr<File> file(OS::OpenFileForReading(image_filename));
- if (file.get() == NULL) {
+ if (file.get() == nullptr) {
*error_msg = StringPrintf("Failed to open '%s'", image_filename);
return nullptr;
}
@@ -695,7 +695,7 @@ ImageSpace* ImageSpace::Init(const char* image_filename, const char* image_locat
std::unique_ptr<MemMap> map(MemMap::MapFileAtAddress(
image_header.GetImageBegin(), image_header.GetImageSize() + image_header.GetArtFieldsSize(),
PROT_READ | PROT_WRITE, MAP_PRIVATE, file->Fd(), 0, false, image_filename, error_msg));
- if (map.get() == NULL) {
+ if (map.get() == nullptr) {
DCHECK(!error_msg->empty());
return nullptr;
}
@@ -786,7 +786,7 @@ OatFile* ImageSpace::OpenOatFile(const char* image_path, std::string* error_msg)
image_header.GetOatFileBegin(),
!Runtime::Current()->IsAotCompiler(),
nullptr, error_msg);
- if (oat_file == NULL) {
+ if (oat_file == nullptr) {
*error_msg = StringPrintf("Failed to open oat file '%s' referenced from image %s: %s",
oat_filename.c_str(), GetName(), error_msg->c_str());
return nullptr;
@@ -811,7 +811,7 @@ OatFile* ImageSpace::OpenOatFile(const char* image_path, std::string* error_msg)
}
bool ImageSpace::ValidateOatFile(std::string* error_msg) const {
- CHECK(oat_file_.get() != NULL);
+ CHECK(oat_file_.get() != nullptr);
for (const OatFile::OatDexFile* oat_dex_file : oat_file_->GetOatDexFiles()) {
const std::string& dex_file_location = oat_dex_file->GetDexFileLocation();
uint32_t dex_file_location_checksum;
@@ -837,7 +837,7 @@ const OatFile* ImageSpace::GetOatFile() const {
}
OatFile* ImageSpace::ReleaseOatFile() {
- CHECK(oat_file_.get() != NULL);
+ CHECK(oat_file_.get() != nullptr);
return oat_file_.release();
}
diff --git a/runtime/gc/space/image_space.h b/runtime/gc/space/image_space.h
index 9ae2af4..54dc7a6 100644
--- a/runtime/gc/space/image_space.h
+++ b/runtime/gc/space/image_space.h
@@ -52,7 +52,7 @@ class ImageSpace : public MemMapSpace {
InstructionSet image_isa);
// Reads the image header from the specified image location for the
- // instruction set image_isa. Returns nullptr on failure, with
+ // instruction set image_isa. Returns null on failure, with
// reason in error_msg.
static ImageHeader* ReadImageHeader(const char* image_location,
InstructionSet image_isa,
@@ -122,7 +122,7 @@ class ImageSpace : public MemMapSpace {
private:
// Tries to initialize an ImageSpace from the given image path,
- // returning NULL on error.
+ // returning null on error.
//
// If validate_oat_file is false (for /system), do not verify that
// image's OatFile is up-to-date relative to its DexFile
diff --git a/runtime/gc/space/large_object_space.cc b/runtime/gc/space/large_object_space.cc
index 7353c83..4dfdaa5 100644
--- a/runtime/gc/space/large_object_space.cc
+++ b/runtime/gc/space/large_object_space.cc
@@ -124,9 +124,9 @@ mirror::Object* LargeObjectMapSpace::Alloc(Thread* self, size_t num_bytes,
std::string error_msg;
MemMap* mem_map = MemMap::MapAnonymous("large object space allocation", nullptr, num_bytes,
PROT_READ | PROT_WRITE, true, false, &error_msg);
- if (UNLIKELY(mem_map == NULL)) {
+ if (UNLIKELY(mem_map == nullptr)) {
LOG(WARNING) << "Large object allocation failed: " << error_msg;
- return NULL;
+ return nullptr;
}
mirror::Object* const obj = reinterpret_cast<mirror::Object*>(mem_map->Begin());
if (kIsDebugBuild) {
@@ -206,7 +206,7 @@ void LargeObjectMapSpace::Walk(DlMallocSpace::WalkCallback callback, void* arg)
for (auto it = mem_maps_.begin(); it != mem_maps_.end(); ++it) {
MemMap* mem_map = it->second;
callback(mem_map->Begin(), mem_map->End(), mem_map->Size(), arg);
- callback(NULL, NULL, 0, arg);
+ callback(nullptr, nullptr, 0, arg);
}
}
@@ -316,7 +316,7 @@ FreeListSpace* FreeListSpace::Create(const std::string& name, uint8_t* requested
std::string error_msg;
MemMap* mem_map = MemMap::MapAnonymous(name.c_str(), requested_begin, size,
PROT_READ | PROT_WRITE, true, false, &error_msg);
- CHECK(mem_map != NULL) << "Failed to allocate large object space mem map: " << error_msg;
+ CHECK(mem_map != nullptr) << "Failed to allocate large object space mem map: " << error_msg;
return new FreeListSpace(name, mem_map, mem_map->Begin(), mem_map->End());
}
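In the hunks above, every large object gets its own anonymous mapping via MemMap::MapAnonymous, which is why allocation failure surfaces as a null map rather than an exhausted free list. A sketch of what such a mapping plausibly reduces to on Linux (a guess at the essentials, not ART's MemMap):

    #include <cstddef>
    #include <sys/mman.h>

    // Anonymous mappings are zero-filled by the kernel, so no explicit clear is needed.
    void* MapAnonymousSketch(size_t num_bytes) {
      void* addr = mmap(nullptr, num_bytes, PROT_READ | PROT_WRITE,
                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      return addr == MAP_FAILED ? nullptr : addr;  // null on failure, as the hunk checks
    }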
diff --git a/runtime/gc/space/malloc_space.cc b/runtime/gc/space/malloc_space.cc
index 9195b06..b014217 100644
--- a/runtime/gc/space/malloc_space.cc
+++ b/runtime/gc/space/malloc_space.cc
@@ -75,13 +75,13 @@ MemMap* MallocSpace::CreateMemMap(const std::string& name, size_t starting_size,
LOG(ERROR) << "Failed to create alloc space (" << name << ") where the initial size ("
<< PrettySize(*initial_size) << ") is larger than its capacity ("
<< PrettySize(*growth_limit) << ")";
- return NULL;
+ return nullptr;
}
if (*growth_limit > *capacity) {
LOG(ERROR) << "Failed to create alloc space (" << name << ") where the growth limit capacity ("
<< PrettySize(*growth_limit) << ") is larger than the capacity ("
<< PrettySize(*capacity) << ")";
- return NULL;
+ return nullptr;
}
// Page align growth limit and capacity which will be used to manage mmapped storage
diff --git a/runtime/gc/space/malloc_space.h b/runtime/gc/space/malloc_space.h
index bbf1bbb..5f3a1db 100644
--- a/runtime/gc/space/malloc_space.h
+++ b/runtime/gc/space/malloc_space.h
@@ -60,7 +60,7 @@ class MallocSpace : public ContinuousMemMapAllocSpace {
// Allocate num_bytes without allowing the underlying space to grow.
virtual mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
size_t* usable_size, size_t* bytes_tl_bulk_allocated) = 0;
- // Return the storage space required by obj. If usable_size isn't nullptr then it is set to the
+ // Return the storage space required by obj. If usable_size isn't null then it is set to the
// amount of the storage space that may be used by obj.
virtual size_t AllocationSize(mirror::Object* obj, size_t* usable_size) = 0;
virtual size_t Free(Thread* self, mirror::Object* ptr)
diff --git a/runtime/gc/space/region_space.h b/runtime/gc/space/region_space.h
index b88ce24..19109f0 100644
--- a/runtime/gc/space/region_space.h
+++ b/runtime/gc/space/region_space.h
@@ -40,7 +40,7 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
// space to confirm the request was granted.
static RegionSpace* Create(const std::string& name, size_t capacity, uint8_t* requested_begin);
- // Allocate num_bytes, returns nullptr if the space is full.
+ // Allocate num_bytes, returns null if the space is full.
mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
size_t* usable_size, size_t* bytes_tl_bulk_allocated) OVERRIDE;
// Thread-unsafe allocation for when mutators are suspended, used by the semispace collector.
diff --git a/runtime/gc/space/rosalloc_space-inl.h b/runtime/gc/space/rosalloc_space-inl.h
index 9d582a3..25d4445 100644
--- a/runtime/gc/space/rosalloc_space-inl.h
+++ b/runtime/gc/space/rosalloc_space-inl.h
@@ -73,18 +73,18 @@ inline mirror::Object* RosAllocSpace::AllocCommon(Thread* self, size_t num_bytes
rosalloc_->Alloc<kThreadSafe>(self, num_bytes, &rosalloc_bytes_allocated,
&rosalloc_usable_size,
&rosalloc_bytes_tl_bulk_allocated));
- if (LIKELY(result != NULL)) {
+ if (LIKELY(result != nullptr)) {
if (kDebugSpaces) {
CHECK(Contains(result)) << "Allocation (" << reinterpret_cast<void*>(result)
<< ") not in bounds of allocation space " << *this;
}
- DCHECK(bytes_allocated != NULL);
+ DCHECK(bytes_allocated != nullptr);
*bytes_allocated = rosalloc_bytes_allocated;
DCHECK_EQ(rosalloc_usable_size, rosalloc_->UsableSize(result));
if (usable_size != nullptr) {
*usable_size = rosalloc_usable_size;
}
- DCHECK(bytes_tl_bulk_allocated != NULL);
+ DCHECK(bytes_tl_bulk_allocated != nullptr);
*bytes_tl_bulk_allocated = rosalloc_bytes_tl_bulk_allocated;
}
return result;
diff --git a/runtime/gc/space/rosalloc_space.cc b/runtime/gc/space/rosalloc_space.cc
index eb1d5f4..2c7d93e 100644
--- a/runtime/gc/space/rosalloc_space.cc
+++ b/runtime/gc/space/rosalloc_space.cc
@@ -64,9 +64,9 @@ RosAllocSpace* RosAllocSpace::CreateFromMemMap(MemMap* mem_map, const std::strin
allocator::RosAlloc* rosalloc = CreateRosAlloc(mem_map->Begin(), starting_size, initial_size,
capacity, low_memory_mode, running_on_valgrind);
- if (rosalloc == NULL) {
+ if (rosalloc == nullptr) {
LOG(ERROR) << "Failed to initialize rosalloc for alloc space (" << name << ")";
- return NULL;
+ return nullptr;
}
// Protect memory beyond the starting size. MoreCore will add r/w permissions when necessary
@@ -113,10 +113,10 @@ RosAllocSpace* RosAllocSpace::Create(const std::string& name, size_t initial_siz
size_t starting_size = Heap::kDefaultStartingSize;
MemMap* mem_map = CreateMemMap(name, starting_size, &initial_size, &growth_limit, &capacity,
requested_begin);
- if (mem_map == NULL) {
+ if (mem_map == nullptr) {
LOG(ERROR) << "Failed to create mem map for alloc space (" << name << ") of size "
<< PrettySize(capacity);
- return NULL;
+ return nullptr;
}
RosAllocSpace* space = CreateFromMemMap(mem_map, name, starting_size, initial_size,
@@ -145,7 +145,7 @@ allocator::RosAlloc* RosAllocSpace::CreateRosAlloc(void* begin, size_t morecore_
art::gc::allocator::RosAlloc::kPageReleaseModeAll :
art::gc::allocator::RosAlloc::kPageReleaseModeSizeAndEnd,
running_on_valgrind);
- if (rosalloc != NULL) {
+ if (rosalloc != nullptr) {
rosalloc->SetFootprintLimit(initial_size);
} else {
PLOG(ERROR) << "RosAlloc::Create failed";
@@ -170,7 +170,7 @@ mirror::Object* RosAllocSpace::AllocWithGrowth(Thread* self, size_t num_bytes,
rosalloc_->SetFootprintLimit(footprint);
}
// Note RosAlloc zeroes memory internally.
- // Return the new allocation or NULL.
+ // Return the new allocation or null.
CHECK(!kDebugSpaces || result == nullptr || Contains(result));
return result;
}
@@ -192,7 +192,7 @@ MallocSpace* RosAllocSpace::CreateInstance(MemMap* mem_map, const std::string& n
size_t RosAllocSpace::Free(Thread* self, mirror::Object* ptr) {
if (kDebugSpaces) {
- CHECK(ptr != NULL);
+ CHECK(ptr != nullptr);
CHECK(Contains(ptr)) << "Free (" << ptr << ") not in bounds of heap " << *this;
}
if (kRecentFreeCount > 0) {
@@ -309,7 +309,7 @@ void RosAllocSpace::InspectAllRosAllocWithSuspendAll(
MutexLock mu2(self, *Locks::thread_list_lock_);
rosalloc_->InspectAll(callback, arg);
if (do_null_callback_at_end) {
- callback(NULL, NULL, 0, arg); // Indicate end of a space.
+ callback(nullptr, nullptr, 0, arg); // Indicate end of a space.
}
}
tl->ResumeAll();
@@ -324,7 +324,7 @@ void RosAllocSpace::InspectAllRosAlloc(void (*callback)(void *start, void *end,
// from SignalCatcher::HandleSigQuit().
rosalloc_->InspectAll(callback, arg);
if (do_null_callback_at_end) {
- callback(NULL, NULL, 0, arg); // Indicate end of a space.
+ callback(nullptr, nullptr, 0, arg); // Indicate end of a space.
}
} else if (Locks::mutator_lock_->IsSharedHeld(self)) {
// The mutators are not suspended yet and we have a shared access