summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorMathieu Chartier <mathieuc@google.com>2015-04-21 16:50:40 -0700
committerMathieu Chartier <mathieuc@google.com>2015-04-22 12:44:27 -0700
commit2cebb24bfc3247d3e9be138a3350106737455918 (patch)
treed04d27d21b3c7733d784e303f01f873bb99e7770
parent1f02f1a7b3073b8fef07770a67fbf94afad317f0 (diff)
downloadart-2cebb24bfc3247d3e9be138a3350106737455918.zip
art-2cebb24bfc3247d3e9be138a3350106737455918.tar.gz
art-2cebb24bfc3247d3e9be138a3350106737455918.tar.bz2
Replace NULL with nullptr
Also fixed some lines that were too long, and a few other minor details. Change-Id: I6efba5fb6e03eb5d0a300fddb2a75bf8e2f175cb
-rw-r--r--cmdline/cmdline_parser_test.cc2
-rw-r--r--compiler/common_compiler_test.cc2
-rw-r--r--compiler/compiled_method.cc2
-rw-r--r--compiler/dex/dataflow_iterator-inl.h8
-rw-r--r--compiler/dex/dataflow_iterator.h2
-rw-r--r--compiler/dex/dex_to_dex_compiler.cc2
-rw-r--r--compiler/dex/mir_analysis.cc6
-rw-r--r--compiler/dex/mir_dataflow.cc14
-rw-r--r--compiler/dex/mir_field_info.h2
-rw-r--r--compiler/dex/mir_graph.cc54
-rw-r--r--compiler/dex/mir_graph.h2
-rw-r--r--compiler/dex/mir_method_info.h4
-rw-r--r--compiler/dex/mir_optimization.cc84
-rw-r--r--compiler/dex/pass_driver.h2
-rw-r--r--compiler/dex/pass_driver_me.h7
-rw-r--r--compiler/dex/quick/arm/assemble_arm.cc18
-rw-r--r--compiler/dex/quick/arm/call_arm.cc16
-rw-r--r--compiler/dex/quick/arm/int_arm.cc12
-rw-r--r--compiler/dex/quick/arm/utility_arm.cc12
-rw-r--r--compiler/dex/quick/arm64/assemble_arm64.cc16
-rw-r--r--compiler/dex/quick/arm64/call_arm64.cc16
-rw-r--r--compiler/dex/quick/arm64/int_arm64.cc2
-rw-r--r--compiler/dex/quick/arm64/utility_arm64.cc14
-rw-r--r--compiler/dex/quick/codegen_util.cc2
-rw-r--r--compiler/dex/quick/dex_file_method_inliner.cc2
-rw-r--r--compiler/dex/quick/gen_common.cc26
-rw-r--r--compiler/dex/quick/gen_loadstore.cc2
-rw-r--r--compiler/dex/quick/mips/assemble_mips.cc8
-rw-r--r--compiler/dex/quick/mips/call_mips.cc4
-rw-r--r--compiler/dex/quick/mips/int_mips.cc8
-rw-r--r--compiler/dex/quick/mips/utility_mips.cc12
-rw-r--r--compiler/dex/quick/mir_to_lir.cc18
-rw-r--r--compiler/dex/quick/mir_to_lir.h14
-rw-r--r--compiler/dex/quick/quick_cfi_test.cc2
-rw-r--r--compiler/dex/quick/quick_compiler.cc6
-rw-r--r--compiler/dex/quick/ralloc_util.cc2
-rw-r--r--compiler/dex/quick/x86/assemble_x86.cc16
-rw-r--r--compiler/dex/quick/x86/call_x86.cc2
-rwxr-xr-xcompiler/dex/quick/x86/fp_x86.cc6
-rwxr-xr-xcompiler/dex/quick/x86/int_x86.cc4
-rwxr-xr-xcompiler/dex/quick/x86/target_x86.cc2
-rw-r--r--compiler/dex/quick/x86/utility_x86.cc12
-rw-r--r--compiler/dex/ssa_transformation.cc40
-rw-r--r--compiler/dex/verification_results.cc2
-rw-r--r--compiler/dex/verified_method.cc4
-rw-r--r--compiler/dex/verified_method.h2
-rw-r--r--compiler/driver/compiler_driver-inl.h6
-rw-r--r--compiler/driver/compiler_driver.cc8
-rw-r--r--compiler/driver/compiler_driver.h14
-rw-r--r--compiler/driver/compiler_driver_test.cc31
-rw-r--r--compiler/elf_builder.h6
-rw-r--r--compiler/elf_writer_quick.cc2
-rw-r--r--compiler/elf_writer_test.cc8
-rw-r--r--compiler/image_test.cc16
-rw-r--r--compiler/image_writer.cc24
-rw-r--r--compiler/jni/quick/calling_convention.cc4
-rw-r--r--compiler/jni/quick/jni_compiler.cc12
-rw-r--r--compiler/oat_writer.cc18
-rw-r--r--compiler/oat_writer.h8
-rw-r--r--compiler/optimizing/gvn.cc2
-rw-r--r--compiler/optimizing/nodes.cc4
-rw-r--r--compiler/optimizing/nodes.h8
-rw-r--r--compiler/optimizing/ssa_liveness_analysis.h7
-rw-r--r--compiler/output_stream_test.cc6
-rw-r--r--compiler/utils/arm/assembler_arm.h14
-rw-r--r--compiler/utils/arm64/assembler_arm64.h6
-rw-r--r--compiler/utils/assembler.cc8
-rw-r--r--compiler/utils/assembler.h18
-rw-r--r--compiler/utils/dedupe_set.h4
-rw-r--r--compiler/utils/mips/assembler_mips.h14
-rw-r--r--compiler/utils/mips64/assembler_mips64.h6
-rw-r--r--compiler/utils/x86/assembler_x86.h14
-rw-r--r--compiler/utils/x86_64/assembler_x86_64.cc2
-rw-r--r--compiler/utils/x86_64/assembler_x86_64.h14
-rw-r--r--dalvikvm/dalvikvm.cc22
-rw-r--r--dex2oat/dex2oat.cc4
-rw-r--r--disassembler/disassembler.cc2
-rw-r--r--imgdiag/imgdiag.cc16
-rw-r--r--oatdump/oatdump.cc2
-rw-r--r--runtime/arch/arm/context_arm.h2
-rw-r--r--runtime/arch/arm/entrypoints_init_arm.cc15
-rw-r--r--runtime/arch/arm/fault_handler_arm.cc2
-rw-r--r--runtime/arch/arm/quick_entrypoints_arm.S6
-rw-r--r--runtime/arch/arm64/context_arm64.h2
-rw-r--r--runtime/arch/arm64/entrypoints_init_arm64.cc17
-rw-r--r--runtime/arch/arm64/fault_handler_arm64.cc2
-rw-r--r--runtime/arch/arm64/quick_entrypoints_arm64.S6
-rw-r--r--runtime/arch/mips/context_mips.h2
-rw-r--r--runtime/arch/mips/entrypoints_init_mips.cc17
-rw-r--r--runtime/arch/mips/quick_entrypoints_mips.S6
-rw-r--r--runtime/arch/mips64/context_mips64.h2
-rw-r--r--runtime/arch/mips64/entrypoints_init_mips64.cc23
-rw-r--r--runtime/arch/mips64/quick_entrypoints_mips64.S6
-rw-r--r--runtime/arch/stub_test.cc18
-rw-r--r--runtime/arch/x86/context_x86.h2
-rw-r--r--runtime/arch/x86/entrypoints_init_x86.cc26
-rw-r--r--runtime/arch/x86/fault_handler_x86.cc2
-rw-r--r--runtime/arch/x86/quick_entrypoints_x86.S12
-rw-r--r--runtime/arch/x86_64/context_x86_64.h2
-rw-r--r--runtime/arch/x86_64/entrypoints_init_x86_64.cc27
-rw-r--r--runtime/arch/x86_64/quick_entrypoints_x86_64.S10
-rw-r--r--runtime/art_field-inl.h8
-rw-r--r--runtime/art_field.h2
-rw-r--r--runtime/base/hex_dump.cc2
-rw-r--r--runtime/base/logging.cc2
-rw-r--r--runtime/base/logging.h2
-rw-r--r--runtime/base/mutex-inl.h27
-rw-r--r--runtime/base/mutex.cc58
-rw-r--r--runtime/base/mutex.h4
-rw-r--r--runtime/base/mutex_test.cc7
-rw-r--r--runtime/base/scoped_flock.cc9
-rw-r--r--runtime/base/stl_util.h20
-rw-r--r--runtime/base/variant_map.h2
-rw-r--r--runtime/base/variant_map_test.cc2
-rw-r--r--runtime/check_jni.cc8
-rw-r--r--runtime/class_linker-inl.h8
-rw-r--r--runtime/class_linker.cc4
-rw-r--r--runtime/class_linker.h6
-rw-r--r--runtime/common_runtime_test.cc4
-rw-r--r--runtime/common_throws.cc64
-rw-r--r--runtime/debugger.cc19
-rw-r--r--runtime/debugger.h2
-rw-r--r--runtime/dex_file-inl.h2
-rw-r--r--runtime/dex_file.cc105
-rw-r--r--runtime/dex_file.h41
-rw-r--r--runtime/dex_file_test.cc28
-rw-r--r--runtime/dex_file_verifier.cc14
-rw-r--r--runtime/dex_file_verifier.h6
-rw-r--r--runtime/dex_file_verifier_test.cc10
-rw-r--r--runtime/dex_instruction.h2
-rw-r--r--runtime/dex_method_iterator.h28
-rw-r--r--runtime/elf_file.cc6
-rw-r--r--runtime/elf_file_impl.h7
-rw-r--r--runtime/entrypoints/entrypoint_utils-inl.h20
-rw-r--r--runtime/entrypoints/entrypoint_utils.cc16
-rw-r--r--runtime/entrypoints/interpreter/interpreter_entrypoints.cc2
-rw-r--r--runtime/entrypoints/jni/jni_entrypoints.cc10
-rw-r--r--runtime/entrypoints/quick/quick_cast_entrypoints.cc4
-rw-r--r--runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc2
-rw-r--r--runtime/entrypoints/quick/quick_throw_entrypoints.cc4
-rw-r--r--runtime/exception_test.cc2
-rw-r--r--runtime/gc/accounting/atomic_stack.h4
-rw-r--r--runtime/gc/accounting/card_table.cc8
-rw-r--r--runtime/gc/accounting/card_table.h2
-rw-r--r--runtime/gc/accounting/space_bitmap-inl.h2
-rw-r--r--runtime/gc/accounting/space_bitmap.cc10
-rw-r--r--runtime/gc/accounting/space_bitmap_test.cc4
-rw-r--r--runtime/gc/allocator/rosalloc.cc30
-rw-r--r--runtime/gc/collector/mark_sweep.cc8
-rw-r--r--runtime/gc/collector/semi_space.cc2
-rw-r--r--runtime/gc/heap-inl.h2
-rw-r--r--runtime/gc/heap.cc12
-rw-r--r--runtime/gc/heap.h4
-rw-r--r--runtime/gc/reference_queue.cc4
-rw-r--r--runtime/gc/space/bump_pointer_space.h2
-rw-r--r--runtime/gc/space/dlmalloc_space-inl.h6
-rw-r--r--runtime/gc/space/dlmalloc_space.cc6
-rw-r--r--runtime/gc/space/image_space.cc10
-rw-r--r--runtime/gc/space/image_space.h4
-rw-r--r--runtime/gc/space/large_object_space.cc8
-rw-r--r--runtime/gc/space/malloc_space.cc4
-rw-r--r--runtime/gc/space/malloc_space.h2
-rw-r--r--runtime/gc/space/region_space.h2
-rw-r--r--runtime/gc/space/rosalloc_space-inl.h6
-rw-r--r--runtime/gc/space/rosalloc_space.cc18
-rw-r--r--runtime/gc_map.h2
-rw-r--r--runtime/hprof/hprof.cc6
-rw-r--r--runtime/indirect_reference_table-inl.h2
-rw-r--r--runtime/indirect_reference_table.cc17
-rw-r--r--runtime/indirect_reference_table.h4
-rw-r--r--runtime/indirect_reference_table_test.cc2
-rw-r--r--runtime/instrumentation.cc12
-rw-r--r--runtime/intern_table.cc2
-rw-r--r--runtime/intern_table.h2
-rw-r--r--runtime/intern_table_test.cc4
-rw-r--r--runtime/interpreter/interpreter.cc44
-rw-r--r--runtime/interpreter/interpreter_common.cc10
-rw-r--r--runtime/interpreter/interpreter_common.h4
-rw-r--r--runtime/interpreter/interpreter_goto_table_impl.cc277
-rw-r--r--runtime/interpreter/interpreter_switch_impl.cc264
-rw-r--r--runtime/interpreter/unstarted_runtime.cc2
-rw-r--r--runtime/jdwp/jdwp.h2
-rw-r--r--runtime/jdwp/jdwp_expand_buf.cc2
-rw-r--r--runtime/jdwp/jdwp_handler.cc2
-rw-r--r--runtime/jit/jit_code_cache.h4
-rw-r--r--runtime/jni_internal.cc2
-rw-r--r--runtime/jni_internal_test.cc16
-rw-r--r--runtime/mapping_table.h2
-rw-r--r--runtime/mem_map.cc9
-rw-r--r--runtime/mem_map.h14
-rw-r--r--runtime/memory_region.cc2
-rw-r--r--runtime/mirror/art_method-inl.h2
-rw-r--r--runtime/mirror/art_method.cc4
-rw-r--r--runtime/mirror/art_method.h4
-rw-r--r--runtime/mirror/class-inl.h22
-rw-r--r--runtime/mirror/class.h22
-rw-r--r--runtime/mirror/dex_cache_test.cc4
-rw-r--r--runtime/mirror/iftable-inl.h2
-rw-r--r--runtime/mirror/iftable.h10
-rw-r--r--runtime/mirror/object-inl.h10
-rw-r--r--runtime/mirror/object.cc2
-rw-r--r--runtime/mirror/object_array-inl.h4
-rw-r--r--runtime/mirror/object_test.cc136
-rw-r--r--runtime/mirror/stack_trace_element.cc4
-rw-r--r--runtime/mirror/string-inl.h2
-rw-r--r--runtime/mirror/string.cc4
-rw-r--r--runtime/mirror/throwable.cc6
-rw-r--r--runtime/monitor.cc93
-rw-r--r--runtime/monitor_android.cc2
-rw-r--r--runtime/monitor_pool.h3
-rw-r--r--runtime/monitor_test.cc4
-rw-r--r--runtime/native/dalvik_system_DexFile.cc24
-rw-r--r--runtime/native/dalvik_system_VMDebug.cc14
-rw-r--r--runtime/native/dalvik_system_VMRuntime.cc45
-rw-r--r--runtime/native/java_lang_Class.cc2
-rw-r--r--runtime/native/java_lang_DexCache.cc8
-rw-r--r--runtime/native/java_lang_String.cc2
-rw-r--r--runtime/native/java_lang_Thread.cc12
-rw-r--r--runtime/native/java_lang_reflect_Array.cc15
-rw-r--r--runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc4
-rw-r--r--runtime/nth_caller_visitor.h8
-rw-r--r--runtime/oat_file.cc6
-rw-r--r--runtime/oat_file.h14
-rw-r--r--runtime/oat_file_assistant.cc8
-rw-r--r--runtime/oat_file_assistant.h28
-rw-r--r--runtime/oat_file_assistant_test.cc2
-rw-r--r--runtime/object_callbacks.h5
-rw-r--r--runtime/os_linux.cc4
-rw-r--r--runtime/parsed_options.cc6
-rw-r--r--runtime/parsed_options_test.cc33
-rw-r--r--runtime/primitive.h2
-rw-r--r--runtime/profiler.cc6
-rw-r--r--runtime/reference_table.cc6
-rw-r--r--runtime/reference_table_test.cc4
-rw-r--r--runtime/reflection_test.cc26
-rw-r--r--runtime/runtime.cc42
-rw-r--r--runtime/runtime.h6
-rw-r--r--runtime/runtime_linux.cc27
-rw-r--r--runtime/scoped_thread_state_change.h18
-rw-r--r--runtime/signal_catcher.cc14
-rw-r--r--runtime/signal_set.h2
-rw-r--r--runtime/stack.cc75
-rw-r--r--runtime/stack.h8
-rw-r--r--runtime/thread-inl.h6
-rw-r--r--runtime/thread.cc20
-rw-r--r--runtime/thread.h26
-rw-r--r--runtime/thread_linux.cc14
-rw-r--r--runtime/thread_list.cc14
-rw-r--r--runtime/thread_list.h6
-rw-r--r--runtime/thread_pool.cc2
-rw-r--r--runtime/thread_pool.h4
-rw-r--r--runtime/trace.h4
-rw-r--r--runtime/utils.cc44
-rw-r--r--runtime/utils.h2
-rw-r--r--runtime/utils_test.cc22
-rw-r--r--runtime/verifier/dex_gc_map.cc2
-rw-r--r--runtime/verifier/dex_gc_map.h2
-rw-r--r--runtime/verifier/method_verifier.h8
-rw-r--r--runtime/verifier/method_verifier_test.cc2
-rw-r--r--runtime/verifier/reg_type.h6
-rw-r--r--runtime/verifier/reg_type_cache-inl.h2
-rw-r--r--runtime/well_known_classes.cc8
-rw-r--r--runtime/zip_archive.cc2
-rw-r--r--runtime/zip_archive.h2
-rw-r--r--runtime/zip_archive_test.cc4
-rw-r--r--sigchainlib/sigchain.cc12
-rw-r--r--test/004-JniTest/jni_test.cc4
-rw-r--r--test/004-SignalTest/signaltest.cc2
268 files changed, 1848 insertions, 1658 deletions
diff --git a/cmdline/cmdline_parser_test.cc b/cmdline/cmdline_parser_test.cc
index 9f873b3..1386439 100644
--- a/cmdline/cmdline_parser_test.cc
+++ b/cmdline/cmdline_parser_test.cc
@@ -23,7 +23,7 @@
#include "gtest/gtest.h"
#define EXPECT_NULL(expected) EXPECT_EQ(reinterpret_cast<const void*>(expected), \
- reinterpret_cast<void*>(NULL));
+ reinterpret_cast<void*>(nullptr));
namespace art {
bool UsuallyEquals(double expected, double actual);
diff --git a/compiler/common_compiler_test.cc b/compiler/common_compiler_test.cc
index 05cb8b4..5a9e04f 100644
--- a/compiler/common_compiler_test.cc
+++ b/compiler/common_compiler_test.cc
@@ -263,7 +263,7 @@ void CommonCompilerTest::CompileVirtualMethod(Handle<mirror::ClassLoader> class_
mirror::Class* klass = class_linker_->FindClass(self, class_descriptor.c_str(), class_loader);
CHECK(klass != nullptr) << "Class not found " << class_name;
mirror::ArtMethod* method = klass->FindVirtualMethod(method_name, signature);
- CHECK(method != NULL) << "Virtual method not found: "
+ CHECK(method != nullptr) << "Virtual method not found: "
<< class_name << "." << method_name << signature;
CompileMethod(method);
}
diff --git a/compiler/compiled_method.cc b/compiler/compiled_method.cc
index 4f7a970..d1acada 100644
--- a/compiler/compiled_method.cc
+++ b/compiler/compiled_method.cc
@@ -108,7 +108,7 @@ const void* CompiledCode::CodePointer(const void* code_pointer,
}
default:
LOG(FATAL) << "Unknown InstructionSet: " << instruction_set;
- return NULL;
+ return nullptr;
}
}
diff --git a/compiler/dex/dataflow_iterator-inl.h b/compiler/dex/dataflow_iterator-inl.h
index 6e25db6..83dfc28 100644
--- a/compiler/dex/dataflow_iterator-inl.h
+++ b/compiler/dex/dataflow_iterator-inl.h
@@ -23,7 +23,7 @@ namespace art {
// Single forward pass over the nodes.
inline BasicBlock* DataflowIterator::ForwardSingleNext() {
- BasicBlock* res = NULL;
+ BasicBlock* res = nullptr;
// Are we not yet at the end?
if (idx_ < end_idx_) {
@@ -38,7 +38,7 @@ inline BasicBlock* DataflowIterator::ForwardSingleNext() {
// Repeat full forward passes over all nodes until no change occurs during a complete pass.
inline BasicBlock* DataflowIterator::ForwardRepeatNext() {
- BasicBlock* res = NULL;
+ BasicBlock* res = nullptr;
// Are we at the end and have we changed something?
if ((idx_ >= end_idx_) && changed_ == true) {
@@ -61,7 +61,7 @@ inline BasicBlock* DataflowIterator::ForwardRepeatNext() {
// Single reverse pass over the nodes.
inline BasicBlock* DataflowIterator::ReverseSingleNext() {
- BasicBlock* res = NULL;
+ BasicBlock* res = nullptr;
// Are we not yet at the end?
if (idx_ >= 0) {
@@ -76,7 +76,7 @@ inline BasicBlock* DataflowIterator::ReverseSingleNext() {
// Repeat full backwards passes over all nodes until no change occurs during a complete pass.
inline BasicBlock* DataflowIterator::ReverseRepeatNext() {
- BasicBlock* res = NULL;
+ BasicBlock* res = nullptr;
// Are we done and we changed something during the last iteration?
if ((idx_ < 0) && changed_) {
diff --git a/compiler/dex/dataflow_iterator.h b/compiler/dex/dataflow_iterator.h
index 2a06cec..097c2a4 100644
--- a/compiler/dex/dataflow_iterator.h
+++ b/compiler/dex/dataflow_iterator.h
@@ -72,7 +72,7 @@ namespace art {
: mir_graph_(mir_graph),
start_idx_(start_idx),
end_idx_(end_idx),
- block_id_list_(NULL),
+ block_id_list_(nullptr),
idx_(0),
repeats_(0),
changed_(false) {}
diff --git a/compiler/dex/dex_to_dex_compiler.cc b/compiler/dex/dex_to_dex_compiler.cc
index ef94d8b..d1ddfda 100644
--- a/compiler/dex/dex_to_dex_compiler.cc
+++ b/compiler/dex/dex_to_dex_compiler.cc
@@ -301,7 +301,7 @@ extern "C" void ArtCompileDEX(art::CompilerDriver& driver, const art::DexFile::C
art::DexToDexCompilationLevel dex_to_dex_compilation_level) {
UNUSED(invoke_type);
if (dex_to_dex_compilation_level != art::kDontDexToDexCompile) {
- art::DexCompilationUnit unit(NULL, class_loader, art::Runtime::Current()->GetClassLinker(),
+ art::DexCompilationUnit unit(nullptr, class_loader, art::Runtime::Current()->GetClassLinker(),
dex_file, code_item, class_def_idx, method_idx, access_flags,
driver.GetVerifiedMethod(&dex_file, method_idx));
art::optimizer::DexCompiler dex_compiler(driver, unit, dex_to_dex_compilation_level);
diff --git a/compiler/dex/mir_analysis.cc b/compiler/dex/mir_analysis.cc
index 3d7a640..9099e8a 100644
--- a/compiler/dex/mir_analysis.cc
+++ b/compiler/dex/mir_analysis.cc
@@ -968,7 +968,7 @@ void MIRGraph::AnalyzeBlock(BasicBlock* bb, MethodStats* stats) {
* edges until we reach an explicit branch or return.
*/
BasicBlock* ending_bb = bb;
- if (ending_bb->last_mir_insn != NULL) {
+ if (ending_bb->last_mir_insn != nullptr) {
uint32_t ending_flags = kAnalysisAttributes[ending_bb->last_mir_insn->dalvikInsn.opcode];
while ((ending_flags & kAnBranch) == 0) {
ending_bb = GetBasicBlock(ending_bb->fall_through);
@@ -998,7 +998,7 @@ void MIRGraph::AnalyzeBlock(BasicBlock* bb, MethodStats* stats) {
bool done = false;
while (!done) {
tbb->visited = true;
- for (MIR* mir = tbb->first_mir_insn; mir != NULL; mir = mir->next) {
+ for (MIR* mir = tbb->first_mir_insn; mir != nullptr; mir = mir->next) {
if (MIR::DecodedInstruction::IsPseudoMirOp(mir->dalvikInsn.opcode)) {
// Skip any MIR pseudo-op.
continue;
@@ -1195,7 +1195,7 @@ bool MIRGraph::SkipCompilation(std::string* skip_message) {
ClearAllVisitedFlags();
AllNodesIterator iter(this);
- for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
+ for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
AnalyzeBlock(bb, &stats);
}
diff --git a/compiler/dex/mir_dataflow.cc b/compiler/dex/mir_dataflow.cc
index eaaf540..b4aec98 100644
--- a/compiler/dex/mir_dataflow.cc
+++ b/compiler/dex/mir_dataflow.cc
@@ -989,7 +989,7 @@ bool MIRGraph::FindLocalLiveIn(BasicBlock* bb) {
MIR* mir;
ArenaBitVector *use_v, *def_v, *live_in_v;
- if (bb->data_flow_info == NULL) return false;
+ if (bb->data_flow_info == nullptr) return false;
use_v = bb->data_flow_info->use_v =
new (arena_) ArenaBitVector(arena_, GetNumOfCodeAndTempVRs(), false, kBitMapUse);
@@ -998,7 +998,7 @@ bool MIRGraph::FindLocalLiveIn(BasicBlock* bb) {
live_in_v = bb->data_flow_info->live_in_v =
new (arena_) ArenaBitVector(arena_, GetNumOfCodeAndTempVRs(), false, kBitMapLiveIn);
- for (mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+ for (mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
uint64_t df_attributes = GetDataFlowAttributes(mir);
MIR::DecodedInstruction* d_insn = &mir->dalvikInsn;
@@ -1188,7 +1188,7 @@ void MIRGraph::DataFlowSSAFormatExtended(MIR* mir) {
/* Entry function to convert a block into SSA representation */
bool MIRGraph::DoSSAConversion(BasicBlock* bb) {
- if (bb->data_flow_info == NULL) return false;
+ if (bb->data_flow_info == nullptr) return false;
/*
* Pruned SSA form: Insert phi nodes for each dalvik register marked in phi_node_blocks
@@ -1211,7 +1211,7 @@ bool MIRGraph::DoSSAConversion(BasicBlock* bb) {
}
}
- for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+ for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
mir->ssa_rep =
static_cast<struct SSARepresentation *>(arena_->Alloc(sizeof(SSARepresentation),
kArenaAllocDFInfo));
@@ -1402,8 +1402,8 @@ void MIRGraph::CountUses(BasicBlock* bb) {
return;
}
uint32_t weight = GetUseCountWeight(bb);
- for (MIR* mir = bb->first_mir_insn; (mir != NULL); mir = mir->next) {
- if (mir->ssa_rep == NULL) {
+ for (MIR* mir = bb->first_mir_insn; (mir != nullptr); mir = mir->next) {
+ if (mir->ssa_rep == nullptr) {
continue;
}
for (int i = 0; i < mir->ssa_rep->num_uses; i++) {
@@ -1448,7 +1448,7 @@ bool MIRGraph::VerifyPredInfo(BasicBlock* bb) {
void MIRGraph::VerifyDataflow() {
/* Verify if all blocks are connected as claimed */
AllNodesIterator iter(this);
- for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
+ for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
VerifyPredInfo(bb);
}
}
diff --git a/compiler/dex/mir_field_info.h b/compiler/dex/mir_field_info.h
index 11773e7..e4570fd 100644
--- a/compiler/dex/mir_field_info.h
+++ b/compiler/dex/mir_field_info.h
@@ -124,7 +124,7 @@ class MirFieldInfo {
uint16_t declaring_field_idx_;
// The type index of the class declaring the field, 0 if unresolved.
uint16_t declaring_class_idx_;
- // The dex file that defines the class containing the field and the field, nullptr if unresolved.
+ // The dex file that defines the class containing the field and the field, null if unresolved.
const DexFile* declaring_dex_file_;
};
diff --git a/compiler/dex/mir_graph.cc b/compiler/dex/mir_graph.cc
index 7d0729f..b5c42f1 100644
--- a/compiler/dex/mir_graph.cc
+++ b/compiler/dex/mir_graph.cc
@@ -81,15 +81,15 @@ const char* MIRGraph::extended_mir_op_names_[kMirOpLast - kMirOpFirst] = {
};
MIRGraph::MIRGraph(CompilationUnit* cu, ArenaAllocator* arena)
- : reg_location_(NULL),
+ : reg_location_(nullptr),
block_id_map_(std::less<unsigned int>(), arena->Adapter()),
cu_(cu),
ssa_base_vregs_(arena->Adapter(kArenaAllocSSAToDalvikMap)),
ssa_subscripts_(arena->Adapter(kArenaAllocSSAToDalvikMap)),
- vreg_to_ssa_map_(NULL),
- ssa_last_defs_(NULL),
- is_constant_v_(NULL),
- constant_values_(NULL),
+ vreg_to_ssa_map_(nullptr),
+ ssa_last_defs_(nullptr),
+ is_constant_v_(nullptr),
+ constant_values_(nullptr),
use_counts_(arena->Adapter()),
raw_use_counts_(arena->Adapter()),
num_reachable_blocks_(0),
@@ -106,24 +106,24 @@ MIRGraph::MIRGraph(CompilationUnit* cu, ArenaAllocator* arena)
topological_order_indexes_(arena->Adapter(kArenaAllocTopologicalSortOrder)),
topological_order_loop_head_stack_(arena->Adapter(kArenaAllocTopologicalSortOrder)),
max_nested_loops_(0u),
- i_dom_list_(NULL),
+ i_dom_list_(nullptr),
temp_scoped_alloc_(),
block_list_(arena->Adapter(kArenaAllocBBList)),
- try_block_addr_(NULL),
- entry_block_(NULL),
- exit_block_(NULL),
- current_code_item_(NULL),
+ try_block_addr_(nullptr),
+ entry_block_(nullptr),
+ exit_block_(nullptr),
+ current_code_item_(nullptr),
m_units_(arena->Adapter()),
method_stack_(arena->Adapter()),
current_method_(kInvalidEntry),
current_offset_(kInvalidEntry),
def_count_(0),
- opcode_count_(NULL),
+ opcode_count_(nullptr),
num_ssa_regs_(0),
extended_basic_blocks_(arena->Adapter()),
method_sreg_(0),
attributes_(METHOD_IS_LEAF), // Start with leaf assumption, change on encountering invoke.
- checkstats_(NULL),
+ checkstats_(nullptr),
arena_(arena),
backward_branches_(0),
forward_branches_(0),
@@ -185,13 +185,13 @@ BasicBlock* MIRGraph::SplitBlock(DexOffset code_offset,
BasicBlock* orig_block, BasicBlock** immed_pred_block_p) {
DCHECK_GT(code_offset, orig_block->start_offset);
MIR* insn = orig_block->first_mir_insn;
- MIR* prev = NULL; // Will be set to instruction before split.
+ MIR* prev = nullptr; // Will be set to instruction before split.
while (insn) {
if (insn->offset == code_offset) break;
prev = insn;
insn = insn->next;
}
- if (insn == NULL) {
+ if (insn == nullptr) {
LOG(FATAL) << "Break split failed";
}
// Now insn is at the instruction where we want to split, namely
@@ -530,7 +530,7 @@ BasicBlock* MIRGraph::ProcessCanSwitch(BasicBlock* cur_block, MIR* insn, DexOffs
size = switch_data[1];
first_key = switch_data[2] | (switch_data[3] << 16);
target_table = reinterpret_cast<const int*>(&switch_data[4]);
- keyTable = NULL; // Make the compiler happy.
+ keyTable = nullptr; // Make the compiler happy.
/*
* Sparse switch data format:
* ushort ident = 0x0200 magic value
@@ -718,8 +718,8 @@ void MIRGraph::InlineMethod(const DexFile::CodeItem* code_item, uint32_t access_
// If this is the first method, set up default entry and exit blocks.
if (current_method_ == 0) {
- DCHECK(entry_block_ == NULL);
- DCHECK(exit_block_ == NULL);
+ DCHECK(entry_block_ == nullptr);
+ DCHECK(exit_block_ == nullptr);
DCHECK_EQ(GetNumBlocks(), 0U);
// Use id 0 to represent a null block.
BasicBlock* null_block = CreateNewBB(kNullBlock);
@@ -755,7 +755,7 @@ void MIRGraph::InlineMethod(const DexFile::CodeItem* code_item, uint32_t access_
insn->m_unit_index = current_method_;
int width = ParseInsn(code_ptr, &insn->dalvikInsn);
Instruction::Code opcode = insn->dalvikInsn.opcode;
- if (opcode_count_ != NULL) {
+ if (opcode_count_ != nullptr) {
opcode_count_[static_cast<int>(opcode)]++;
}
@@ -879,7 +879,7 @@ void MIRGraph::InlineMethod(const DexFile::CodeItem* code_item, uint32_t access_
}
void MIRGraph::ShowOpcodeStats() {
- DCHECK(opcode_count_ != NULL);
+ DCHECK(opcode_count_ != nullptr);
LOG(INFO) << "Opcode Count";
for (int i = 0; i < kNumPackedOpcodes; i++) {
if (opcode_count_[i] != 0) {
@@ -947,7 +947,7 @@ void MIRGraph::DumpCFG(const char* dir_prefix, bool all_blocks, const char *suff
return;
}
file = fopen(fpath.c_str(), "w");
- if (file == NULL) {
+ if (file == nullptr) {
PLOG(ERROR) << "Could not open " << fpath << " for DumpCFG.";
return;
}
@@ -961,7 +961,7 @@ void MIRGraph::DumpCFG(const char* dir_prefix, bool all_blocks, const char *suff
for (idx = 0; idx < num_blocks; idx++) {
int block_idx = all_blocks ? idx : dfs_order_[idx];
BasicBlock* bb = GetBasicBlock(block_idx);
- if (bb == NULL) continue;
+ if (bb == nullptr) continue;
if (bb->block_type == kDead) continue;
if (bb->hidden) continue;
if (bb->block_type == kEntryBlock) {
@@ -1501,8 +1501,8 @@ char* MIRGraph::GetDalvikDisassembly(const MIR* mir) {
}
nop = true;
}
- int defs = (ssa_rep != NULL) ? ssa_rep->num_defs : 0;
- int uses = (ssa_rep != NULL) ? ssa_rep->num_uses : 0;
+ int defs = (ssa_rep != nullptr) ? ssa_rep->num_defs : 0;
+ int uses = (ssa_rep != nullptr) ? ssa_rep->num_uses : 0;
if (MIR::DecodedInstruction::IsPseudoMirOp(opcode)) {
// Note that this does not check the MIR's opcode in all cases. In cases where it
@@ -1530,7 +1530,7 @@ char* MIRGraph::GetDalvikDisassembly(const MIR* mir) {
for (int i = 0; i < uses; i++) {
str.append(" ");
str.append(GetSSANameWithConst(ssa_rep->uses[i], show_singles));
- if (!show_singles && (reg_location_ != NULL) && reg_location_[i].wide) {
+ if (!show_singles && (reg_location_ != nullptr) && reg_location_[i].wide) {
// For the listing, skip the high sreg.
i++;
}
@@ -1623,7 +1623,7 @@ std::string MIRGraph::GetSSAName(int ssa_reg) {
// Similar to GetSSAName, but if ssa name represents an immediate show that as well.
std::string MIRGraph::GetSSANameWithConst(int ssa_reg, bool singles_only) {
- if (reg_location_ == NULL) {
+ if (reg_location_ == nullptr) {
// Pre-SSA - just use the standard name.
return GetSSAName(ssa_reg);
}
@@ -1716,7 +1716,7 @@ CallInfo* MIRGraph::NewMemCallInfo(BasicBlock* bb, MIR* mir, InvokeType type, bo
CallInfo* info = static_cast<CallInfo*>(arena_->Alloc(sizeof(CallInfo),
kArenaAllocMisc));
MIR* move_result_mir = FindMoveResult(bb, mir);
- if (move_result_mir == NULL) {
+ if (move_result_mir == nullptr) {
info->result.location = kLocInvalid;
} else {
info->result = GetRawDest(move_result_mir);
@@ -2294,7 +2294,7 @@ bool MIR::DecodedInstruction::GetConstant(int64_t* ptr_value, bool* wide) const
void BasicBlock::ResetOptimizationFlags(uint16_t reset_flags) {
// Reset flags for all MIRs in bb.
- for (MIR* mir = first_mir_insn; mir != NULL; mir = mir->next) {
+ for (MIR* mir = first_mir_insn; mir != nullptr; mir = mir->next) {
mir->optimization_flags &= (~reset_flags);
}
}
diff --git a/compiler/dex/mir_graph.h b/compiler/dex/mir_graph.h
index d7e4dd9..0db54bf 100644
--- a/compiler/dex/mir_graph.h
+++ b/compiler/dex/mir_graph.h
@@ -602,7 +602,7 @@ class MIRGraph {
BasicBlock* GetBasicBlock(unsigned int block_id) const {
DCHECK_LT(block_id, block_list_.size()); // NOTE: NullBasicBlockId is 0.
- return (block_id == NullBasicBlockId) ? NULL : block_list_[block_id];
+ return (block_id == NullBasicBlockId) ? nullptr : block_list_[block_id];
}
size_t GetBasicBlockListCount() const {
diff --git a/compiler/dex/mir_method_info.h b/compiler/dex/mir_method_info.h
index 3706012..946c74b 100644
--- a/compiler/dex/mir_method_info.h
+++ b/compiler/dex/mir_method_info.h
@@ -88,7 +88,7 @@ class MirMethodInfo {
// The type index of the class declaring the method, 0 if unresolved.
uint16_t declaring_class_idx_;
// The dex file that defines the class containing the method and the method,
- // nullptr if unresolved.
+ // null if unresolved.
const DexFile* declaring_dex_file_;
};
@@ -223,7 +223,7 @@ class MirMethodLoweringInfo : public MirMethodInfo {
uintptr_t direct_code_;
uintptr_t direct_method_;
// Before Resolve(), target_dex_file_ and target_method_idx_ hold the verification-based
- // devirtualized invoke target if available, nullptr and 0u otherwise.
+ // devirtualized invoke target if available, null and 0u otherwise.
// After Resolve() they hold the actual target method that will be called; it will be either
// a devirtualized target method or the compilation's unit's dex file and MethodIndex().
const DexFile* target_dex_file_;
diff --git a/compiler/dex/mir_optimization.cc b/compiler/dex/mir_optimization.cc
index 546e67a..467c14e 100644
--- a/compiler/dex/mir_optimization.cc
+++ b/compiler/dex/mir_optimization.cc
@@ -55,7 +55,7 @@ void MIRGraph::SetConstantWide(int32_t ssa_reg, int64_t value) {
void MIRGraph::DoConstantPropagation(BasicBlock* bb) {
MIR* mir;
- for (mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+ for (mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
// Skip pass if BB has MIR without SSA representation.
if (mir->ssa_rep == nullptr) {
return;
@@ -116,11 +116,11 @@ void MIRGraph::DoConstantPropagation(BasicBlock* bb) {
/* Advance to next strictly dominated MIR node in an extended basic block */
MIR* MIRGraph::AdvanceMIR(BasicBlock** p_bb, MIR* mir) {
BasicBlock* bb = *p_bb;
- if (mir != NULL) {
+ if (mir != nullptr) {
mir = mir->next;
- while (mir == NULL) {
+ while (mir == nullptr) {
bb = GetBasicBlock(bb->fall_through);
- if ((bb == NULL) || Predecessors(bb) != 1) {
+ if ((bb == nullptr) || Predecessors(bb) != 1) {
// mir is null and we cannot proceed further.
break;
} else {
@@ -134,7 +134,7 @@ MIR* MIRGraph::AdvanceMIR(BasicBlock** p_bb, MIR* mir) {
/*
* To be used at an invoke mir. If the logically next mir node represents
- * a move-result, return it. Else, return NULL. If a move-result exists,
+ * a move-result, return it. Else, return nullptr. If a move-result exists,
* it is required to immediately follow the invoke with no intervening
* opcodes or incoming arcs. However, if the result of the invoke is not
* used, a move-result may not be present.
@@ -142,7 +142,7 @@ MIR* MIRGraph::AdvanceMIR(BasicBlock** p_bb, MIR* mir) {
MIR* MIRGraph::FindMoveResult(BasicBlock* bb, MIR* mir) {
BasicBlock* tbb = bb;
mir = AdvanceMIR(&tbb, mir);
- while (mir != NULL) {
+ while (mir != nullptr) {
if ((mir->dalvikInsn.opcode == Instruction::MOVE_RESULT) ||
(mir->dalvikInsn.opcode == Instruction::MOVE_RESULT_OBJECT) ||
(mir->dalvikInsn.opcode == Instruction::MOVE_RESULT_WIDE)) {
@@ -152,7 +152,7 @@ MIR* MIRGraph::FindMoveResult(BasicBlock* bb, MIR* mir) {
if (MIR::DecodedInstruction::IsPseudoMirOp(mir->dalvikInsn.opcode)) {
mir = AdvanceMIR(&tbb, mir);
} else {
- mir = NULL;
+ mir = nullptr;
}
}
return mir;
@@ -160,29 +160,29 @@ MIR* MIRGraph::FindMoveResult(BasicBlock* bb, MIR* mir) {
BasicBlock* MIRGraph::NextDominatedBlock(BasicBlock* bb) {
if (bb->block_type == kDead) {
- return NULL;
+ return nullptr;
}
DCHECK((bb->block_type == kEntryBlock) || (bb->block_type == kDalvikByteCode)
|| (bb->block_type == kExitBlock));
BasicBlock* bb_taken = GetBasicBlock(bb->taken);
BasicBlock* bb_fall_through = GetBasicBlock(bb->fall_through);
- if (((bb_fall_through == NULL) && (bb_taken != NULL)) &&
+ if (((bb_fall_through == nullptr) && (bb_taken != nullptr)) &&
((bb_taken->block_type == kDalvikByteCode) || (bb_taken->block_type == kExitBlock))) {
// Follow simple unconditional branches.
bb = bb_taken;
} else {
// Follow simple fallthrough
- bb = (bb_taken != NULL) ? NULL : bb_fall_through;
+ bb = (bb_taken != nullptr) ? nullptr : bb_fall_through;
}
- if (bb == NULL || (Predecessors(bb) != 1)) {
- return NULL;
+ if (bb == nullptr || (Predecessors(bb) != 1)) {
+ return nullptr;
}
DCHECK((bb->block_type == kDalvikByteCode) || (bb->block_type == kExitBlock));
return bb;
}
static MIR* FindPhi(BasicBlock* bb, int ssa_name) {
- for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+ for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
if (static_cast<int>(mir->dalvikInsn.opcode) == kMirOpPhi) {
for (int i = 0; i < mir->ssa_rep->num_uses; i++) {
if (mir->ssa_rep->uses[i] == ssa_name) {
@@ -191,11 +191,11 @@ static MIR* FindPhi(BasicBlock* bb, int ssa_name) {
}
}
}
- return NULL;
+ return nullptr;
}
static SelectInstructionKind SelectKind(MIR* mir) {
- // Work with the case when mir is nullptr.
+ // Work with the case when mir is null.
if (mir == nullptr) {
return kSelectNone;
}
@@ -256,7 +256,8 @@ size_t MIRGraph::GetNumAvailableVRTemps() {
}
// Calculate remaining ME temps available.
- size_t remaining_me_temps = max_available_non_special_compiler_temps_ - reserved_temps_for_backend_;
+ size_t remaining_me_temps = max_available_non_special_compiler_temps_ -
+ reserved_temps_for_backend_;
if (num_non_special_compiler_temps_ >= remaining_me_temps) {
return 0;
@@ -347,7 +348,8 @@ CompilerTemp* MIRGraph::GetNewCompilerTemp(CompilerTempType ct_type, bool wide)
size_t available_temps = GetNumAvailableVRTemps();
if (available_temps <= 0 || (available_temps <= 1 && wide)) {
if (verbose) {
- LOG(INFO) << "CompilerTemps: Not enough temp(s) of type " << ct_type_str << " are available.";
+ LOG(INFO) << "CompilerTemps: Not enough temp(s) of type " << ct_type_str
+ << " are available.";
}
return nullptr;
}
@@ -365,8 +367,8 @@ CompilerTemp* MIRGraph::GetNewCompilerTemp(CompilerTempType ct_type, bool wide)
compiler_temp->s_reg_low = AddNewSReg(compiler_temp->v_reg);
if (verbose) {
- LOG(INFO) << "CompilerTemps: New temp of type " << ct_type_str << " with v" << compiler_temp->v_reg
- << " and s" << compiler_temp->s_reg_low << " has been created.";
+ LOG(INFO) << "CompilerTemps: New temp of type " << ct_type_str << " with v"
+ << compiler_temp->v_reg << " and s" << compiler_temp->s_reg_low << " has been created.";
}
if (wide) {
@@ -478,8 +480,8 @@ bool MIRGraph::BasicBlockOpt(BasicBlock* bb) {
local_valnum.reset(new (allocator.get()) LocalValueNumbering(global_valnum.get(), bb->id,
allocator.get()));
}
- while (bb != NULL) {
- for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+ while (bb != nullptr) {
+ for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
// TUNING: use the returned value number for CSE.
if (use_lvn) {
local_valnum->GetValueNumber(mir);
@@ -538,7 +540,7 @@ bool MIRGraph::BasicBlockOpt(BasicBlock* bb) {
// Bitcode doesn't allow this optimization.
break;
}
- if (mir->next != NULL) {
+ if (mir->next != nullptr) {
MIR* mir_next = mir->next;
// Make sure result of cmp is used by next insn and nowhere else
if (IsInstructionIfCcZ(mir_next->dalvikInsn.opcode) &&
@@ -594,12 +596,12 @@ bool MIRGraph::BasicBlockOpt(BasicBlock* bb) {
cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) &&
IsInstructionIfCcZ(mir->dalvikInsn.opcode)) {
BasicBlock* ft = GetBasicBlock(bb->fall_through);
- DCHECK(ft != NULL);
+ DCHECK(ft != nullptr);
BasicBlock* ft_ft = GetBasicBlock(ft->fall_through);
BasicBlock* ft_tk = GetBasicBlock(ft->taken);
BasicBlock* tk = GetBasicBlock(bb->taken);
- DCHECK(tk != NULL);
+ DCHECK(tk != nullptr);
BasicBlock* tk_ft = GetBasicBlock(tk->fall_through);
BasicBlock* tk_tk = GetBasicBlock(tk->taken);
@@ -608,7 +610,7 @@ bool MIRGraph::BasicBlockOpt(BasicBlock* bb) {
* transfers to the rejoin block and the fall_though edge goes to a block that
* unconditionally falls through to the rejoin block.
*/
- if ((tk_ft == NULL) && (ft_tk == NULL) && (tk_tk == ft_ft) &&
+ if ((tk_ft == nullptr) && (ft_tk == nullptr) && (tk_tk == ft_ft) &&
(Predecessors(tk) == 1) && (Predecessors(ft) == 1)) {
/*
* Okay - we have the basic diamond shape.
@@ -628,7 +630,7 @@ bool MIRGraph::BasicBlockOpt(BasicBlock* bb) {
MIR* if_false = ft->first_mir_insn;
// It's possible that the target of the select isn't used - skip those (rare) cases.
MIR* phi = FindPhi(tk_tk, if_true->ssa_rep->defs[0]);
- if ((phi != NULL) && (if_true->dalvikInsn.vA == if_false->dalvikInsn.vA)) {
+ if ((phi != nullptr) && (if_true->dalvikInsn.vA == if_false->dalvikInsn.vA)) {
/*
* We'll convert the IF_EQZ/IF_NEZ to a SELECT. We need to find the
* Phi node in the merge block and delete it (while using the SSA name
@@ -712,7 +714,8 @@ bool MIRGraph::BasicBlockOpt(BasicBlock* bb) {
}
}
}
- bb = ((cu_->disable_opt & (1 << kSuppressExceptionEdges)) != 0) ? NextDominatedBlock(bb) : NULL;
+ bb = ((cu_->disable_opt & (1 << kSuppressExceptionEdges)) != 0) ? NextDominatedBlock(bb) :
+ nullptr;
}
if (use_lvn && UNLIKELY(!global_valnum->Good())) {
LOG(WARNING) << "LVN overflow in " << PrettyMethod(cu_->method_idx, *cu_->dex_file);
@@ -723,9 +726,9 @@ bool MIRGraph::BasicBlockOpt(BasicBlock* bb) {
/* Collect stats on number of checks removed */
void MIRGraph::CountChecks(class BasicBlock* bb) {
- if (bb->data_flow_info != NULL) {
- for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
- if (mir->ssa_rep == NULL) {
+ if (bb->data_flow_info != nullptr) {
+ for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
+ if (mir->ssa_rep == nullptr) {
continue;
}
uint64_t df_attributes = GetDataFlowAttributes(mir);
@@ -926,7 +929,7 @@ bool MIRGraph::EliminateNullChecksGate() {
// reset MIR_MARK
AllNodesIterator iter(this);
for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
- for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+ for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
mir->optimization_flags &= ~MIR_MARK;
}
}
@@ -1001,7 +1004,7 @@ bool MIRGraph::EliminateNullChecks(BasicBlock* bb) {
// no intervening uses.
// Walk through the instruction in the block, updating as necessary
- for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+ for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
uint64_t df_attributes = GetDataFlowAttributes(mir);
if ((df_attributes & DF_NULL_TRANSFER_N) != 0u) {
@@ -1112,7 +1115,7 @@ void MIRGraph::EliminateNullChecksEnd() {
// converge MIR_MARK with MIR_IGNORE_NULL_CHECK
AllNodesIterator iter(this);
for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
- for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+ for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
constexpr int kMarkToIgnoreNullCheckShift = kMIRMark - kMIRIgnoreNullCheck;
static_assert(kMarkToIgnoreNullCheckShift > 0, "Not a valid right-shift");
uint16_t mirMarkAdjustedToIgnoreNullCheck =
@@ -1503,7 +1506,7 @@ void MIRGraph::InlineSpecialMethods(BasicBlock* bb) {
if (bb->block_type != kDalvikByteCode) {
return;
}
- for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+ for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
if (MIR::DecodedInstruction::IsPseudoMirOp(mir->dalvikInsn.opcode)) {
continue;
}
@@ -1534,7 +1537,8 @@ void MIRGraph::InlineSpecialMethods(BasicBlock* bb) {
->GenInline(this, bb, mir, target.dex_method_index)) {
if (cu_->verbose || cu_->print_pass) {
LOG(INFO) << "SpecialMethodInliner: Inlined " << method_info.GetInvokeType() << " ("
- << sharp_type << ") call to \"" << PrettyMethod(target.dex_method_index, *target.dex_file)
+ << sharp_type << ") call to \"" << PrettyMethod(target.dex_method_index,
+ *target.dex_file)
<< "\" from \"" << PrettyMethod(cu_->method_idx, *cu_->dex_file)
<< "\" @0x" << std::hex << mir->offset;
}
@@ -1558,7 +1562,7 @@ void MIRGraph::DumpCheckStats() {
static_cast<Checkstats*>(arena_->Alloc(sizeof(Checkstats), kArenaAllocDFInfo));
checkstats_ = stats;
AllNodesIterator iter(this);
- for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
+ for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
CountChecks(bb);
}
if (stats->null_checks > 0) {
@@ -1591,7 +1595,7 @@ bool MIRGraph::BuildExtendedBBList(class BasicBlock* bb) {
bool terminated_by_return = false;
bool do_local_value_numbering = false;
// Visit blocks strictly dominated by this head.
- while (bb != NULL) {
+ while (bb != nullptr) {
bb->visited = true;
terminated_by_return |= bb->terminated_by_return;
do_local_value_numbering |= bb->use_lvn;
@@ -1600,7 +1604,7 @@ bool MIRGraph::BuildExtendedBBList(class BasicBlock* bb) {
if (terminated_by_return || do_local_value_numbering) {
// Do lvn for all blocks in this extended set.
bb = start_bb;
- while (bb != NULL) {
+ while (bb != nullptr) {
bb->use_lvn = do_local_value_numbering;
bb->dominates_return = terminated_by_return;
bb = NextDominatedBlock(bb);
@@ -1623,7 +1627,7 @@ void MIRGraph::BasicBlockOptimization() {
if ((cu_->disable_opt & (1 << kSuppressExceptionEdges)) != 0) {
ClearAllVisitedFlags();
PreOrderDfsIterator iter2(this);
- for (BasicBlock* bb = iter2.Next(); bb != NULL; bb = iter2.Next()) {
+ for (BasicBlock* bb = iter2.Next(); bb != nullptr; bb = iter2.Next()) {
BuildExtendedBBList(bb);
}
// Perform extended basic block optimizations.
@@ -1632,7 +1636,7 @@ void MIRGraph::BasicBlockOptimization() {
}
} else {
PreOrderDfsIterator iter(this);
- for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
+ for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
BasicBlockOpt(bb);
}
}
diff --git a/compiler/dex/pass_driver.h b/compiler/dex/pass_driver.h
index 671bcec..8762b53 100644
--- a/compiler/dex/pass_driver.h
+++ b/compiler/dex/pass_driver.h
@@ -68,7 +68,7 @@ class PassDriver {
* @return whether the pass was applied.
*/
virtual bool RunPass(const char* pass_name) {
- // Paranoid: c_unit cannot be nullptr and we need a pass name.
+ // Paranoid: c_unit cannot be null and we need a pass name.
DCHECK(pass_name != nullptr);
DCHECK_NE(pass_name[0], 0);
diff --git a/compiler/dex/pass_driver_me.h b/compiler/dex/pass_driver_me.h
index 94eef22..cbe4a02 100644
--- a/compiler/dex/pass_driver_me.h
+++ b/compiler/dex/pass_driver_me.h
@@ -88,7 +88,7 @@ class PassDriverME: public PassDriver {
}
bool RunPass(const Pass* pass, bool time_split) OVERRIDE {
- // Paranoid: c_unit and pass cannot be nullptr, and the pass should have a name
+ // Paranoid: c_unit and pass cannot be null, and the pass should have a name.
DCHECK(pass != nullptr);
DCHECK(pass->GetName() != nullptr && pass->GetName()[0] != 0);
CompilationUnit* c_unit = pass_me_data_holder_.c_unit;
@@ -211,8 +211,9 @@ class PassDriverME: public PassDriver {
* @param settings_to_fill Fills the options to contain the mapping of name of option to the new
* configuration.
*/
- static void FillOverriddenPassSettings(const PassManagerOptions* options, const char* pass_name,
- SafeMap<const std::string, const OptionContent>& settings_to_fill) {
+ static void FillOverriddenPassSettings(
+ const PassManagerOptions* options, const char* pass_name,
+ SafeMap<const std::string, const OptionContent>& settings_to_fill) {
const std::string& settings = options->GetOverriddenPassOptions();
const size_t settings_len = settings.size();
diff --git a/compiler/dex/quick/arm/assemble_arm.cc b/compiler/dex/quick/arm/assemble_arm.cc
index c5ac4c1..df4a9f2 100644
--- a/compiler/dex/quick/arm/assemble_arm.cc
+++ b/compiler/dex/quick/arm/assemble_arm.cc
@@ -1055,7 +1055,7 @@ const ArmEncodingMap ArmMir2Lir::EncodingMap[kArmLast] = {
// new_lir replaces orig_lir in the pcrel_fixup list.
void ArmMir2Lir::ReplaceFixup(LIR* prev_lir, LIR* orig_lir, LIR* new_lir) {
new_lir->u.a.pcrel_next = orig_lir->u.a.pcrel_next;
- if (UNLIKELY(prev_lir == NULL)) {
+ if (UNLIKELY(prev_lir == nullptr)) {
first_fixup_ = new_lir;
} else {
prev_lir->u.a.pcrel_next = new_lir;
@@ -1066,7 +1066,7 @@ void ArmMir2Lir::ReplaceFixup(LIR* prev_lir, LIR* orig_lir, LIR* new_lir) {
// new_lir is inserted before orig_lir in the pcrel_fixup list.
void ArmMir2Lir::InsertFixupBefore(LIR* prev_lir, LIR* orig_lir, LIR* new_lir) {
new_lir->u.a.pcrel_next = orig_lir;
- if (UNLIKELY(prev_lir == NULL)) {
+ if (UNLIKELY(prev_lir == nullptr)) {
first_fixup_ = new_lir;
} else {
DCHECK(prev_lir->u.a.pcrel_next == orig_lir);
@@ -1084,7 +1084,7 @@ void ArmMir2Lir::InsertFixupBefore(LIR* prev_lir, LIR* orig_lir, LIR* new_lir) {
uint8_t* ArmMir2Lir::EncodeLIRs(uint8_t* write_pos, LIR* lir) {
uint8_t* const write_buffer = write_pos;
- for (; lir != NULL; lir = NEXT_LIR(lir)) {
+ for (; lir != nullptr; lir = NEXT_LIR(lir)) {
lir->offset = (write_pos - write_buffer);
if (!lir->flags.is_nop) {
int opcode = lir->opcode;
@@ -1258,8 +1258,8 @@ void ArmMir2Lir::AssembleLIR() {
generation ^= 1;
// Note: nodes requring possible fixup linked in ascending order.
lir = first_fixup_;
- prev_lir = NULL;
- while (lir != NULL) {
+ prev_lir = nullptr;
+ while (lir != nullptr) {
/*
* NOTE: the lir being considered here will be encoded following the switch (so long as
* we're not in a retry situation). However, any new non-pc_rel instructions inserted
@@ -1506,7 +1506,7 @@ void ArmMir2Lir::AssembleLIR() {
case kFixupAdr: {
const EmbeddedData* tab_rec = UnwrapPointer<EmbeddedData>(lir->operands[2]);
LIR* target = lir->target;
- int32_t target_disp = (tab_rec != NULL) ? tab_rec->offset + offset_adjustment
+ int32_t target_disp = (tab_rec != nullptr) ? tab_rec->offset + offset_adjustment
: target->offset + ((target->flags.generation == lir->flags.generation) ? 0 :
offset_adjustment);
int32_t disp = target_disp - ((lir->offset + 4) & ~3);
@@ -1642,7 +1642,7 @@ size_t ArmMir2Lir::GetInsnSize(LIR* lir) {
uint32_t ArmMir2Lir::LinkFixupInsns(LIR* head_lir, LIR* tail_lir, uint32_t offset) {
LIR* end_lir = tail_lir->next;
- LIR* last_fixup = NULL;
+ LIR* last_fixup = nullptr;
for (LIR* lir = head_lir; lir != end_lir; lir = NEXT_LIR(lir)) {
if (!lir->flags.is_nop) {
if (lir->flags.fixup != kFixupNone) {
@@ -1658,8 +1658,8 @@ uint32_t ArmMir2Lir::LinkFixupInsns(LIR* head_lir, LIR* tail_lir, uint32_t offse
}
// Link into the fixup chain.
lir->flags.use_def_invalid = true;
- lir->u.a.pcrel_next = NULL;
- if (first_fixup_ == NULL) {
+ lir->u.a.pcrel_next = nullptr;
+ if (first_fixup_ == nullptr) {
first_fixup_ = lir;
} else {
last_fixup->u.a.pcrel_next = lir;
diff --git a/compiler/dex/quick/arm/call_arm.cc b/compiler/dex/quick/arm/call_arm.cc
index 3d18af6..6ba4016 100644
--- a/compiler/dex/quick/arm/call_arm.cc
+++ b/compiler/dex/quick/arm/call_arm.cc
@@ -124,7 +124,7 @@ void ArmMir2Lir::GenLargePackedSwitch(MIR* mir, uint32_t table_offset, RegLocati
}
// Bounds check - if < 0 or >= size continue following switch
OpRegImm(kOpCmp, keyReg, size-1);
- LIR* branch_over = OpCondBranch(kCondHi, NULL);
+ LIR* branch_over = OpCondBranch(kCondHi, nullptr);
// Load the displacement from the switch table
RegStorage disp_reg = AllocTemp();
@@ -156,7 +156,7 @@ void ArmMir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src) {
} else {
// If the null-check fails its handled by the slow-path to reduce exception related meta-data.
if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) {
- null_check_branch = OpCmpImmBranch(kCondEq, rs_r0, 0, NULL);
+ null_check_branch = OpCmpImmBranch(kCondEq, rs_r0, 0, nullptr);
}
}
Load32Disp(rs_rARM_SELF, Thread::ThinLockIdOffset<4>().Int32Value(), rs_r2);
@@ -165,12 +165,12 @@ void ArmMir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src) {
MarkPossibleNullPointerException(opt_flags);
// Zero out the read barrier bits.
OpRegRegImm(kOpAnd, rs_r3, rs_r1, LockWord::kReadBarrierStateMaskShiftedToggled);
- LIR* not_unlocked_branch = OpCmpImmBranch(kCondNe, rs_r3, 0, NULL);
+ LIR* not_unlocked_branch = OpCmpImmBranch(kCondNe, rs_r3, 0, nullptr);
// r1 is zero except for the rb bits here. Copy the read barrier bits into r2.
OpRegRegReg(kOpOr, rs_r2, rs_r2, rs_r1);
NewLIR4(kThumb2Strex, rs_r1.GetReg(), rs_r2.GetReg(), rs_r0.GetReg(),
mirror::Object::MonitorOffset().Int32Value() >> 2);
- LIR* lock_success_branch = OpCmpImmBranch(kCondEq, rs_r1, 0, NULL);
+ LIR* lock_success_branch = OpCmpImmBranch(kCondEq, rs_r1, 0, nullptr);
LIR* slow_path_target = NewLIR0(kPseudoTargetLabel);
@@ -238,7 +238,7 @@ void ArmMir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src) {
} else {
// If the null-check fails its handled by the slow-path to reduce exception related meta-data.
if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) {
- null_check_branch = OpCmpImmBranch(kCondEq, rs_r0, 0, NULL);
+ null_check_branch = OpCmpImmBranch(kCondEq, rs_r0, 0, nullptr);
}
}
if (!kUseReadBarrier) {
@@ -252,16 +252,16 @@ void ArmMir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src) {
OpRegRegImm(kOpAnd, rs_r3, rs_r1, LockWord::kReadBarrierStateMaskShiftedToggled);
// Zero out except the read barrier bits.
OpRegRegImm(kOpAnd, rs_r1, rs_r1, LockWord::kReadBarrierStateMaskShifted);
- LIR* slow_unlock_branch = OpCmpBranch(kCondNe, rs_r3, rs_r2, NULL);
+ LIR* slow_unlock_branch = OpCmpBranch(kCondNe, rs_r3, rs_r2, nullptr);
GenMemBarrier(kAnyStore);
LIR* unlock_success_branch;
if (!kUseReadBarrier) {
Store32Disp(rs_r0, mirror::Object::MonitorOffset().Int32Value(), rs_r1);
- unlock_success_branch = OpUnconditionalBranch(NULL);
+ unlock_success_branch = OpUnconditionalBranch(nullptr);
} else {
NewLIR4(kThumb2Strex, rs_r2.GetReg(), rs_r1.GetReg(), rs_r0.GetReg(),
mirror::Object::MonitorOffset().Int32Value() >> 2);
- unlock_success_branch = OpCmpImmBranch(kCondEq, rs_r2, 0, NULL);
+ unlock_success_branch = OpCmpImmBranch(kCondEq, rs_r2, 0, nullptr);
}
LIR* slow_path_target = NewLIR0(kPseudoTargetLabel);
slow_unlock_branch->target = slow_path_target;
diff --git a/compiler/dex/quick/arm/int_arm.cc b/compiler/dex/quick/arm/int_arm.cc
index 62903af..8d20f1b 100644
--- a/compiler/dex/quick/arm/int_arm.cc
+++ b/compiler/dex/quick/arm/int_arm.cc
@@ -138,10 +138,10 @@ void ArmMir2Lir::GenCmpLong(RegLocation rl_dest, RegLocation rl_src1, RegLocatio
RegStorage t_reg = AllocTemp();
LoadConstant(t_reg, -1);
OpRegReg(kOpCmp, rl_src1.reg.GetHigh(), rl_src2.reg.GetHigh());
- LIR* branch1 = OpCondBranch(kCondLt, NULL);
- LIR* branch2 = OpCondBranch(kCondGt, NULL);
+ LIR* branch1 = OpCondBranch(kCondLt, nullptr);
+ LIR* branch2 = OpCondBranch(kCondGt, nullptr);
OpRegRegReg(kOpSub, t_reg, rl_src1.reg.GetLow(), rl_src2.reg.GetLow());
- LIR* branch3 = OpCondBranch(kCondEq, NULL);
+ LIR* branch3 = OpCondBranch(kCondEq, nullptr);
LIR* it = OpIT(kCondHi, "E");
NewLIR2(kThumb2MovI8M, t_reg.GetReg(), ModifiedImmediate(-1));
@@ -389,7 +389,7 @@ LIR* ArmMir2Lir::OpCmpImmBranch(ConditionCode cond, RegStorage reg, int check_va
* generate the long form in an attempt to avoid an extra assembly pass.
* TODO: consider interspersing slowpaths in code following unconditional branches.
*/
- bool skip = ((target != NULL) && (target->opcode == kPseudoThrowTarget));
+ bool skip = ((target != nullptr) && (target->opcode == kPseudoThrowTarget));
skip &= ((mir_graph_->GetNumDalvikInsns() - current_dalvik_offset_) > 64);
if (!skip && reg.Low8() && (check_value == 0)) {
if (arm_cond == kArmCondEq || arm_cond == kArmCondNe) {
@@ -1159,12 +1159,12 @@ void ArmMir2Lir::GenDivZeroCheckWide(RegStorage reg) {
LIR* ArmMir2Lir::OpTestSuspend(LIR* target) {
#ifdef ARM_R4_SUSPEND_FLAG
NewLIR2(kThumbSubRI8, rs_rARM_SUSPEND.GetReg(), 1);
- return OpCondBranch((target == NULL) ? kCondEq : kCondNe, target);
+ return OpCondBranch((target == nullptr) ? kCondEq : kCondNe, target);
#else
RegStorage t_reg = AllocTemp();
LoadBaseDisp(rs_rARM_SELF, Thread::ThreadFlagsOffset<4>().Int32Value(),
t_reg, kUnsignedHalf, kNotVolatile);
- LIR* cmp_branch = OpCmpImmBranch((target == NULL) ? kCondNe : kCondEq, t_reg,
+ LIR* cmp_branch = OpCmpImmBranch((target == nullptr) ? kCondNe : kCondEq, t_reg,
0, target);
FreeTemp(t_reg);
return cmp_branch;
diff --git a/compiler/dex/quick/arm/utility_arm.cc b/compiler/dex/quick/arm/utility_arm.cc
index 25ea694..2ef92f8 100644
--- a/compiler/dex/quick/arm/utility_arm.cc
+++ b/compiler/dex/quick/arm/utility_arm.cc
@@ -90,7 +90,7 @@ LIR* ArmMir2Lir::LoadFPConstantValue(int r_dest, int value) {
}
}
LIR* data_target = ScanLiteralPool(literal_list_, value, 0);
- if (data_target == NULL) {
+ if (data_target == nullptr) {
data_target = AddWordData(&literal_list_, value);
}
ScopedMemRefType mem_ref_type(this, ResourceMask::kLiteral);
@@ -411,7 +411,7 @@ LIR* ArmMir2Lir::OpRegRegShift(OpKind op, RegStorage r_dest_src1, RegStorage r_s
return NewLIR4(opcode, r_dest_src1.GetReg(), r_dest_src1.GetReg(), r_src2.GetReg(), shift);
} else {
LOG(FATAL) << "Unexpected encoding operand count";
- return NULL;
+ return nullptr;
}
}
@@ -695,7 +695,7 @@ LIR* ArmMir2Lir::OpRegImm(OpKind op, RegStorage r_dest_src1, int value) {
}
LIR* ArmMir2Lir::LoadConstantWide(RegStorage r_dest, int64_t value) {
- LIR* res = NULL;
+ LIR* res = nullptr;
int32_t val_lo = Low32Bits(value);
int32_t val_hi = High32Bits(value);
if (r_dest.IsFloat()) {
@@ -721,10 +721,10 @@ LIR* ArmMir2Lir::LoadConstantWide(RegStorage r_dest, int64_t value) {
LoadConstantNoClobber(r_dest.GetHigh(), val_hi);
}
}
- if (res == NULL) {
+ if (res == nullptr) {
// No short form - load from the literal pool.
LIR* data_target = ScanLiteralPoolWide(literal_list_, val_lo, val_hi);
- if (data_target == NULL) {
+ if (data_target == nullptr) {
data_target = AddWideData(&literal_list_, val_lo, val_hi);
}
ScopedMemRefType mem_ref_type(this, ResourceMask::kLiteral);
@@ -814,7 +814,7 @@ LIR* ArmMir2Lir::LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStora
LIR* ArmMir2Lir::StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src,
int scale, OpSize size) {
bool all_low_regs = r_base.Low8() && r_index.Low8() && r_src.Low8();
- LIR* store = NULL;
+ LIR* store = nullptr;
ArmOpcode opcode = kThumbBkpt;
bool thumb_form = (all_low_regs && (scale == 0));
RegStorage reg_ptr;
diff --git a/compiler/dex/quick/arm64/assemble_arm64.cc b/compiler/dex/quick/arm64/assemble_arm64.cc
index 2f1ae66..b78fb80 100644
--- a/compiler/dex/quick/arm64/assemble_arm64.cc
+++ b/compiler/dex/quick/arm64/assemble_arm64.cc
@@ -663,7 +663,7 @@ const A64EncodingMap Arm64Mir2Lir::EncodingMap[kA64Last] = {
// new_lir replaces orig_lir in the pcrel_fixup list.
void Arm64Mir2Lir::ReplaceFixup(LIR* prev_lir, LIR* orig_lir, LIR* new_lir) {
new_lir->u.a.pcrel_next = orig_lir->u.a.pcrel_next;
- if (UNLIKELY(prev_lir == NULL)) {
+ if (UNLIKELY(prev_lir == nullptr)) {
first_fixup_ = new_lir;
} else {
prev_lir->u.a.pcrel_next = new_lir;
@@ -674,7 +674,7 @@ void Arm64Mir2Lir::ReplaceFixup(LIR* prev_lir, LIR* orig_lir, LIR* new_lir) {
// new_lir is inserted before orig_lir in the pcrel_fixup list.
void Arm64Mir2Lir::InsertFixupBefore(LIR* prev_lir, LIR* orig_lir, LIR* new_lir) {
new_lir->u.a.pcrel_next = orig_lir;
- if (UNLIKELY(prev_lir == NULL)) {
+ if (UNLIKELY(prev_lir == nullptr)) {
first_fixup_ = new_lir;
} else {
DCHECK(prev_lir->u.a.pcrel_next == orig_lir);
@@ -889,8 +889,8 @@ void Arm64Mir2Lir::AssembleLIR() {
generation ^= 1;
// Note: nodes requiring possible fixup linked in ascending order.
lir = first_fixup_;
- prev_lir = NULL;
- while (lir != NULL) {
+ prev_lir = nullptr;
+ while (lir != nullptr) {
// NOTE: Any new non-pc_rel instructions inserted due to retry must be explicitly encoded at
// the time of insertion. Note that inserted instructions don't need use/def flags, but do
// need size and pc-rel status properly updated.
@@ -1037,7 +1037,7 @@ void Arm64Mir2Lir::AssembleLIR() {
// Check that the instruction preceding the multiply-accumulate is a load or store.
if ((prev_insn_flags & IS_LOAD) != 0 || (prev_insn_flags & IS_STORE) != 0) {
// insert a NOP between the load/store and the multiply-accumulate.
- LIR* new_lir = RawLIR(lir->dalvik_offset, kA64Nop0, 0, 0, 0, 0, 0, NULL);
+ LIR* new_lir = RawLIR(lir->dalvik_offset, kA64Nop0, 0, 0, 0, 0, 0, nullptr);
new_lir->offset = lir->offset;
new_lir->flags.fixup = kFixupNone;
new_lir->flags.size = EncodingMap[kA64Nop0].size;
@@ -1108,7 +1108,7 @@ size_t Arm64Mir2Lir::GetInsnSize(LIR* lir) {
uint32_t Arm64Mir2Lir::LinkFixupInsns(LIR* head_lir, LIR* tail_lir, uint32_t offset) {
LIR* end_lir = tail_lir->next;
- LIR* last_fixup = NULL;
+ LIR* last_fixup = nullptr;
for (LIR* lir = head_lir; lir != end_lir; lir = NEXT_LIR(lir)) {
A64Opcode opcode = UNWIDE(lir->opcode);
if (!lir->flags.is_nop) {
@@ -1123,8 +1123,8 @@ uint32_t Arm64Mir2Lir::LinkFixupInsns(LIR* head_lir, LIR* tail_lir, uint32_t off
}
// Link into the fixup chain.
lir->flags.use_def_invalid = true;
- lir->u.a.pcrel_next = NULL;
- if (first_fixup_ == NULL) {
+ lir->u.a.pcrel_next = nullptr;
+ if (first_fixup_ == nullptr) {
first_fixup_ = lir;
} else {
last_fixup->u.a.pcrel_next = lir;
diff --git a/compiler/dex/quick/arm64/call_arm64.cc b/compiler/dex/quick/arm64/call_arm64.cc
index 4abbd77..9a7c2ad 100644
--- a/compiler/dex/quick/arm64/call_arm64.cc
+++ b/compiler/dex/quick/arm64/call_arm64.cc
@@ -127,7 +127,7 @@ void Arm64Mir2Lir::GenLargePackedSwitch(MIR* mir, uint32_t table_offset, RegLoca
}
// Bounds check - if < 0 or >= size continue following switch
OpRegImm(kOpCmp, key_reg, size - 1);
- LIR* branch_over = OpCondBranch(kCondHi, NULL);
+ LIR* branch_over = OpCondBranch(kCondHi, nullptr);
// Load the displacement from the switch table
RegStorage disp_reg = AllocTemp();
@@ -167,7 +167,7 @@ void Arm64Mir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src) {
} else {
// If the null-check fails its handled by the slow-path to reduce exception related meta-data.
if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) {
- null_check_branch = OpCmpImmBranch(kCondEq, rs_x0, 0, NULL);
+ null_check_branch = OpCmpImmBranch(kCondEq, rs_x0, 0, nullptr);
}
}
Load32Disp(rs_xSELF, Thread::ThinLockIdOffset<8>().Int32Value(), rs_w1);
@@ -176,12 +176,12 @@ void Arm64Mir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src) {
MarkPossibleNullPointerException(opt_flags);
// Zero out the read barrier bits.
OpRegRegImm(kOpAnd, rs_w2, rs_w3, LockWord::kReadBarrierStateMaskShiftedToggled);
- LIR* not_unlocked_branch = OpCmpImmBranch(kCondNe, rs_w2, 0, NULL);
+ LIR* not_unlocked_branch = OpCmpImmBranch(kCondNe, rs_w2, 0, nullptr);
// w3 is zero except for the rb bits here. Copy the read barrier bits into w1.
OpRegRegReg(kOpOr, rs_w1, rs_w1, rs_w3);
OpRegRegImm(kOpAdd, rs_x2, rs_x0, mirror::Object::MonitorOffset().Int32Value());
NewLIR3(kA64Stxr3wrX, rw3, rw1, rx2);
- LIR* lock_success_branch = OpCmpImmBranch(kCondEq, rs_w3, 0, NULL);
+ LIR* lock_success_branch = OpCmpImmBranch(kCondEq, rs_w3, 0, nullptr);
LIR* slow_path_target = NewLIR0(kPseudoTargetLabel);
not_unlocked_branch->target = slow_path_target;
@@ -220,7 +220,7 @@ void Arm64Mir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src) {
} else {
// If the null-check fails its handled by the slow-path to reduce exception related meta-data.
if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) {
- null_check_branch = OpCmpImmBranch(kCondEq, rs_x0, 0, NULL);
+ null_check_branch = OpCmpImmBranch(kCondEq, rs_x0, 0, nullptr);
}
}
Load32Disp(rs_xSELF, Thread::ThinLockIdOffset<8>().Int32Value(), rs_w1);
@@ -235,16 +235,16 @@ void Arm64Mir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src) {
OpRegRegImm(kOpAnd, rs_w3, rs_w2, LockWord::kReadBarrierStateMaskShiftedToggled);
// Zero out except the read barrier bits.
OpRegRegImm(kOpAnd, rs_w2, rs_w2, LockWord::kReadBarrierStateMaskShifted);
- LIR* slow_unlock_branch = OpCmpBranch(kCondNe, rs_w3, rs_w1, NULL);
+ LIR* slow_unlock_branch = OpCmpBranch(kCondNe, rs_w3, rs_w1, nullptr);
GenMemBarrier(kAnyStore);
LIR* unlock_success_branch;
if (!kUseReadBarrier) {
Store32Disp(rs_x0, mirror::Object::MonitorOffset().Int32Value(), rs_w2);
- unlock_success_branch = OpUnconditionalBranch(NULL);
+ unlock_success_branch = OpUnconditionalBranch(nullptr);
} else {
OpRegRegImm(kOpAdd, rs_x3, rs_x0, mirror::Object::MonitorOffset().Int32Value());
NewLIR3(kA64Stxr3wrX, rw1, rw2, rx3);
- unlock_success_branch = OpCmpImmBranch(kCondEq, rs_w1, 0, NULL);
+ unlock_success_branch = OpCmpImmBranch(kCondEq, rs_w1, 0, nullptr);
}
LIR* slow_path_target = NewLIR0(kPseudoTargetLabel);
slow_unlock_branch->target = slow_path_target;
diff --git a/compiler/dex/quick/arm64/int_arm64.cc b/compiler/dex/quick/arm64/int_arm64.cc
index b7dbd0a..9340d01 100644
--- a/compiler/dex/quick/arm64/int_arm64.cc
+++ b/compiler/dex/quick/arm64/int_arm64.cc
@@ -803,7 +803,7 @@ bool Arm64Mir2Lir::GenInlinedCas(CallInfo* info, bool is_long, bool is_object) {
NewLIR2(kA64Ldaxr2rX | wide, r_tmp_stored.GetReg(), r_ptr.GetReg());
OpRegReg(kOpCmp, r_tmp, rl_expected.reg);
DCHECK(last_lir_insn_->u.m.def_mask->HasBit(ResourceMask::kCCode));
- LIR* early_exit = OpCondBranch(kCondNe, NULL);
+ LIR* early_exit = OpCondBranch(kCondNe, nullptr);
NewLIR3(kA64Stlxr3wrX | wide, r_tmp32.GetReg(), rl_new_value_stored.GetReg(), r_ptr.GetReg());
NewLIR3(kA64Cmp3RdT, r_tmp32.GetReg(), 0, ENCODE_NO_SHIFT);
DCHECK(last_lir_insn_->u.m.def_mask->HasBit(ResourceMask::kCCode));
diff --git a/compiler/dex/quick/arm64/utility_arm64.cc b/compiler/dex/quick/arm64/utility_arm64.cc
index e9ad8ba..483231f 100644
--- a/compiler/dex/quick/arm64/utility_arm64.cc
+++ b/compiler/dex/quick/arm64/utility_arm64.cc
@@ -121,7 +121,7 @@ LIR* Arm64Mir2Lir::LoadFPConstantValue(RegStorage r_dest, int32_t value) {
}
LIR* data_target = ScanLiteralPool(literal_list_, value, 0);
- if (data_target == NULL) {
+ if (data_target == nullptr) {
// Wide, as we need 8B alignment.
data_target = AddWideData(&literal_list_, value, 0);
}
@@ -148,7 +148,7 @@ LIR* Arm64Mir2Lir::LoadFPConstantValueWide(RegStorage r_dest, int64_t value) {
int32_t val_lo = Low32Bits(value);
int32_t val_hi = High32Bits(value);
LIR* data_target = ScanLiteralPoolWide(literal_list_, val_lo, val_hi);
- if (data_target == NULL) {
+ if (data_target == nullptr) {
data_target = AddWideData(&literal_list_, val_lo, val_hi);
}
@@ -525,7 +525,7 @@ LIR* Arm64Mir2Lir::LoadConstantWide(RegStorage r_dest, int64_t value) {
int32_t val_lo = Low32Bits(value);
int32_t val_hi = High32Bits(value);
LIR* data_target = ScanLiteralPoolWide(literal_list_, val_lo, val_hi);
- if (data_target == NULL) {
+ if (data_target == nullptr) {
data_target = AddWideData(&literal_list_, val_lo, val_hi);
}
@@ -624,7 +624,7 @@ LIR* Arm64Mir2Lir::OpRegRegShift(OpKind op, RegStorage r_dest_src1, RegStorage r
}
LOG(FATAL) << "Unexpected encoding operand count";
- return NULL;
+ return nullptr;
}
LIR* Arm64Mir2Lir::OpRegRegExtend(OpKind op, RegStorage r_dest_src1, RegStorage r_src2,
@@ -658,7 +658,7 @@ LIR* Arm64Mir2Lir::OpRegRegExtend(OpKind op, RegStorage r_dest_src1, RegStorage
}
LOG(FATAL) << "Unexpected encoding operand count";
- return NULL;
+ return nullptr;
}
LIR* Arm64Mir2Lir::OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2) {
@@ -1190,7 +1190,7 @@ LIR* Arm64Mir2Lir::StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegSt
*/
LIR* Arm64Mir2Lir::LoadBaseDispBody(RegStorage r_base, int displacement, RegStorage r_dest,
OpSize size) {
- LIR* load = NULL;
+ LIR* load = nullptr;
A64Opcode opcode = kA64Brk1d;
A64Opcode alt_opcode = kA64Brk1d;
int scale = 0;
@@ -1286,7 +1286,7 @@ LIR* Arm64Mir2Lir::LoadBaseDisp(RegStorage r_base, int displacement, RegStorage
LIR* Arm64Mir2Lir::StoreBaseDispBody(RegStorage r_base, int displacement, RegStorage r_src,
OpSize size) {
- LIR* store = NULL;
+ LIR* store = nullptr;
A64Opcode opcode = kA64Brk1d;
A64Opcode alt_opcode = kA64Brk1d;
int scale = 0;
diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc
index 9f4a318..fb68335 100644
--- a/compiler/dex/quick/codegen_util.cc
+++ b/compiler/dex/quick/codegen_util.cc
@@ -1080,7 +1080,7 @@ Mir2Lir::Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena
reginfo_map_.reserve(RegStorage::kMaxRegs);
pointer_storage_.reserve(128);
slow_paths_.reserve(32);
- // Reserve pointer id 0 for nullptr.
+ // Reserve pointer id 0 for null.
size_t null_idx = WrapPointer<void>(nullptr);
DCHECK_EQ(null_idx, 0U);
}
diff --git a/compiler/dex/quick/dex_file_method_inliner.cc b/compiler/dex/quick/dex_file_method_inliner.cc
index ca31dbf..f5e6c09 100644
--- a/compiler/dex/quick/dex_file_method_inliner.cc
+++ b/compiler/dex/quick/dex_file_method_inliner.cc
@@ -392,7 +392,7 @@ const DexFileMethodInliner::IntrinsicDef DexFileMethodInliner::kIntrinsicMethods
DexFileMethodInliner::DexFileMethodInliner()
: lock_("DexFileMethodInliner lock", kDexFileMethodInlinerLock),
- dex_file_(NULL) {
+ dex_file_(nullptr) {
static_assert(kClassCacheFirst == 0, "kClassCacheFirst not 0");
static_assert(arraysize(kClassCacheNames) == kClassCacheLast,
"bad arraysize for kClassCacheNames");
diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc
index 1a72cd7..de5e041 100644
--- a/compiler/dex/quick/gen_common.cc
+++ b/compiler/dex/quick/gen_common.cc
@@ -87,7 +87,7 @@ void Mir2Lir::GenIfNullUseHelperImmMethod(
const RegStorage r_result_;
};
- LIR* branch = OpCmpImmBranch(kCondEq, r_result, 0, NULL);
+ LIR* branch = OpCmpImmBranch(kCondEq, r_result, 0, nullptr);
LIR* cont = NewLIR0(kPseudoTargetLabel);
AddSlowPath(new (arena_) CallHelperImmMethodSlowPath(this, branch, cont, trampoline, imm,
@@ -113,10 +113,10 @@ RegStorage Mir2Lir::GenGetOtherTypeForSgetSput(const MirSFieldLoweringInfo& fiel
int32_t offset_of_field = ObjArray::OffsetOfElement(field_info.StorageIndex()).Int32Value();
LoadRefDisp(r_base, offset_of_field, r_base, kNotVolatile);
}
- // r_base now points at static storage (Class*) or nullptr if the type is not yet resolved.
+ // r_base now points at static storage (Class*) or null if the type is not yet resolved.
LIR* unresolved_branch = nullptr;
if (!field_info.IsClassInDexCache() && (opt_flags & MIR_CLASS_IS_IN_DEX_CACHE) == 0) {
- // Check if r_base is nullptr.
+ // Check if r_base is null.
unresolved_branch = OpCmpImmBranch(kCondEq, r_base, 0, nullptr);
}
LIR* uninit_branch = nullptr;
@@ -136,8 +136,8 @@ RegStorage Mir2Lir::GenGetOtherTypeForSgetSput(const MirSFieldLoweringInfo& fiel
class StaticFieldSlowPath : public Mir2Lir::LIRSlowPath {
public:
// There are up to two branches to the static field slow path, the "unresolved" when the type
- // entry in the dex cache is nullptr, and the "uninit" when the class is not yet initialized.
- // At least one will be non-nullptr here, otherwise we wouldn't generate the slow path.
+ // entry in the dex cache is null, and the "uninit" when the class is not yet initialized.
+ // At least one will be non-null here, otherwise we wouldn't generate the slow path.
StaticFieldSlowPath(Mir2Lir* m2l, LIR* unresolved, LIR* uninit, LIR* cont, int storage_index,
RegStorage r_base_in, RegStorage r_method_in)
: LIRSlowPath(m2l, unresolved != nullptr ? unresolved : uninit, cont),
@@ -165,7 +165,7 @@ RegStorage Mir2Lir::GenGetOtherTypeForSgetSput(const MirSFieldLoweringInfo& fiel
}
private:
- // Second branch to the slow path, or nullptr if there's only one branch.
+ // Second branch to the slow path, or null if there's only one branch.
LIR* const second_branch_;
const int storage_index_;
@@ -173,7 +173,7 @@ RegStorage Mir2Lir::GenGetOtherTypeForSgetSput(const MirSFieldLoweringInfo& fiel
RegStorage r_method_;
};
- // The slow path is invoked if the r_base is nullptr or the class pointed
+ // The slow path is invoked if the r_base is null or the class pointed
// to by it is not initialized.
LIR* cont = NewLIR0(kPseudoTargetLabel);
AddSlowPath(new (arena_) StaticFieldSlowPath(this, unresolved_branch, uninit_branch, cont,
@@ -319,7 +319,7 @@ LIR* Mir2Lir::GenNullCheck(RegStorage m_reg, int opt_flags) {
/* Perform an explicit null-check on a register. */
LIR* Mir2Lir::GenExplicitNullCheck(RegStorage m_reg, int opt_flags) {
if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) {
- return NULL;
+ return nullptr;
}
return GenNullCheck(m_reg);
}
@@ -1188,7 +1188,7 @@ void Mir2Lir::GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx, Re
DCHECK(!IsSameReg(result_reg, object.reg));
}
LoadConstant(result_reg, 0); // assume false
- LIR* null_branchover = OpCmpImmBranch(kCondEq, object.reg, 0, NULL);
+ LIR* null_branchover = OpCmpImmBranch(kCondEq, object.reg, 0, nullptr);
RegStorage check_class = AllocTypedTemp(false, kRefReg);
RegStorage object_class = AllocTypedTemp(false, kRefReg);
@@ -1287,7 +1287,7 @@ void Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_know
// On MIPS and x86_64 rArg0 != rl_result, place false in result if branch is taken.
LoadConstant(rl_result.reg, 0);
}
- LIR* branch1 = OpCmpImmBranch(kCondEq, ref_reg, 0, NULL);
+ LIR* branch1 = OpCmpImmBranch(kCondEq, ref_reg, 0, nullptr);
/* load object->klass_ */
RegStorage ref_class_reg = TargetReg(kArg1, kRef); // kArg1 will hold the Class* of ref.
@@ -1295,7 +1295,7 @@ void Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_know
LoadRefDisp(ref_reg, mirror::Object::ClassOffset().Int32Value(),
ref_class_reg, kNotVolatile);
/* kArg0 is ref, kArg1 is ref->klass_, kArg2 is class */
- LIR* branchover = NULL;
+ LIR* branchover = nullptr;
if (type_known_final) {
// rl_result == ref == class.
GenSelectConst32(ref_class_reg, class_reg, kCondEq, 1, 0, rl_result.reg,
@@ -1320,7 +1320,7 @@ void Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_know
if (!type_known_abstract) {
/* Uses branchovers */
LoadConstant(rl_result.reg, 1); // assume true
- branchover = OpCmpBranch(kCondEq, TargetReg(kArg1, kRef), TargetReg(kArg2, kRef), NULL);
+ branchover = OpCmpBranch(kCondEq, TargetReg(kArg1, kRef), TargetReg(kArg2, kRef), nullptr);
}
OpRegCopy(TargetReg(kArg0, kRef), class_reg); // .ne case - arg0 <= class
@@ -2129,7 +2129,7 @@ void Mir2Lir::GenSuspendTest(int opt_flags) {
}
if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitSuspendChecks()) {
FlushAllRegs();
- LIR* branch = OpTestSuspend(NULL);
+ LIR* branch = OpTestSuspend(nullptr);
LIR* cont = NewLIR0(kPseudoTargetLabel);
AddSlowPath(new (arena_) SuspendCheckSlowPath(this, branch, cont));
} else {
diff --git a/compiler/dex/quick/gen_loadstore.cc b/compiler/dex/quick/gen_loadstore.cc
index 54e5742..4215e8b 100644
--- a/compiler/dex/quick/gen_loadstore.cc
+++ b/compiler/dex/quick/gen_loadstore.cc
@@ -46,7 +46,7 @@ void Mir2Lir::LoadValueDirect(RegLocation rl_src, RegStorage r_dest) {
if (rl_src.location == kLocPhysReg) {
OpRegCopy(r_dest, rl_src.reg);
} else if (IsInexpensiveConstant(rl_src)) {
- // On 64-bit targets, will sign extend. Make sure constant reference is always NULL.
+ // On 64-bit targets, will sign extend. Make sure constant reference is always null.
DCHECK(!rl_src.ref || (mir_graph_->ConstantValue(rl_src) == 0));
LoadConstantNoClobber(r_dest, mir_graph_->ConstantValue(rl_src));
} else {
diff --git a/compiler/dex/quick/mips/assemble_mips.cc b/compiler/dex/quick/mips/assemble_mips.cc
index 936ff42..f9b9684 100644
--- a/compiler/dex/quick/mips/assemble_mips.cc
+++ b/compiler/dex/quick/mips/assemble_mips.cc
@@ -613,7 +613,7 @@ void MipsMir2Lir::ConvertShortToLongBranch(LIR* lir) {
LOG(FATAL) << "Unexpected branch kind " << opcode;
UNREACHABLE();
}
- LIR* hop_target = NULL;
+ LIR* hop_target = nullptr;
if (!unconditional) {
hop_target = RawLIR(dalvik_offset, kPseudoTargetLabel);
LIR* hop_branch = RawLIR(dalvik_offset, opcode, lir->operands[0],
@@ -650,7 +650,7 @@ AssemblerStatus MipsMir2Lir::AssembleInstructions(CodeOffset start_addr) {
LIR *lir;
AssemblerStatus res = kSuccess; // Assume success.
- for (lir = first_lir_insn_; lir != NULL; lir = NEXT_LIR(lir)) {
+ for (lir = first_lir_insn_; lir != nullptr; lir = NEXT_LIR(lir)) {
if (lir->opcode < 0) {
continue;
}
@@ -668,7 +668,7 @@ AssemblerStatus MipsMir2Lir::AssembleInstructions(CodeOffset start_addr) {
* (label2 - label1), where label1 is a standard
* kPseudoTargetLabel and is stored in operands[2].
* If operands[3] is null, then label2 is a kPseudoTargetLabel
- * and is found in lir->target. If operands[3] is non-NULL,
+ * and is found in lir->target. If operands[3] is non-null,
* then it is a Switch/Data table.
*/
int offset1 = UnwrapPointer<LIR>(lir->operands[2])->offset;
@@ -863,7 +863,7 @@ int MipsMir2Lir::AssignInsnOffsets() {
LIR* lir;
int offset = 0;
- for (lir = first_lir_insn_; lir != NULL; lir = NEXT_LIR(lir)) {
+ for (lir = first_lir_insn_; lir != nullptr; lir = NEXT_LIR(lir)) {
lir->offset = offset;
if (LIKELY(lir->opcode >= 0)) {
if (!lir->flags.is_nop) {
diff --git a/compiler/dex/quick/mips/call_mips.cc b/compiler/dex/quick/mips/call_mips.cc
index 05570e4..39b9cc7 100644
--- a/compiler/dex/quick/mips/call_mips.cc
+++ b/compiler/dex/quick/mips/call_mips.cc
@@ -112,7 +112,7 @@ void MipsMir2Lir::GenLargeSparseSwitch(MIR* mir, DexOffset table_offset, RegLoca
// Test loop.
RegStorage r_key = AllocTemp();
LIR* loop_label = NewLIR0(kPseudoTargetLabel);
- LIR* exit_branch = OpCmpBranch(kCondEq, r_base, r_end, NULL);
+ LIR* exit_branch = OpCmpBranch(kCondEq, r_base, r_end, nullptr);
Load32Disp(r_base, 0, r_key);
OpRegImm(kOpAdd, r_base, 8);
OpCmpBranch(kCondNe, rl_src.reg, r_key, loop_label);
@@ -188,7 +188,7 @@ void MipsMir2Lir::GenLargePackedSwitch(MIR* mir, DexOffset table_offset, RegLoca
tab_rec->anchor = base_label;
// Bounds check - if < 0 or >= size continue following switch.
- LIR* branch_over = OpCmpImmBranch(kCondHi, r_key, size-1, NULL);
+ LIR* branch_over = OpCmpImmBranch(kCondHi, r_key, size-1, nullptr);
// Materialize the table base pointer.
RegStorage r_base = AllocPtrSizeTemp();
diff --git a/compiler/dex/quick/mips/int_mips.cc b/compiler/dex/quick/mips/int_mips.cc
index 1ca8bb6..9319c64 100644
--- a/compiler/dex/quick/mips/int_mips.cc
+++ b/compiler/dex/quick/mips/int_mips.cc
@@ -68,7 +68,7 @@ void MipsMir2Lir::GenCmpLong(RegLocation rl_dest, RegLocation rl_src1, RegLocati
NewLIR3(kMipsSlt, t0.GetReg(), rl_src1.reg.GetHighReg(), rl_src2.reg.GetHighReg());
NewLIR3(kMipsSlt, t1.GetReg(), rl_src2.reg.GetHighReg(), rl_src1.reg.GetHighReg());
NewLIR3(kMipsSubu, rl_result.reg.GetReg(), t1.GetReg(), t0.GetReg());
- LIR* branch = OpCmpImmBranch(kCondNe, rl_result.reg, 0, NULL);
+ LIR* branch = OpCmpImmBranch(kCondNe, rl_result.reg, 0, nullptr);
NewLIR3(kMipsSltu, t0.GetReg(), rl_src1.reg.GetLowReg(), rl_src2.reg.GetLowReg());
NewLIR3(kMipsSltu, t1.GetReg(), rl_src2.reg.GetLowReg(), rl_src1.reg.GetLowReg());
NewLIR3(kMipsSubu, rl_result.reg.GetReg(), t1.GetReg(), t0.GetReg());
@@ -128,7 +128,7 @@ LIR* MipsMir2Lir::OpCmpBranch(ConditionCode cond, RegStorage src1, RegStorage sr
break;
default:
LOG(FATAL) << "No support for ConditionCode: " << cond;
- return NULL;
+ return nullptr;
}
if (cmp_zero) {
branch = NewLIR2(br_op, src1.GetReg(), src2.GetReg());
@@ -278,7 +278,7 @@ void MipsMir2Lir::GenSelectConst32(RegStorage left_op, RegStorage right_op, Cond
// Implement as a branch-over.
// TODO: Conditional move?
LoadConstant(rs_dest, true_val);
- LIR* ne_branchover = OpCmpBranch(code, left_op, right_op, NULL);
+ LIR* ne_branchover = OpCmpBranch(code, left_op, right_op, nullptr);
LoadConstant(rs_dest, false_val);
LIR* target_label = NewLIR0(kPseudoTargetLabel);
ne_branchover->target = target_label;
@@ -447,7 +447,7 @@ void MipsMir2Lir::GenDivZeroCheckWide(RegStorage reg) {
// Test suspend flag, return target of taken suspend branch.
LIR* MipsMir2Lir::OpTestSuspend(LIR* target) {
OpRegImm(kOpSub, TargetPtrReg(kSuspend), 1);
- return OpCmpImmBranch((target == NULL) ? kCondEq : kCondNe, TargetPtrReg(kSuspend), 0, target);
+ return OpCmpImmBranch((target == nullptr) ? kCondEq : kCondNe, TargetPtrReg(kSuspend), 0, target);
}
// Decrement register and branch on condition.
diff --git a/compiler/dex/quick/mips/utility_mips.cc b/compiler/dex/quick/mips/utility_mips.cc
index 8ab5422..95c61cd 100644
--- a/compiler/dex/quick/mips/utility_mips.cc
+++ b/compiler/dex/quick/mips/utility_mips.cc
@@ -566,7 +566,7 @@ LIR* MipsMir2Lir::LoadConstantWide(RegStorage r_dest, int64_t value) {
/* Load value from base + scaled index. */
LIR* MipsMir2Lir::LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest,
int scale, OpSize size) {
- LIR *first = NULL;
+ LIR *first = nullptr;
LIR *res;
MipsOpCode opcode = kMipsNop;
bool is64bit = cu_->target64 && r_dest.Is64Bit();
@@ -640,7 +640,7 @@ LIR* MipsMir2Lir::LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStor
// Store value base base + scaled index.
LIR* MipsMir2Lir::StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src,
int scale, OpSize size) {
- LIR *first = NULL;
+ LIR *first = nullptr;
MipsOpCode opcode = kMipsNop;
RegStorage t_reg = AllocTemp();
@@ -696,8 +696,8 @@ LIR* MipsMir2Lir::LoadBaseDispBody(RegStorage r_base, int displacement, RegStora
* rlp and then restore.
*/
LIR *res;
- LIR *load = NULL;
- LIR *load2 = NULL;
+ LIR *load = nullptr;
+ LIR *load2 = nullptr;
MipsOpCode opcode = kMipsNop;
bool short_form = IS_SIMM16(displacement);
bool is64bit = false;
@@ -857,8 +857,8 @@ LIR* MipsMir2Lir::LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r
LIR* MipsMir2Lir::StoreBaseDispBody(RegStorage r_base, int displacement, RegStorage r_src,
OpSize size) {
LIR *res;
- LIR *store = NULL;
- LIR *store2 = NULL;
+ LIR *store = nullptr;
+ LIR *store2 = nullptr;
MipsOpCode opcode = kMipsNop;
bool short_form = IS_SIMM16(displacement);
bool is64bit = false;
diff --git a/compiler/dex/quick/mir_to_lir.cc b/compiler/dex/quick/mir_to_lir.cc
index 2deb727..e9e9161 100644
--- a/compiler/dex/quick/mir_to_lir.cc
+++ b/compiler/dex/quick/mir_to_lir.cc
@@ -1219,7 +1219,7 @@ bool Mir2Lir::MethodBlockCodeGen(BasicBlock* bb) {
block_label_list_[block_id].flags.fixup = kFixupLabel;
AppendLIR(&block_label_list_[block_id]);
- LIR* head_lir = NULL;
+ LIR* head_lir = nullptr;
// If this is a catch block, export the start address.
if (bb->catch_entry) {
@@ -1245,7 +1245,7 @@ bool Mir2Lir::MethodBlockCodeGen(BasicBlock* bb) {
DCHECK_EQ(cfi_.GetCurrentCFAOffset(), frame_size_);
}
- for (mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+ for (mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
ResetRegPool();
if (cu_->disable_opt & (1 << kTrackLiveTemps)) {
ClobberAllTemps();
@@ -1269,7 +1269,7 @@ bool Mir2Lir::MethodBlockCodeGen(BasicBlock* bb) {
GenPrintLabel(mir);
// Remember the first LIR for this block.
- if (head_lir == NULL) {
+ if (head_lir == nullptr) {
head_lir = &block_label_list_[bb->id];
// Set the first label as a scheduling barrier.
DCHECK(!head_lir->flags.use_def_invalid);
@@ -1309,7 +1309,7 @@ bool Mir2Lir::SpecialMIR2LIR(const InlineMethod& special) {
cu_->NewTimingSplit("SpecialMIR2LIR");
// Find the first DalvikByteCode block.
DCHECK_EQ(mir_graph_->GetNumReachableBlocks(), mir_graph_->GetDfsOrder().size());
- BasicBlock*bb = NULL;
+ BasicBlock* bb = nullptr;
for (BasicBlockId dfs_id : mir_graph_->GetDfsOrder()) {
BasicBlock* candidate = mir_graph_->GetBasicBlock(dfs_id);
if (candidate->block_type == kDalvikByteCode) {
@@ -1317,11 +1317,11 @@ bool Mir2Lir::SpecialMIR2LIR(const InlineMethod& special) {
break;
}
}
- if (bb == NULL) {
+ if (bb == nullptr) {
return false;
}
DCHECK_EQ(bb->start_offset, 0);
- DCHECK(bb->first_mir_insn != NULL);
+ DCHECK(bb->first_mir_insn != nullptr);
// Get the first instruction.
MIR* mir = bb->first_mir_insn;
@@ -1343,17 +1343,17 @@ void Mir2Lir::MethodMIR2LIR() {
PreOrderDfsIterator iter(mir_graph_);
BasicBlock* curr_bb = iter.Next();
BasicBlock* next_bb = iter.Next();
- while (curr_bb != NULL) {
+ while (curr_bb != nullptr) {
MethodBlockCodeGen(curr_bb);
// If the fall_through block is no longer laid out consecutively, drop in a branch.
BasicBlock* curr_bb_fall_through = mir_graph_->GetBasicBlock(curr_bb->fall_through);
- if ((curr_bb_fall_through != NULL) && (curr_bb_fall_through != next_bb)) {
+ if ((curr_bb_fall_through != nullptr) && (curr_bb_fall_through != next_bb)) {
OpUnconditionalBranch(&block_label_list_[curr_bb->fall_through]);
}
curr_bb = next_bb;
do {
next_bb = iter.Next();
- } while ((next_bb != NULL) && (next_bb->block_type == kDead));
+ } while ((next_bb != nullptr) && (next_bb->block_type == kDead));
}
HandleSlowPaths();
}
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
index f9efe37..8f08a51 100644
--- a/compiler/dex/quick/mir_to_lir.h
+++ b/compiler/dex/quick/mir_to_lir.h
@@ -388,7 +388,7 @@ class Mir2Lir {
LIR* DefEnd() { return def_end_; }
void SetDefEnd(LIR* def_end) { def_end_ = def_end; }
void ResetDefBody() { def_start_ = def_end_ = nullptr; }
- // Find member of aliased set matching storage_used; return nullptr if none.
+ // Find member of aliased set matching storage_used; return null if none.
RegisterInfo* FindMatchingView(uint32_t storage_used) {
RegisterInfo* res = Master();
for (; res != nullptr; res = res->GetAliasChain()) {
@@ -605,7 +605,7 @@ class Mir2Lir {
char* ArenaStrdup(const char* str) {
size_t len = strlen(str) + 1;
char* res = arena_->AllocArray<char>(len, kArenaAllocMisc);
- if (res != NULL) {
+ if (res != nullptr) {
strncpy(res, str, len);
}
return res;
@@ -650,7 +650,7 @@ class Mir2Lir {
void DumpPromotionMap();
void CodegenDump();
LIR* RawLIR(DexOffset dalvik_offset, int opcode, int op0 = 0, int op1 = 0,
- int op2 = 0, int op3 = 0, int op4 = 0, LIR* target = NULL);
+ int op2 = 0, int op3 = 0, int op4 = 0, LIR* target = nullptr);
LIR* NewLIR0(int opcode);
LIR* NewLIR1(int opcode, int dest);
LIR* NewLIR2(int opcode, int dest, int src1);
@@ -1120,8 +1120,8 @@ class Mir2Lir {
* @param base_reg The register holding the base address.
* @param offset The offset from the base.
* @param check_value The immediate to compare to.
- * @param target branch target (or nullptr)
- * @param compare output for getting LIR for comparison (or nullptr)
+ * @param target branch target (or null)
+ * @param compare output for getting LIR for comparison (or null)
* @returns The branch instruction that was generated.
*/
virtual LIR* OpCmpMemImmBranch(ConditionCode cond, RegStorage temp_reg, RegStorage base_reg,
@@ -1854,7 +1854,7 @@ class Mir2Lir {
// to deduplicate the masks.
ResourceMaskCache mask_cache_;
- // Record the MIR that generated a given safepoint (nullptr for prologue safepoints).
+ // Record the MIR that generated a given safepoint (null for prologue safepoints).
ArenaVector<std::pair<LIR*, MIR*>> safepoints_;
// The layout of the cu_->dex_file's dex cache arrays for PC-relative addressing.
@@ -1869,7 +1869,7 @@ class Mir2Lir {
// For architectures that don't have true PC-relative addressing (see pc_rel_temp_
// above) and also have a limited range of offsets for loads, it's be useful to
// know the minimum offset into the dex cache arrays, so we calculate that as well
- // if pc_rel_temp_ isn't nullptr.
+ // if pc_rel_temp_ isn't null.
uint32_t dex_cache_arrays_min_offset_;
dwarf::LazyDebugFrameOpCodeWriter cfi_;
diff --git a/compiler/dex/quick/quick_cfi_test.cc b/compiler/dex/quick/quick_cfi_test.cc
index 555d5b9..b3c7355 100644
--- a/compiler/dex/quick/quick_cfi_test.cc
+++ b/compiler/dex/quick/quick_cfi_test.cc
@@ -100,7 +100,7 @@ class QuickCFITest : public CFITest {
}
}
m2l->AdjustSpillMask();
- m2l->GenEntrySequence(NULL, m2l->LocCReturnRef());
+ m2l->GenEntrySequence(nullptr, m2l->LocCReturnRef());
m2l->GenExitSequence();
m2l->HandleSlowPaths();
m2l->AssembleLIR();
diff --git a/compiler/dex/quick/quick_compiler.cc b/compiler/dex/quick/quick_compiler.cc
index fc3e687..39eb117 100644
--- a/compiler/dex/quick/quick_compiler.cc
+++ b/compiler/dex/quick/quick_compiler.cc
@@ -102,7 +102,7 @@ static constexpr uint32_t kDisabledOptimizationsPerISA[] = {
static_assert(sizeof(kDisabledOptimizationsPerISA) == 8 * sizeof(uint32_t),
"kDisabledOpts unexpected");
-// Supported shorty types per instruction set. nullptr means that all are available.
+// Supported shorty types per instruction set. null means that all are available.
// Z : boolean
// B : byte
// S : short
@@ -422,7 +422,7 @@ static int kInvokeOpcodes[] = {
Instruction::INVOKE_VIRTUAL_RANGE_QUICK,
};
-// Unsupported opcodes. nullptr can be used when everything is supported. Size of the lists is
+// Unsupported opcodes. null can be used when everything is supported. Size of the lists is
// recorded below.
static const int* kUnsupportedOpcodes[] = {
// 0 = kNone.
@@ -515,7 +515,7 @@ bool QuickCompiler::CanCompileMethod(uint32_t method_idx, const DexFile& dex_fil
for (unsigned int idx = 0; idx < cu->mir_graph->GetNumBlocks(); idx++) {
BasicBlock* bb = cu->mir_graph->GetBasicBlock(idx);
- if (bb == NULL) continue;
+ if (bb == nullptr) continue;
if (bb->block_type == kDead) continue;
for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
int opcode = mir->dalvikInsn.opcode;
diff --git a/compiler/dex/quick/ralloc_util.cc b/compiler/dex/quick/ralloc_util.cc
index e779479..8ec86fa 100644
--- a/compiler/dex/quick/ralloc_util.cc
+++ b/compiler/dex/quick/ralloc_util.cc
@@ -935,7 +935,7 @@ bool Mir2Lir::CheckCorePoolSanity() {
RegStorage my_reg = info->GetReg();
RegStorage partner_reg = info->Partner();
RegisterInfo* partner = GetRegInfo(partner_reg);
- DCHECK(partner != NULL);
+ DCHECK(partner != nullptr);
DCHECK(partner->IsWide());
DCHECK_EQ(my_reg.GetReg(), partner->Partner().GetReg());
DCHECK(partner->IsLive());
diff --git a/compiler/dex/quick/x86/assemble_x86.cc b/compiler/dex/quick/x86/assemble_x86.cc
index af19f5e..eb33357 100644
--- a/compiler/dex/quick/x86/assemble_x86.cc
+++ b/compiler/dex/quick/x86/assemble_x86.cc
@@ -1633,7 +1633,7 @@ AssemblerStatus X86Mir2Lir::AssembleInstructions(CodeOffset start_addr) {
AssemblerStatus res = kSuccess; // Assume success
const bool kVerbosePcFixup = false;
- for (lir = first_lir_insn_; lir != NULL; lir = NEXT_LIR(lir)) {
+ for (lir = first_lir_insn_; lir != nullptr; lir = NEXT_LIR(lir)) {
if (IsPseudoLirOp(lir->opcode)) {
continue;
}
@@ -1646,7 +1646,7 @@ AssemblerStatus X86Mir2Lir::AssembleInstructions(CodeOffset start_addr) {
switch (lir->opcode) {
case kX86Jcc8: {
LIR *target_lir = lir->target;
- DCHECK(target_lir != NULL);
+ DCHECK(target_lir != nullptr);
int delta = 0;
CodeOffset pc;
if (IS_SIMM8(lir->operands[0])) {
@@ -1679,7 +1679,7 @@ AssemblerStatus X86Mir2Lir::AssembleInstructions(CodeOffset start_addr) {
}
case kX86Jcc32: {
LIR *target_lir = lir->target;
- DCHECK(target_lir != NULL);
+ DCHECK(target_lir != nullptr);
CodeOffset pc = lir->offset + 6 /* 2 byte opcode + rel32 */;
CodeOffset target = target_lir->offset;
int delta = target - pc;
@@ -1695,7 +1695,7 @@ AssemblerStatus X86Mir2Lir::AssembleInstructions(CodeOffset start_addr) {
}
case kX86Jecxz8: {
LIR *target_lir = lir->target;
- DCHECK(target_lir != NULL);
+ DCHECK(target_lir != nullptr);
CodeOffset pc;
pc = lir->offset + 2; // opcode + rel8
CodeOffset target = target_lir->offset;
@@ -1706,7 +1706,7 @@ AssemblerStatus X86Mir2Lir::AssembleInstructions(CodeOffset start_addr) {
}
case kX86Jmp8: {
LIR *target_lir = lir->target;
- DCHECK(target_lir != NULL);
+ DCHECK(target_lir != nullptr);
int delta = 0;
CodeOffset pc;
if (IS_SIMM8(lir->operands[0])) {
@@ -1738,7 +1738,7 @@ AssemblerStatus X86Mir2Lir::AssembleInstructions(CodeOffset start_addr) {
}
case kX86Jmp32: {
LIR *target_lir = lir->target;
- DCHECK(target_lir != NULL);
+ DCHECK(target_lir != nullptr);
CodeOffset pc = lir->offset + 5 /* opcode + rel32 */;
CodeOffset target = target_lir->offset;
int delta = target - pc;
@@ -1748,7 +1748,7 @@ AssemblerStatus X86Mir2Lir::AssembleInstructions(CodeOffset start_addr) {
default:
if (lir->flags.fixup == kFixupLoad) {
LIR *target_lir = lir->target;
- DCHECK(target_lir != NULL);
+ DCHECK(target_lir != nullptr);
CodeOffset target = target_lir->offset;
// Handle 64 bit RIP addressing.
if (lir->operands[1] == kRIPReg) {
@@ -1950,7 +1950,7 @@ int X86Mir2Lir::AssignInsnOffsets() {
LIR* lir;
int offset = 0;
- for (lir = first_lir_insn_; lir != NULL; lir = NEXT_LIR(lir)) {
+ for (lir = first_lir_insn_; lir != nullptr; lir = NEXT_LIR(lir)) {
lir->offset = offset;
if (LIKELY(!IsPseudoLirOp(lir->opcode))) {
if (!lir->flags.is_nop) {
diff --git a/compiler/dex/quick/x86/call_x86.cc b/compiler/dex/quick/x86/call_x86.cc
index d7a5eb0..e2364d8 100644
--- a/compiler/dex/quick/x86/call_x86.cc
+++ b/compiler/dex/quick/x86/call_x86.cc
@@ -80,7 +80,7 @@ void X86Mir2Lir::GenLargePackedSwitch(MIR* mir, DexOffset table_offset, RegLocat
// Bounds check - if < 0 or >= size continue following switch
OpRegImm(kOpCmp, keyReg, size - 1);
- LIR* branch_over = OpCondBranch(kCondHi, NULL);
+ LIR* branch_over = OpCondBranch(kCondHi, nullptr);
RegStorage addr_for_jump;
if (cu_->target64) {
diff --git a/compiler/dex/quick/x86/fp_x86.cc b/compiler/dex/quick/x86/fp_x86.cc
index 10af31a..8e81746 100755
--- a/compiler/dex/quick/x86/fp_x86.cc
+++ b/compiler/dex/quick/x86/fp_x86.cc
@@ -484,13 +484,13 @@ void X86Mir2Lir::GenCmpFP(Instruction::Code code, RegLocation rl_dest,
} else {
NewLIR2(kX86UcomisdRR, rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
}
- LIR* branch = NULL;
+ LIR* branch = nullptr;
if (unordered_gt) {
branch = NewLIR2(kX86Jcc8, 0, kX86CondPE);
}
// If the result reg can't be byte accessed, use a jump and move instead of a set.
if (!IsByteRegister(rl_result.reg)) {
- LIR* branch2 = NULL;
+ LIR* branch2 = nullptr;
if (unordered_gt) {
branch2 = NewLIR2(kX86Jcc8, 0, kX86CondA);
NewLIR2(kX86Mov32RI, rl_result.reg.GetReg(), 0x0);
@@ -513,7 +513,7 @@ void X86Mir2Lir::GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias,
bool is_double) {
LIR* taken = &block_label_list_[bb->taken];
LIR* not_taken = &block_label_list_[bb->fall_through];
- LIR* branch = NULL;
+ LIR* branch = nullptr;
RegLocation rl_src1;
RegLocation rl_src2;
if (is_double) {
diff --git a/compiler/dex/quick/x86/int_x86.cc b/compiler/dex/quick/x86/int_x86.cc
index 2c13b61..943bfc0 100755
--- a/compiler/dex/quick/x86/int_x86.cc
+++ b/compiler/dex/quick/x86/int_x86.cc
@@ -1569,7 +1569,7 @@ LIR* X86Mir2Lir::OpTestSuspend(LIR* target) {
} else {
OpTlsCmp(Thread::ThreadFlagsOffset<4>(), 0);
}
- return OpCondBranch((target == NULL) ? kCondNe : kCondEq, target);
+ return OpCondBranch((target == nullptr) ? kCondNe : kCondEq, target);
}
// Decrement register and branch on condition
@@ -3005,7 +3005,7 @@ void X86Mir2Lir::GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx,
// Assume that there is no match.
LoadConstant(result_reg, 0);
- LIR* null_branchover = OpCmpImmBranch(kCondEq, object.reg, 0, NULL);
+ LIR* null_branchover = OpCmpImmBranch(kCondEq, object.reg, 0, nullptr);
// We will use this register to compare to memory below.
// References are 32 bit in memory, and 64 bit in registers (in 64 bit mode).
diff --git a/compiler/dex/quick/x86/target_x86.cc b/compiler/dex/quick/x86/target_x86.cc
index a16e242..b460379 100755
--- a/compiler/dex/quick/x86/target_x86.cc
+++ b/compiler/dex/quick/x86/target_x86.cc
@@ -1281,7 +1281,7 @@ bool X86Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) {
RegLocation rl_return = GetReturn(kCoreReg);
RegLocation rl_dest = InlineTarget(info);
- // Is the string non-NULL?
+ // Is the string non-null?
LoadValueDirectFixed(rl_obj, rs_rDX);
GenNullCheck(rs_rDX, info->opt_flags);
info->opt_flags |= MIR_IGNORE_NULL_CHECK; // Record that we've null checked.
diff --git a/compiler/dex/quick/x86/utility_x86.cc b/compiler/dex/quick/x86/utility_x86.cc
index efcb9ee..61a1bec 100644
--- a/compiler/dex/quick/x86/utility_x86.cc
+++ b/compiler/dex/quick/x86/utility_x86.cc
@@ -578,7 +578,7 @@ LIR* X86Mir2Lir::LoadConstantWide(RegStorage r_dest, int64_t value) {
} else if (pc_rel_base_reg_.Valid() || cu_->target64) {
// We will load the value from the literal area.
LIR* data_target = ScanLiteralPoolWide(literal_list_, val_lo, val_hi);
- if (data_target == NULL) {
+ if (data_target == nullptr) {
data_target = AddWideData(&literal_list_, val_lo, val_hi);
}
@@ -642,8 +642,8 @@ LIR* X86Mir2Lir::LoadConstantWide(RegStorage r_dest, int64_t value) {
LIR* X86Mir2Lir::LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale,
int displacement, RegStorage r_dest, OpSize size) {
- LIR *load = NULL;
- LIR *load2 = NULL;
+ LIR *load = nullptr;
+ LIR *load2 = nullptr;
bool is_array = r_index.Valid();
bool pair = r_dest.IsPair();
bool is64bit = ((size == k64) || (size == kDouble));
@@ -763,7 +763,7 @@ LIR* X86Mir2Lir::LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int
}
}
- // Always return first load generated as this might cause a fault if base is nullptr.
+ // Always return first load generated as this might cause a fault if base is null.
return load;
}
@@ -791,8 +791,8 @@ LIR* X86Mir2Lir::LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_
LIR* X86Mir2Lir::StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale,
int displacement, RegStorage r_src, OpSize size,
int opt_flags) {
- LIR *store = NULL;
- LIR *store2 = NULL;
+ LIR *store = nullptr;
+ LIR *store2 = nullptr;
bool is_array = r_index.Valid();
bool pair = r_src.IsPair();
bool is64bit = (size == k64) || (size == kDouble);
diff --git a/compiler/dex/ssa_transformation.cc b/compiler/dex/ssa_transformation.cc
index 197f66d..939bf40 100644
--- a/compiler/dex/ssa_transformation.cc
+++ b/compiler/dex/ssa_transformation.cc
@@ -26,15 +26,15 @@ namespace art {
void MIRGraph::ClearAllVisitedFlags() {
AllNodesIterator iter(this);
- for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
+ for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
bb->visited = false;
}
}
BasicBlock* MIRGraph::NeedsVisit(BasicBlock* bb) {
- if (bb != NULL) {
+ if (bb != nullptr) {
if (bb->visited || bb->hidden) {
- bb = NULL;
+ bb = nullptr;
}
}
return bb;
@@ -42,13 +42,13 @@ BasicBlock* MIRGraph::NeedsVisit(BasicBlock* bb) {
BasicBlock* MIRGraph::NextUnvisitedSuccessor(BasicBlock* bb) {
BasicBlock* res = NeedsVisit(GetBasicBlock(bb->fall_through));
- if (res == NULL) {
+ if (res == nullptr) {
res = NeedsVisit(GetBasicBlock(bb->taken));
- if (res == NULL) {
+ if (res == nullptr) {
if (bb->successor_block_list_type != kNotUsed) {
for (SuccessorBlockInfo* sbi : bb->successor_blocks) {
res = NeedsVisit(GetBasicBlock(sbi->block));
- if (res != NULL) {
+ if (res != nullptr) {
break;
}
}
@@ -75,7 +75,7 @@ void MIRGraph::RecordDFSOrders(BasicBlock* block) {
while (!succ.empty()) {
BasicBlock* curr = succ.back();
BasicBlock* next_successor = NextUnvisitedSuccessor(curr);
- if (next_successor != NULL) {
+ if (next_successor != nullptr) {
MarkPreOrder(next_successor);
succ.push_back(next_successor);
continue;
@@ -107,7 +107,7 @@ void MIRGraph::ComputeDFSOrders() {
if (num_reachable_blocks_ != GetNumBlocks()) {
// Kill all unreachable blocks.
AllNodesIterator iter(this);
- for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
+ for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
if (!bb->visited) {
bb->Kill(this);
}
@@ -121,7 +121,7 @@ void MIRGraph::ComputeDFSOrders() {
* register idx is defined in BasicBlock bb.
*/
bool MIRGraph::FillDefBlockMatrix(BasicBlock* bb) {
- if (bb->data_flow_info == NULL) {
+ if (bb->data_flow_info == nullptr) {
return false;
}
@@ -149,11 +149,11 @@ void MIRGraph::ComputeDefBlockMatrix() {
}
AllNodesIterator iter(this);
- for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
+ for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
FindLocalLiveIn(bb);
}
AllNodesIterator iter2(this);
- for (BasicBlock* bb = iter2.Next(); bb != NULL; bb = iter2.Next()) {
+ for (BasicBlock* bb = iter2.Next(); bb != nullptr; bb = iter2.Next()) {
FillDefBlockMatrix(bb);
}
@@ -247,7 +247,7 @@ bool MIRGraph::ComputeDominanceFrontier(BasicBlock* bb) {
void MIRGraph::InitializeDominationInfo(BasicBlock* bb) {
int num_total_blocks = GetBasicBlockListCount();
- if (bb->dominators == NULL) {
+ if (bb->dominators == nullptr) {
bb->dominators = new (arena_) ArenaBitVector(arena_, num_total_blocks,
true /* expandable */, kBitMapDominators);
bb->i_dominated = new (arena_) ArenaBitVector(arena_, num_total_blocks,
@@ -357,7 +357,7 @@ void MIRGraph::ComputeDominators() {
/* Initialize domination-related data structures */
PreOrderDfsIterator iter(this);
- for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
+ for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
InitializeDominationInfo(bb);
}
@@ -376,7 +376,7 @@ void MIRGraph::ComputeDominators() {
/* Compute the immediate dominators */
RepeatingReversePostOrderDfsIterator iter2(this);
bool change = false;
- for (BasicBlock* bb = iter2.Next(false); bb != NULL; bb = iter2.Next(change)) {
+ for (BasicBlock* bb = iter2.Next(false); bb != nullptr; bb = iter2.Next(change)) {
change = ComputeblockIDom(bb);
}
@@ -387,19 +387,19 @@ void MIRGraph::ComputeDominators() {
GetEntryBlock()->i_dom = 0;
PreOrderDfsIterator iter3(this);
- for (BasicBlock* bb = iter3.Next(); bb != NULL; bb = iter3.Next()) {
+ for (BasicBlock* bb = iter3.Next(); bb != nullptr; bb = iter3.Next()) {
SetDominators(bb);
}
ReversePostOrderDfsIterator iter4(this);
- for (BasicBlock* bb = iter4.Next(); bb != NULL; bb = iter4.Next()) {
+ for (BasicBlock* bb = iter4.Next(); bb != nullptr; bb = iter4.Next()) {
ComputeBlockDominators(bb);
}
// Compute the dominance frontier for each block.
ComputeDomPostOrderTraversal(GetEntryBlock());
PostOrderDOMIterator iter5(this);
- for (BasicBlock* bb = iter5.Next(); bb != NULL; bb = iter5.Next()) {
+ for (BasicBlock* bb = iter5.Next(); bb != nullptr; bb = iter5.Next()) {
ComputeDominanceFrontier(bb);
}
@@ -434,7 +434,7 @@ bool MIRGraph::ComputeBlockLiveIns(BasicBlock* bb) {
DCHECK_EQ(temp_.ssa.num_vregs, cu_->mir_graph.get()->GetNumOfCodeAndTempVRs());
ArenaBitVector* temp_live_vregs = temp_.ssa.work_live_vregs;
- if (bb->data_flow_info == NULL) {
+ if (bb->data_flow_info == nullptr) {
return false;
}
temp_live_vregs->Copy(bb->data_flow_info->live_in_v);
@@ -466,7 +466,7 @@ bool MIRGraph::ComputeBlockLiveIns(BasicBlock* bb) {
void MIRGraph::FindPhiNodeBlocks() {
RepeatingPostOrderDfsIterator iter(this);
bool change = false;
- for (BasicBlock* bb = iter.Next(false); bb != NULL; bb = iter.Next(change)) {
+ for (BasicBlock* bb = iter.Next(false); bb != nullptr; bb = iter.Next(change)) {
change = ComputeBlockLiveIns(bb);
}
@@ -505,7 +505,7 @@ void MIRGraph::FindPhiNodeBlocks() {
*/
bool MIRGraph::InsertPhiNodeOperands(BasicBlock* bb) {
/* Phi nodes are at the beginning of each block */
- for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+ for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
if (mir->dalvikInsn.opcode != static_cast<Instruction::Code>(kMirOpPhi))
return true;
int ssa_reg = mir->ssa_rep->defs[0];
diff --git a/compiler/dex/verification_results.cc b/compiler/dex/verification_results.cc
index a4df00e..c1d5cb7 100644
--- a/compiler/dex/verification_results.cc
+++ b/compiler/dex/verification_results.cc
@@ -46,7 +46,7 @@ VerificationResults::~VerificationResults() {
}
bool VerificationResults::ProcessVerifiedMethod(verifier::MethodVerifier* method_verifier) {
- DCHECK(method_verifier != NULL);
+ DCHECK(method_verifier != nullptr);
MethodReference ref = method_verifier->GetMethodReference();
bool compile = IsCandidateForCompilation(ref, method_verifier->GetAccessFlags());
const VerifiedMethod* verified_method = VerifiedMethod::Create(method_verifier, compile);
diff --git a/compiler/dex/verified_method.cc b/compiler/dex/verified_method.cc
index 977757f..7eba515 100644
--- a/compiler/dex/verified_method.cc
+++ b/compiler/dex/verified_method.cc
@@ -166,7 +166,7 @@ void VerifiedMethod::VerifyGcMap(verifier::MethodVerifier* method_verifier,
}
}
} else {
- DCHECK(i >= 65536 || reg_bitmap == NULL);
+ DCHECK(i >= 65536 || reg_bitmap == nullptr);
}
}
}
@@ -283,7 +283,7 @@ void VerifiedMethod::GenerateDevirtMap(verifier::MethodVerifier* method_verifier
}
mirror::ArtMethod* abstract_method = method_verifier->GetDexCache()->GetResolvedMethod(
is_range ? inst->VRegB_3rc() : inst->VRegB_35c());
- if (abstract_method == NULL) {
+ if (abstract_method == nullptr) {
// If the method is not found in the cache this means that it was never found
// by ResolveMethodAndCheckAccess() called when verifying invoke_*.
continue;
diff --git a/compiler/dex/verified_method.h b/compiler/dex/verified_method.h
index 437ae52..ad07639 100644
--- a/compiler/dex/verified_method.h
+++ b/compiler/dex/verified_method.h
@@ -59,7 +59,7 @@ class VerifiedMethod {
return safe_cast_set_;
}
- // Returns the devirtualization target method, or nullptr if none.
+ // Returns the devirtualization target method, or null if none.
const MethodReference* GetDevirtTarget(uint32_t dex_pc) const;
// Returns the dequicken field / method for a quick invoke / field get. Returns null if there is
diff --git a/compiler/driver/compiler_driver-inl.h b/compiler/driver/compiler_driver-inl.h
index b4d4695..bad8335 100644
--- a/compiler/driver/compiler_driver-inl.h
+++ b/compiler/driver/compiler_driver-inl.h
@@ -79,7 +79,7 @@ inline ArtField* CompilerDriver::ResolveFieldWithDexFile(
}
if (UNLIKELY(resolved_field->IsStatic() != is_static)) {
// ClassLinker can return a field of the wrong kind directly from the DexCache.
- // Silently return nullptr on such incompatible class change.
+ // Silently return null on such incompatible class change.
return nullptr;
}
return resolved_field;
@@ -206,7 +206,7 @@ inline mirror::ArtMethod* CompilerDriver::ResolveMethod(
}
if (check_incompatible_class_change &&
UNLIKELY(resolved_method->CheckIncompatibleClassChange(invoke_type))) {
- // Silently return nullptr on incompatible class change.
+ // Silently return null on incompatible class change.
return nullptr;
}
return resolved_method;
@@ -302,7 +302,7 @@ inline int CompilerDriver::IsFastInvoke(
target_dex_cache, class_loader,
NullHandle<mirror::ArtMethod>(), kVirtual);
}
- CHECK(called_method != NULL);
+ CHECK(called_method != nullptr);
CHECK(!called_method->IsAbstract());
int stats_flags = kFlagMethodResolved;
GetCodeAndMethodForDirectCall(/*out*/invoke_type,
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index e665e1d..c858326 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -495,7 +495,8 @@ void CompilerDriver::CompileAll(jobject class_loader,
const std::vector<const DexFile*>& dex_files,
TimingLogger* timings) {
DCHECK(!Runtime::Current()->IsStarted());
- std::unique_ptr<ThreadPool> thread_pool(new ThreadPool("Compiler driver thread pool", thread_count_ - 1));
+ std::unique_ptr<ThreadPool> thread_pool(
+ new ThreadPool("Compiler driver thread pool", thread_count_ - 1));
VLOG(compiler) << "Before precompile " << GetMemoryUsageString(false);
PreCompile(class_loader, dex_files, thread_pool.get(), timings);
Compile(class_loader, dex_files, thread_pool.get(), timings);
@@ -2101,7 +2102,8 @@ void CompilerDriver::Compile(jobject class_loader, const std::vector<const DexFi
VLOG(compiler) << "Compile: " << GetMemoryUsageString(false);
}
-void CompilerDriver::CompileClass(const ParallelCompilationManager* manager, size_t class_def_index) {
+void CompilerDriver::CompileClass(const ParallelCompilationManager* manager,
+ size_t class_def_index) {
ATRACE_CALL();
const DexFile& dex_file = *manager->GetDexFile();
const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index);
@@ -2251,7 +2253,7 @@ void CompilerDriver::CompileMethod(Thread* self, const DexFile::CodeItem* code_i
// Is eligable for compilation by methods-to-compile filter.
IsMethodToCompile(method_ref);
if (compile) {
- // NOTE: if compiler declines to compile this method, it will return nullptr.
+ // NOTE: if compiler declines to compile this method, it will return null.
compiled_method = compiler_->Compile(code_item, access_flags, invoke_type, class_def_idx,
method_idx, class_loader, dex_file);
}
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index 50e1fb1..03c5c5c 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -94,7 +94,7 @@ class CompilerDriver {
// Create a compiler targeting the requested "instruction_set".
// "image" should be true if image specific optimizations should be
// enabled. "image_classes" lets the compiler know what classes it
- // can assume will be in the image, with nullptr implying all available
+ // can assume will be in the image, with null implying all available
// classes.
explicit CompilerDriver(const CompilerOptions* compiler_options,
VerificationResults* verification_results,
@@ -228,7 +228,7 @@ class CompilerDriver {
mirror::ClassLoader* GetClassLoader(ScopedObjectAccess& soa, const DexCompilationUnit* mUnit)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- // Resolve compiling method's class. Returns nullptr on failure.
+ // Resolve compiling method's class. Returns null on failure.
mirror::Class* ResolveCompilingMethodsClass(
const ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader, const DexCompilationUnit* mUnit)
@@ -240,7 +240,7 @@ class CompilerDriver {
const DexCompilationUnit* mUnit)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- // Resolve a field. Returns nullptr on failure, including incompatible class change.
+ // Resolve a field. Returns null on failure, including incompatible class change.
// NOTE: Unlike ClassLinker's ResolveField(), this method enforces is_static.
ArtField* ResolveField(
const ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache,
@@ -290,7 +290,7 @@ class CompilerDriver {
ArtField* resolved_field)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- // Resolve a method. Returns nullptr on failure, including incompatible class change.
+ // Resolve a method. Returns null on failure, including incompatible class change.
mirror::ArtMethod* ResolveMethod(
ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader, const DexCompilationUnit* mUnit,
@@ -592,16 +592,16 @@ class CompilerDriver {
const bool image_;
// If image_ is true, specifies the classes that will be included in
- // the image. Note if image_classes_ is nullptr, all classes are
+ // the image. Note if image_classes_ is null, all classes are
// included in the image.
std::unique_ptr<std::unordered_set<std::string>> image_classes_;
- // Specifies the classes that will be compiled. Note that if classes_to_compile_ is nullptr,
+ // Specifies the classes that will be compiled. Note that if classes_to_compile_ is null,
// all classes are eligible for compilation (duplication filters etc. will still apply).
// This option may be restricted to the boot image, depending on a flag in the implementation.
std::unique_ptr<std::unordered_set<std::string>> classes_to_compile_;
- // Specifies the methods that will be compiled. Note that if methods_to_compile_ is nullptr,
+ // Specifies the methods that will be compiled. Note that if methods_to_compile_ is null,
// all methods are eligible for compilation (compilation filters etc. will still apply).
// This option may be restricted to the boot image, depending on a flag in the implementation.
std::unique_ptr<std::unordered_set<std::string>> methods_to_compile_;
diff --git a/compiler/driver/compiler_driver_test.cc b/compiler/driver/compiler_driver_test.cc
index ded50ca..5085f32 100644
--- a/compiler/driver/compiler_driver_test.cc
+++ b/compiler/driver/compiler_driver_test.cc
@@ -56,20 +56,20 @@ class CompilerDriverTest : public CommonCompilerTest {
CHECK(started);
env_ = Thread::Current()->GetJniEnv();
class_ = env_->FindClass(class_name);
- CHECK(class_ != NULL) << "Class not found: " << class_name;
+ CHECK(class_ != nullptr) << "Class not found: " << class_name;
if (is_virtual) {
mid_ = env_->GetMethodID(class_, method, signature);
} else {
mid_ = env_->GetStaticMethodID(class_, method, signature);
}
- CHECK(mid_ != NULL) << "Method not found: " << class_name << "." << method << signature;
+ CHECK(mid_ != nullptr) << "Method not found: " << class_name << "." << method << signature;
}
void MakeAllExecutable(jobject class_loader) {
const std::vector<const DexFile*> class_path = GetDexFiles(class_loader);
for (size_t i = 0; i != class_path.size(); ++i) {
const DexFile* dex_file = class_path[i];
- CHECK(dex_file != NULL);
+ CHECK(dex_file != nullptr);
MakeDexFileExecutable(class_loader, *dex_file);
}
}
@@ -84,7 +84,7 @@ class CompilerDriverTest : public CommonCompilerTest {
Handle<mirror::ClassLoader> loader(
hs.NewHandle(soa.Decode<mirror::ClassLoader*>(class_loader)));
mirror::Class* c = class_linker->FindClass(soa.Self(), descriptor, loader);
- CHECK(c != NULL);
+ CHECK(c != nullptr);
for (size_t j = 0; j < c->NumDirectMethods(); j++) {
MakeExecutable(c->GetDirectMethod(j));
}
@@ -101,39 +101,38 @@ class CompilerDriverTest : public CommonCompilerTest {
// Disabled due to 10 second runtime on host
TEST_F(CompilerDriverTest, DISABLED_LARGE_CompileDexLibCore) {
- CompileAll(NULL);
+ CompileAll(nullptr);
// All libcore references should resolve
ScopedObjectAccess soa(Thread::Current());
- ASSERT_TRUE(java_lang_dex_file_ != NULL);
+ ASSERT_TRUE(java_lang_dex_file_ != nullptr);
const DexFile& dex = *java_lang_dex_file_;
mirror::DexCache* dex_cache = class_linker_->FindDexCache(dex);
EXPECT_EQ(dex.NumStringIds(), dex_cache->NumStrings());
for (size_t i = 0; i < dex_cache->NumStrings(); i++) {
const mirror::String* string = dex_cache->GetResolvedString(i);
- EXPECT_TRUE(string != NULL) << "string_idx=" << i;
+ EXPECT_TRUE(string != nullptr) << "string_idx=" << i;
}
EXPECT_EQ(dex.NumTypeIds(), dex_cache->NumResolvedTypes());
for (size_t i = 0; i < dex_cache->NumResolvedTypes(); i++) {
mirror::Class* type = dex_cache->GetResolvedType(i);
- EXPECT_TRUE(type != NULL) << "type_idx=" << i
+ EXPECT_TRUE(type != nullptr) << "type_idx=" << i
<< " " << dex.GetTypeDescriptor(dex.GetTypeId(i));
}
EXPECT_EQ(dex.NumMethodIds(), dex_cache->NumResolvedMethods());
for (size_t i = 0; i < dex_cache->NumResolvedMethods(); i++) {
mirror::ArtMethod* method = dex_cache->GetResolvedMethod(i);
- EXPECT_TRUE(method != NULL) << "method_idx=" << i
+ EXPECT_TRUE(method != nullptr) << "method_idx=" << i
<< " " << dex.GetMethodDeclaringClassDescriptor(dex.GetMethodId(i))
<< " " << dex.GetMethodName(dex.GetMethodId(i));
- EXPECT_TRUE(method->GetEntryPointFromQuickCompiledCode() != NULL) << "method_idx=" << i
- << " "
- << dex.GetMethodDeclaringClassDescriptor(dex.GetMethodId(i))
- << " " << dex.GetMethodName(dex.GetMethodId(i));
+ EXPECT_TRUE(method->GetEntryPointFromQuickCompiledCode() != nullptr) << "method_idx=" << i
+ << " " << dex.GetMethodDeclaringClassDescriptor(dex.GetMethodId(i)) << " "
+ << dex.GetMethodName(dex.GetMethodId(i));
}
EXPECT_EQ(dex.NumFieldIds(), dex_cache->NumResolvedFields());
for (size_t i = 0; i < dex_cache->NumResolvedFields(); i++) {
ArtField* field = Runtime::Current()->GetClassLinker()->GetResolvedField(i, dex_cache);
- EXPECT_TRUE(field != NULL) << "field_idx=" << i
+ EXPECT_TRUE(field != nullptr) << "field_idx=" << i
<< " " << dex.GetFieldDeclaringClassDescriptor(dex.GetFieldId(i))
<< " " << dex.GetFieldName(dex.GetFieldId(i));
}
@@ -153,14 +152,14 @@ TEST_F(CompilerDriverTest, AbstractMethodErrorStub) {
CompileDirectMethod(NullHandle<mirror::ClassLoader>(), "java.lang.Object", "<init>", "()V");
class_loader = LoadDex("AbstractMethod");
}
- ASSERT_TRUE(class_loader != NULL);
+ ASSERT_TRUE(class_loader != nullptr);
EnsureCompiled(class_loader, "AbstractClass", "foo", "()V", true);
// Create a jobj_ of ConcreteClass, NOT AbstractClass.
jclass c_class = env_->FindClass("ConcreteClass");
jmethodID constructor = env_->GetMethodID(c_class, "<init>", "()V");
jobject jobj_ = env_->NewObject(c_class, constructor);
- ASSERT_TRUE(jobj_ != NULL);
+ ASSERT_TRUE(jobj_ != nullptr);
// Force non-virtual call to AbstractClass foo, will throw AbstractMethodError exception.
env_->CallNonvirtualVoidMethod(jobj_, class_, mid_);
diff --git a/compiler/elf_builder.h b/compiler/elf_builder.h
index b67dd26..32c8cce 100644
--- a/compiler/elf_builder.h
+++ b/compiler/elf_builder.h
@@ -374,7 +374,7 @@ class ElfSymtabBuilder FINAL : public ElfSectionBuilder<ElfTypes> {
}
Elf_Word GetSize() const {
- // 1 is for the implicit NULL symbol.
+ // 1 is for the implicit null symbol.
return symbols_.size() + 1;
}
@@ -578,7 +578,7 @@ class ElfBuilder FINAL {
hash_builder_(".hash", SHT_HASH, SHF_ALLOC, &dynsym_builder_, 0, sizeof(Elf_Word),
sizeof(Elf_Word)),
dynamic_builder_(".dynamic", &dynsym_builder_),
- shstrtab_builder_(".shstrtab", SHT_STRTAB, 0, NULL, 0, 1, 1) {
+ shstrtab_builder_(".shstrtab", SHT_STRTAB, 0, nullptr, 0, 1, 1) {
SetupEhdr();
SetupDynamic();
SetupRequiredSymbols();
@@ -689,7 +689,7 @@ class ElfBuilder FINAL {
// +-------------------------+ (Optional)
// | .debug_line | (Optional)
// +-------------------------+ (Optional)
- // | Elf_Shdr NULL |
+ // | Elf_Shdr null |
// | Elf_Shdr .dynsym |
// | Elf_Shdr .dynstr |
// | Elf_Shdr .hash |
diff --git a/compiler/elf_writer_quick.cc b/compiler/elf_writer_quick.cc
index 949fcab..3b2ca94 100644
--- a/compiler/elf_writer_quick.cc
+++ b/compiler/elf_writer_quick.cc
@@ -148,7 +148,7 @@ bool ElfWriterQuick<ElfTypes>::Write(
RawSection debug_abbrev(".debug_abbrev", SHT_PROGBITS, 0, nullptr, 0, 1, 0);
RawSection debug_str(".debug_str", SHT_PROGBITS, 0, nullptr, 0, 1, 0);
RawSection debug_line(".debug_line", SHT_PROGBITS, 0, nullptr, 0, 1, 0);
- RawSection oat_patches(".oat_patches", SHT_OAT_PATCH, 0, NULL, 0, 1, 0);
+ RawSection oat_patches(".oat_patches", SHT_OAT_PATCH, 0, nullptr, 0, 1, 0);
// Do not add to .oat_patches since we will make the addresses relative.
std::vector<uintptr_t> eh_frame_patches;
diff --git a/compiler/elf_writer_test.cc b/compiler/elf_writer_test.cc
index 3e5ad7b..08523d8 100644
--- a/compiler/elf_writer_test.cc
+++ b/compiler/elf_writer_test.cc
@@ -55,12 +55,12 @@ TEST_F(ElfWriterTest, dlsym) {
LOG(INFO) << "elf_filename=" << elf_filename;
UnreserveImageSpace();
- void* dl_oatdata = NULL;
- void* dl_oatexec = NULL;
- void* dl_oatlastword = NULL;
+ void* dl_oatdata = nullptr;
+ void* dl_oatexec = nullptr;
+ void* dl_oatlastword = nullptr;
std::unique_ptr<File> file(OS::OpenFileForReading(elf_filename.c_str()));
- ASSERT_TRUE(file.get() != NULL);
+ ASSERT_TRUE(file.get() != nullptr);
{
std::string error_msg;
std::unique_ptr<ElfFile> ef(ElfFile::Open(file.get(), false, false, &error_msg));
diff --git a/compiler/image_test.cc b/compiler/image_test.cc
index 8016831..eaf3489 100644
--- a/compiler/image_test.cc
+++ b/compiler/image_test.cc
@@ -68,7 +68,7 @@ TEST_F(ImageTest, WriteRead) {
// TODO: compile_pic should be a test argument.
{
{
- jobject class_loader = NULL;
+ jobject class_loader = nullptr;
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
TimingLogger timings("ImageTest::WriteRead", false, false);
TimingLogger::ScopedTiming t("CompileAll", &timings);
@@ -92,7 +92,7 @@ TEST_F(ImageTest, WriteRead) {
}
// Workound bug that mcld::Linker::emit closes oat_file by reopening as dup_oat.
std::unique_ptr<File> dup_oat(OS::OpenFileReadWrite(oat_file.GetFilename().c_str()));
- ASSERT_TRUE(dup_oat.get() != NULL);
+ ASSERT_TRUE(dup_oat.get() != nullptr);
{
bool success_image =
@@ -107,7 +107,7 @@ TEST_F(ImageTest, WriteRead) {
{
std::unique_ptr<File> file(OS::OpenFileForReading(image_file.GetFilename().c_str()));
- ASSERT_TRUE(file.get() != NULL);
+ ASSERT_TRUE(file.get() != nullptr);
ImageHeader image_header;
ASSERT_EQ(file->ReadFully(&image_header, sizeof(image_header)), true);
ASSERT_TRUE(image_header.IsValid());
@@ -118,12 +118,12 @@ TEST_F(ImageTest, WriteRead) {
ASSERT_TRUE(!heap->GetContinuousSpaces().empty());
gc::space::ContinuousSpace* space = heap->GetNonMovingSpace();
ASSERT_FALSE(space->IsImageSpace());
- ASSERT_TRUE(space != NULL);
+ ASSERT_TRUE(space != nullptr);
ASSERT_TRUE(space->IsMallocSpace());
ASSERT_GE(sizeof(image_header) + space->Size(), static_cast<size_t>(file->GetLength()));
}
- ASSERT_TRUE(compiler_driver_->GetImageClasses() != NULL);
+ ASSERT_TRUE(compiler_driver_->GetImageClasses() != nullptr);
std::unordered_set<std::string> image_classes(*compiler_driver_->GetImageClasses());
// Need to delete the compiler since it has worker threads which are attached to runtime.
@@ -137,7 +137,7 @@ TEST_F(ImageTest, WriteRead) {
writer.reset(nullptr);
runtime_.reset();
- java_lang_dex_file_ = NULL;
+ java_lang_dex_file_ = nullptr;
MemMap::Init();
std::unique_ptr<const DexFile> dex(LoadExpectSingleDexFile(GetLibCoreDexFileName().c_str()));
@@ -145,7 +145,7 @@ TEST_F(ImageTest, WriteRead) {
RuntimeOptions options;
std::string image("-Ximage:");
image.append(image_location.GetFilename());
- options.push_back(std::make_pair(image.c_str(), reinterpret_cast<void*>(NULL)));
+ options.push_back(std::make_pair(image.c_str(), static_cast<void*>(nullptr)));
// By default the compiler this creates will not include patch information.
options.push_back(std::make_pair("-Xnorelocate", nullptr));
@@ -158,7 +158,7 @@ TEST_F(ImageTest, WriteRead) {
// give it away now and then switch to a more managable ScopedObjectAccess.
Thread::Current()->TransitionFromRunnableToSuspended(kNative);
ScopedObjectAccess soa(Thread::Current());
- ASSERT_TRUE(runtime_.get() != NULL);
+ ASSERT_TRUE(runtime_.get() != nullptr);
class_linker_ = runtime_->GetClassLinker();
gc::Heap* heap = Runtime::Current()->GetHeap();
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index a99ef34..fc70d8f 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -129,7 +129,7 @@ bool ImageWriter::Write(const std::string& image_filename,
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
std::unique_ptr<File> oat_file(OS::OpenFileReadWrite(oat_filename.c_str()));
- if (oat_file.get() == NULL) {
+ if (oat_file.get() == nullptr) {
PLOG(ERROR) << "Failed to open oat file " << oat_filename << " for " << oat_location;
return false;
}
@@ -180,7 +180,7 @@ bool ImageWriter::Write(const std::string& image_filename,
std::unique_ptr<File> image_file(OS::CreateEmptyFile(image_filename.c_str()));
ImageHeader* image_header = reinterpret_cast<ImageHeader*>(image_->Begin());
- if (image_file.get() == NULL) {
+ if (image_file.get() == nullptr) {
LOG(ERROR) << "Failed to open image file " << image_filename;
return false;
}
@@ -519,7 +519,7 @@ bool ImageWriter::AllocMemory() {
void ImageWriter::ComputeLazyFieldsForImageClasses() {
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- class_linker->VisitClassesWithoutClassesLock(ComputeLazyFieldsForClassesVisitor, NULL);
+ class_linker->VisitClassesWithoutClassesLock(ComputeLazyFieldsForClassesVisitor, nullptr);
}
bool ImageWriter::ComputeLazyFieldsForClassesVisitor(Class* c, void* /*arg*/) {
@@ -675,7 +675,7 @@ void ImageWriter::ComputeEagerResolvedStringsCallback(Object* obj, void* arg ATT
if (string_id != nullptr) {
// This string occurs in this dex file, assign the dex cache entry.
uint32_t string_idx = dex_file.GetIndexForStringId(*string_id);
- if (dex_cache->GetResolvedString(string_idx) == NULL) {
+ if (dex_cache->GetResolvedString(string_idx) == nullptr) {
dex_cache->SetResolvedString(string_idx, string);
}
}
@@ -697,7 +697,7 @@ struct NonImageClasses {
};
void ImageWriter::PruneNonImageClasses() {
- if (compiler_driver_.GetImageClasses() == NULL) {
+ if (compiler_driver_.GetImageClasses() == nullptr) {
return;
}
Runtime* runtime = Runtime::Current();
@@ -712,7 +712,7 @@ void ImageWriter::PruneNonImageClasses() {
// Remove the undesired classes from the class roots.
for (const std::string& it : non_image_classes) {
- bool result = class_linker->RemoveClass(it.c_str(), NULL);
+ bool result = class_linker->RemoveClass(it.c_str(), nullptr);
DCHECK(result);
}
@@ -724,13 +724,13 @@ void ImageWriter::PruneNonImageClasses() {
DexCache* dex_cache = class_linker->GetDexCache(idx);
for (size_t i = 0; i < dex_cache->NumResolvedTypes(); i++) {
Class* klass = dex_cache->GetResolvedType(i);
- if (klass != NULL && !IsImageClass(klass)) {
- dex_cache->SetResolvedType(i, NULL);
+ if (klass != nullptr && !IsImageClass(klass)) {
+ dex_cache->SetResolvedType(i, nullptr);
}
}
for (size_t i = 0; i < dex_cache->NumResolvedMethods(); i++) {
ArtMethod* method = dex_cache->GetResolvedMethod(i);
- if (method != NULL && !IsImageClass(method->GetDeclaringClass())) {
+ if (method != nullptr && !IsImageClass(method->GetDeclaringClass())) {
dex_cache->SetResolvedMethod(i, resolution_method);
}
}
@@ -777,14 +777,14 @@ void ImageWriter::CheckNonImageClassesRemovedCallback(Object* obj, void* arg) {
void ImageWriter::DumpImageClasses() {
auto image_classes = compiler_driver_.GetImageClasses();
- CHECK(image_classes != NULL);
+ CHECK(image_classes != nullptr);
for (const std::string& image_class : *image_classes) {
LOG(INFO) << " " << image_class;
}
}
void ImageWriter::CalculateObjectBinSlots(Object* obj) {
- DCHECK(obj != NULL);
+ DCHECK(obj != nullptr);
// if it is a string, we want to intern it if its not interned.
if (obj->GetClass()->IsStringClass()) {
// we must be an interned string that was forward referenced and already assigned
@@ -856,7 +856,7 @@ ObjectArray<Object>* ImageWriter::CreateImageRoots() const {
image_roots->Set<false>(ImageHeader::kDexCaches, dex_caches.Get());
image_roots->Set<false>(ImageHeader::kClassRoots, class_linker->GetClassRoots());
for (int i = 0; i < ImageHeader::kImageRootsMax; i++) {
- CHECK(image_roots->Get(i) != NULL);
+ CHECK(image_roots->Get(i) != nullptr);
}
return image_roots.Get();
}
diff --git a/compiler/jni/quick/calling_convention.cc b/compiler/jni/quick/calling_convention.cc
index d25acc7..436fc0c 100644
--- a/compiler/jni/quick/calling_convention.cc
+++ b/compiler/jni/quick/calling_convention.cc
@@ -47,7 +47,7 @@ ManagedRuntimeCallingConvention* ManagedRuntimeCallingConvention::Create(
return new x86_64::X86_64ManagedRuntimeCallingConvention(is_static, is_synchronized, shorty);
default:
LOG(FATAL) << "Unknown InstructionSet: " << instruction_set;
- return NULL;
+ return nullptr;
}
}
@@ -122,7 +122,7 @@ JniCallingConvention* JniCallingConvention::Create(bool is_static, bool is_synch
return new x86_64::X86_64JniCallingConvention(is_static, is_synchronized, shorty);
default:
LOG(FATAL) << "Unknown InstructionSet: " << instruction_set;
- return NULL;
+ return nullptr;
}
}
diff --git a/compiler/jni/quick/jni_compiler.cc b/compiler/jni/quick/jni_compiler.cc
index 2402ea5..6f2cb25 100644
--- a/compiler/jni/quick/jni_compiler.cc
+++ b/compiler/jni/quick/jni_compiler.cc
@@ -152,9 +152,9 @@ CompiledMethod* ArtJniCompileMethodInternal(CompilerDriver* driver,
// References need placing in handle scope and the entry value passing
if (ref_param) {
// Compute handle scope entry, note null is placed in the handle scope but its boxed value
- // must be NULL
+ // must be null.
FrameOffset handle_scope_offset = main_jni_conv->CurrentParamHandleScopeEntryOffset();
- // Check handle scope offset is within frame and doesn't run into the saved segment state
+ // Check handle scope offset is within frame and doesn't run into the saved segment state.
CHECK_LT(handle_scope_offset.Uint32Value(), frame_size);
CHECK_NE(handle_scope_offset.Uint32Value(),
main_jni_conv->SavedLocalReferenceCookieOffset().Uint32Value());
@@ -243,9 +243,9 @@ CompiledMethod* ArtJniCompileMethodInternal(CompilerDriver* driver,
// 7. Iterate over arguments placing values from managed calling convention in
// to the convention required for a native call (shuffling). For references
// place an index/pointer to the reference after checking whether it is
- // NULL (which must be encoded as NULL).
+ // null (which must be encoded as null).
// Note: we do this prior to materializing the JNIEnv* and static's jclass to
- // give as many free registers for the shuffle as possible
+ // give as many free registers for the shuffle as possible.
mr_conv->ResetIterator(FrameOffset(frame_size + main_out_arg_size));
uint32_t args_count = 0;
while (mr_conv->HasNext()) {
@@ -451,7 +451,7 @@ CompiledMethod* ArtJniCompileMethodInternal(CompilerDriver* driver,
ArrayRef<const LinkerPatch>());
}
-// Copy a single parameter from the managed to the JNI calling convention
+// Copy a single parameter from the managed to the JNI calling convention.
static void CopyParameter(Assembler* jni_asm,
ManagedRuntimeCallingConvention* mr_conv,
JniCallingConvention* jni_conv,
@@ -469,7 +469,7 @@ static void CopyParameter(Assembler* jni_asm,
} else {
CHECK(jni_conv->IsCurrentParamOnStack());
}
- // References need placing in handle scope and the entry address passing
+ // References need placing in handle scope and the entry address passing.
if (ref_param) {
null_allowed = mr_conv->IsCurrentArgPossiblyNull();
// Compute handle scope offset. Note null is placed in the handle scope but the jobject
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index 5abd204..d2d38da 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -73,7 +73,7 @@ OatWriter::OatWriter(const std::vector<const DexFile*>& dex_files,
image_file_location_oat_begin_(image_file_location_oat_begin),
image_patch_delta_(image_patch_delta),
key_value_store_(key_value_store),
- oat_header_(NULL),
+ oat_header_(nullptr),
size_dex_file_alignment_(0),
size_executable_offset_alignment_(0),
size_oat_header_(0),
@@ -326,7 +326,7 @@ class OatWriter::InitOatClassesMethodVisitor : public DexMethodVisitor {
ClassReference class_ref(dex_file_, class_def_index_);
CompiledClass* compiled_class = writer_->compiler_driver_->GetCompiledClass(class_ref);
mirror::Class::Status status;
- if (compiled_class != NULL) {
+ if (compiled_class != nullptr) {
status = compiled_class->GetStatus();
} else if (writer_->compiler_driver_->GetVerificationResults()->IsClassRejected(class_ref)) {
status = mirror::Class::kStatusError;
@@ -473,7 +473,7 @@ class OatWriter::InitCodeMethodVisitor : public OatDexMethodVisitor {
ClassReference class_ref(dex_file_, class_def_index_);
CompiledClass* compiled_class = compiler_driver->GetCompiledClass(class_ref);
mirror::Class::Status status;
- if (compiled_class != NULL) {
+ if (compiled_class != nullptr) {
status = compiled_class->GetStatus();
} else if (compiler_driver->GetVerificationResults()->IsClassRejected(class_ref)) {
status = mirror::Class::kStatusError;
@@ -690,7 +690,7 @@ class OatWriter::WriteCodeMethodVisitor : public OatDexMethodVisitor {
OatClass* oat_class = writer_->oat_classes_[oat_class_index_];
const CompiledMethod* compiled_method = oat_class->GetCompiledMethod(class_def_method_index);
- if (compiled_method != NULL) { // ie. not an abstract method
+ if (compiled_method != nullptr) { // ie. not an abstract method
size_t file_offset = file_offset_;
OutputStream* out = out_;
@@ -893,7 +893,7 @@ class OatWriter::WriteMapMethodVisitor : public OatDexMethodVisitor {
OatClass* oat_class = writer_->oat_classes_[oat_class_index_];
const CompiledMethod* compiled_method = oat_class->GetCompiledMethod(class_def_method_index);
- if (compiled_method != NULL) { // ie. not an abstract method
+ if (compiled_method != nullptr) { // ie. not an abstract method
size_t file_offset = file_offset_;
OutputStream* out = out_;
@@ -940,7 +940,7 @@ bool OatWriter::VisitDexMethods(DexMethodVisitor* visitor) {
}
const DexFile::ClassDef& class_def = dex_file->GetClassDef(class_def_index);
const uint8_t* class_data = dex_file->GetClassData(class_def);
- if (class_data != NULL) { // ie not an empty class, such as a marker interface
+ if (class_data != nullptr) { // ie not an empty class, such as a marker interface
ClassDataItemIterator it(*dex_file, class_data);
while (it.HasNextStaticField()) {
it.Next();
@@ -987,7 +987,7 @@ size_t OatWriter::InitOatDexFiles(size_t offset) {
// create the OatDexFiles
for (size_t i = 0; i != dex_files_->size(); ++i) {
const DexFile* dex_file = (*dex_files_)[i];
- CHECK(dex_file != NULL);
+ CHECK(dex_file != nullptr);
OatDexFile* oat_dex_file = new OatDexFile(offset, *dex_file);
oat_dex_files_.push_back(oat_dex_file);
offset += oat_dex_file->SizeOf();
@@ -1471,13 +1471,13 @@ OatWriter::OatClass::OatClass(size_t offset,
oat_method_offsets_offset_from_oat_class += sizeof(method_bitmap_size_);
oat_method_offsets_offset_from_oat_class += method_bitmap_size_;
} else {
- method_bitmap_ = NULL;
+ method_bitmap_ = nullptr;
method_bitmap_size_ = 0;
}
for (size_t i = 0; i < num_methods; i++) {
CompiledMethod* compiled_method = compiled_methods_[i];
- if (compiled_method == NULL) {
+ if (compiled_method == nullptr) {
oat_method_offsets_offsets_from_oat_class_[i] = 0;
} else {
oat_method_offsets_offsets_from_oat_class_[i] = oat_method_offsets_offset_from_oat_class;
diff --git a/compiler/oat_writer.h b/compiler/oat_writer.h
index cc2b39a..8c79b44 100644
--- a/compiler/oat_writer.h
+++ b/compiler/oat_writer.h
@@ -235,13 +235,13 @@ class OatWriter {
// used to validate file position when writing.
size_t offset_;
- // CompiledMethods for each class_def_method_index, or NULL if no method is available.
+ // CompiledMethods for each class_def_method_index, or null if no method is available.
std::vector<CompiledMethod*> compiled_methods_;
// Offset from OatClass::offset_ to the OatMethodOffsets for the
// class_def_method_index. If 0, it means the corresponding
// CompiledMethod entry in OatClass::compiled_methods_ should be
- // NULL and that the OatClass::type_ should be kOatClassBitmap.
+ // null and that the OatClass::type_ should be kOatClassBitmap.
std::vector<uint32_t> oat_method_offsets_offsets_from_oat_class_;
// data to write
@@ -258,12 +258,12 @@ class OatWriter {
// OatClassType::type_ is kOatClassBitmap, a set bit indicates the
// method has an OatMethodOffsets in methods_offsets_, otherwise
// the entry was ommited to save space. If OatClassType::type_ is
- // not is kOatClassBitmap, the bitmap will be NULL.
+ // not is kOatClassBitmap, the bitmap will be null.
BitVector* method_bitmap_;
// OatMethodOffsets and OatMethodHeaders for each CompiledMethod
// present in the OatClass. Note that some may be missing if
- // OatClass::compiled_methods_ contains NULL values (and
+ // OatClass::compiled_methods_ contains null values (and
// oat_method_offsets_offsets_from_oat_class_ should contain 0
// values in this case).
std::vector<OatMethodOffsets> method_offsets_;
diff --git a/compiler/optimizing/gvn.cc b/compiler/optimizing/gvn.cc
index 74848d5..708733e 100644
--- a/compiler/optimizing/gvn.cc
+++ b/compiler/optimizing/gvn.cc
@@ -55,7 +55,7 @@ class ValueSet : public ArenaObject<kArenaAllocMisc> {
buckets_owned_(allocator, num_buckets_, false),
num_entries_(to_copy.num_entries_) {
// ArenaAllocator returns zeroed memory, so entries of buckets_ and
- // buckets_owned_ are initialized to nullptr and false, respectively.
+ // buckets_owned_ are initialized to null and false, respectively.
DCHECK(IsPowerOfTwo(num_buckets_));
if (num_buckets_ == to_copy.num_buckets_) {
// Hash table remains the same size. We copy the bucket pointers and leave
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index bef5896..6ab57b8 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -714,7 +714,7 @@ HConstant* HUnaryOperation::TryStaticEvaluation() const {
// TODO: Implement static evaluation of long unary operations.
//
// Do not exit with a fatal condition here. Instead, simply
- // return `nullptr' to notify the caller that this instruction
+ // return `null' to notify the caller that this instruction
// cannot (yet) be statically evaluated.
return nullptr;
}
@@ -750,7 +750,7 @@ HConstant* HBinaryOperation::GetConstantRight() const {
}
// If `GetConstantRight()` returns one of the input, this returns the other
-// one. Otherwise it returns nullptr.
+// one. Otherwise it returns null.
HInstruction* HBinaryOperation::GetLeastConstantLeft() const {
HInstruction* most_constant_right = GetConstantRight();
if (most_constant_right == nullptr) {
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 1a24cb5..0993a18 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -1634,7 +1634,7 @@ class HUnaryOperation : public HExpression<1> {
// Try to statically evaluate `operation` and return a HConstant
// containing the result of this evaluation. If `operation` cannot
- // be evaluated as a constant, return nullptr.
+ // be evaluated as a constant, return null.
HConstant* TryStaticEvaluation() const;
// Apply this operation to `x`.
@@ -1702,7 +1702,7 @@ class HBinaryOperation : public HExpression<2> {
// Try to statically evaluate `operation` and return a HConstant
// containing the result of this evaluation. If `operation` cannot
- // be evaluated as a constant, return nullptr.
+ // be evaluated as a constant, return null.
HConstant* TryStaticEvaluation() const;
// Apply this operation to `x` and `y`.
@@ -1710,11 +1710,11 @@ class HBinaryOperation : public HExpression<2> {
virtual int64_t Evaluate(int64_t x, int64_t y) const = 0;
// Returns an input that can legally be used as the right input and is
- // constant, or nullptr.
+ // constant, or null.
HConstant* GetConstantRight() const;
// If `GetConstantRight()` returns one of the input, this returns the other
- // one. Otherwise it returns nullptr.
+ // one. Otherwise it returns null.
HInstruction* GetLeastConstantLeft() const;
DECLARE_INSTRUCTION(BinaryOperation);
diff --git a/compiler/optimizing/ssa_liveness_analysis.h b/compiler/optimizing/ssa_liveness_analysis.h
index 03f5545..fe70d3a 100644
--- a/compiler/optimizing/ssa_liveness_analysis.h
+++ b/compiler/optimizing/ssa_liveness_analysis.h
@@ -333,7 +333,8 @@ class LiveInterval : public ArenaObject<kArenaAllocMisc> {
}
if (after_loop == nullptr) {
// Uses are only in the loop.
- first_range_ = last_range_ = range_search_start_ = new (allocator_) LiveRange(start, end, nullptr);
+ first_range_ = last_range_ = range_search_start_ =
+ new (allocator_) LiveRange(start, end, nullptr);
} else if (after_loop->GetStart() <= end) {
first_range_ = range_search_start_ = after_loop;
// There are uses after the loop.
@@ -596,7 +597,7 @@ class LiveInterval : public ArenaObject<kArenaAllocMisc> {
previous->next_ = nullptr;
new_interval->first_range_ = current;
if (range_search_start_ != nullptr && range_search_start_->GetEnd() >= current->GetEnd()) {
- // Search start point is inside `new_interval`. Change it to nullptr
+ // Search start point is inside `new_interval`. Change it to null
// (i.e. the end of the interval) in the original interval.
range_search_start_ = nullptr;
}
@@ -863,7 +864,7 @@ class LiveInterval : public ArenaObject<kArenaAllocMisc> {
defined_by_(defined_by) {}
// Searches for a LiveRange that either covers the given position or is the
- // first next LiveRange. Returns nullptr if no such LiveRange exists. Ranges
+ // first next LiveRange. Returns null if no such LiveRange exists. Ranges
// known to end before `position` can be skipped with `search_start`.
LiveRange* FindRangeAtOrAfter(size_t position, LiveRange* search_start) const {
if (kIsDebugBuild) {
diff --git a/compiler/output_stream_test.cc b/compiler/output_stream_test.cc
index bba9892..fbc9d0d 100644
--- a/compiler/output_stream_test.cc
+++ b/compiler/output_stream_test.cc
@@ -66,7 +66,7 @@ TEST_F(OutputStreamTest, File) {
SetOutputStream(output_stream);
GenerateTestOutput();
std::unique_ptr<File> in(OS::OpenFileForReading(tmp.GetFilename().c_str()));
- EXPECT_TRUE(in.get() != NULL);
+ EXPECT_TRUE(in.get() != nullptr);
std::vector<uint8_t> actual(in->GetLength());
bool readSuccess = in->ReadFully(&actual[0], actual.size());
EXPECT_TRUE(readSuccess);
@@ -76,12 +76,12 @@ TEST_F(OutputStreamTest, File) {
TEST_F(OutputStreamTest, Buffered) {
ScratchFile tmp;
std::unique_ptr<FileOutputStream> file_output_stream(new FileOutputStream(tmp.GetFile()));
- CHECK(file_output_stream.get() != NULL);
+ CHECK(file_output_stream.get() != nullptr);
BufferedOutputStream buffered_output_stream(file_output_stream.release());
SetOutputStream(buffered_output_stream);
GenerateTestOutput();
std::unique_ptr<File> in(OS::OpenFileForReading(tmp.GetFilename().c_str()));
- EXPECT_TRUE(in.get() != NULL);
+ EXPECT_TRUE(in.get() != nullptr);
std::vector<uint8_t> actual(in->GetLength());
bool readSuccess = in->ReadFully(&actual[0], actual.size());
EXPECT_TRUE(readSuccess);
diff --git a/compiler/utils/arm/assembler_arm.h b/compiler/utils/arm/assembler_arm.h
index dd0dba2..313f365 100644
--- a/compiler/utils/arm/assembler_arm.h
+++ b/compiler/utils/arm/assembler_arm.h
@@ -739,17 +739,17 @@ class ArmAssembler : public Assembler {
void GetCurrentThread(ManagedRegister tr) OVERRIDE;
void GetCurrentThread(FrameOffset dest_offset, ManagedRegister scratch) OVERRIDE;
- // Set up out_reg to hold a Object** into the handle scope, or to be NULL if the
+ // Set up out_reg to hold a Object** into the handle scope, or to be null if the
// value is null and null_allowed. in_reg holds a possibly stale reference
// that can be used to avoid loading the handle scope entry to see if the value is
- // NULL.
- void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset, ManagedRegister in_reg,
- bool null_allowed) OVERRIDE;
+ // null.
+ void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset,
+ ManagedRegister in_reg, bool null_allowed) OVERRIDE;
- // Set up out_off to hold a Object** into the handle scope, or to be NULL if the
+ // Set up out_off to hold a Object** into the handle scope, or to be null if the
// value is null and null_allowed.
- void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset, ManagedRegister scratch,
- bool null_allowed) OVERRIDE;
+ void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset,
+ ManagedRegister scratch, bool null_allowed) OVERRIDE;
// src holds a handle scope entry (Object**) load this into dst
void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) OVERRIDE;
diff --git a/compiler/utils/arm64/assembler_arm64.h b/compiler/utils/arm64/assembler_arm64.h
index b7715af..e47b531 100644
--- a/compiler/utils/arm64/assembler_arm64.h
+++ b/compiler/utils/arm64/assembler_arm64.h
@@ -149,14 +149,14 @@ class Arm64Assembler FINAL : public Assembler {
void GetCurrentThread(ManagedRegister tr) OVERRIDE;
void GetCurrentThread(FrameOffset dest_offset, ManagedRegister scratch) OVERRIDE;
- // Set up out_reg to hold a Object** into the handle scope, or to be NULL if the
+ // Set up out_reg to hold a Object** into the handle scope, or to be null if the
// value is null and null_allowed. in_reg holds a possibly stale reference
// that can be used to avoid loading the handle scope entry to see if the value is
- // NULL.
+ // null.
void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset,
ManagedRegister in_reg, bool null_allowed) OVERRIDE;
- // Set up out_off to hold a Object** into the handle scope, or to be NULL if the
+ // Set up out_off to hold a Object** into the handle scope, or to be null if the
// value is null and null_allowed.
void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset,
ManagedRegister scratch, bool null_allowed) OVERRIDE;
diff --git a/compiler/utils/assembler.cc b/compiler/utils/assembler.cc
index 36342c6..b016e74 100644
--- a/compiler/utils/assembler.cc
+++ b/compiler/utils/assembler.cc
@@ -41,8 +41,8 @@ AssemblerBuffer::AssemblerBuffer() {
contents_ = NewContents(kInitialBufferCapacity);
cursor_ = contents_;
limit_ = ComputeLimit(contents_, kInitialBufferCapacity);
- fixup_ = NULL;
- slow_path_ = NULL;
+ fixup_ = nullptr;
+ slow_path_ = nullptr;
#ifndef NDEBUG
has_ensured_capacity_ = false;
fixups_processed_ = false;
@@ -61,7 +61,7 @@ AssemblerBuffer::~AssemblerBuffer() {
void AssemblerBuffer::ProcessFixups(const MemoryRegion& region) {
AssemblerFixup* fixup = fixup_;
- while (fixup != NULL) {
+ while (fixup != nullptr) {
fixup->Process(region, fixup->position());
fixup = fixup->previous();
}
@@ -127,7 +127,7 @@ Assembler* Assembler::Create(InstructionSet instruction_set) {
return new x86_64::X86_64Assembler();
default:
LOG(FATAL) << "Unknown InstructionSet: " << instruction_set;
- return NULL;
+ return nullptr;
}
}
diff --git a/compiler/utils/assembler.h b/compiler/utils/assembler.h
index ebafd3d..2e3a47b 100644
--- a/compiler/utils/assembler.h
+++ b/compiler/utils/assembler.h
@@ -156,7 +156,7 @@ class AssemblerFixup {
// Parent of all queued slow paths, emitted during finalization
class SlowPath {
public:
- SlowPath() : next_(NULL) {}
+ SlowPath() : next_(nullptr) {}
virtual ~SlowPath() {}
Label* Continuation() { return &continuation_; }
@@ -216,20 +216,20 @@ class AssemblerBuffer {
}
void EnqueueSlowPath(SlowPath* slowpath) {
- if (slow_path_ == NULL) {
+ if (slow_path_ == nullptr) {
slow_path_ = slowpath;
} else {
SlowPath* cur = slow_path_;
- for ( ; cur->next_ != NULL ; cur = cur->next_) {}
+ for ( ; cur->next_ != nullptr ; cur = cur->next_) {}
cur->next_ = slowpath;
}
}
void EmitSlowPaths(Assembler* sp_asm) {
SlowPath* cur = slow_path_;
- SlowPath* next = NULL;
- slow_path_ = NULL;
- for ( ; cur != NULL ; cur = next) {
+ SlowPath* next = nullptr;
+ slow_path_ = nullptr;
+ for ( ; cur != nullptr ; cur = next) {
cur->Emit(sp_asm);
next = cur->next_;
delete cur;
@@ -489,14 +489,14 @@ class Assembler {
virtual void GetCurrentThread(FrameOffset dest_offset,
ManagedRegister scratch) = 0;
- // Set up out_reg to hold a Object** into the handle scope, or to be NULL if the
+ // Set up out_reg to hold a Object** into the handle scope, or to be null if the
// value is null and null_allowed. in_reg holds a possibly stale reference
// that can be used to avoid loading the handle scope entry to see if the value is
- // NULL.
+ // null.
virtual void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset,
ManagedRegister in_reg, bool null_allowed) = 0;
- // Set up out_off to hold a Object** into the handle scope, or to be NULL if the
+ // Set up out_off to hold a Object** into the handle scope, or to be null if the
// value is null and null_allowed.
virtual void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset,
ManagedRegister scratch, bool null_allowed) = 0;
diff --git a/compiler/utils/dedupe_set.h b/compiler/utils/dedupe_set.h
index b062a2a..a9a5781 100644
--- a/compiler/utils/dedupe_set.h
+++ b/compiler/utils/dedupe_set.h
@@ -40,8 +40,8 @@ class DedupeSet {
struct HashedKey {
StoreKey* store_ptr;
union {
- HashType store_hash; // Valid if store_ptr != nullptr.
- const HashedInKey* in_key; // Valid if store_ptr == nullptr.
+ HashType store_hash; // Valid if store_ptr != null.
+ const HashedInKey* in_key; // Valid if store_ptr == null.
};
};
diff --git a/compiler/utils/mips/assembler_mips.h b/compiler/utils/mips/assembler_mips.h
index 216cb41..d4acf03 100644
--- a/compiler/utils/mips/assembler_mips.h
+++ b/compiler/utils/mips/assembler_mips.h
@@ -238,17 +238,17 @@ class MipsAssembler FINAL : public Assembler {
void GetCurrentThread(ManagedRegister tr) OVERRIDE;
void GetCurrentThread(FrameOffset dest_offset, ManagedRegister mscratch) OVERRIDE;
- // Set up out_reg to hold a Object** into the handle scope, or to be NULL if the
+ // Set up out_reg to hold a Object** into the handle scope, or to be null if the
// value is null and null_allowed. in_reg holds a possibly stale reference
// that can be used to avoid loading the handle scope entry to see if the value is
- // NULL.
- void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset, ManagedRegister in_reg,
- bool null_allowed) OVERRIDE;
+ // null.
+ void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset,
+ ManagedRegister in_reg, bool null_allowed) OVERRIDE;
- // Set up out_off to hold a Object** into the handle scope, or to be NULL if the
+ // Set up out_off to hold a Object** into the handle scope, or to be null if the
// value is null and null_allowed.
- void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset, ManagedRegister mscratch,
- bool null_allowed) OVERRIDE;
+ void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset,
+ ManagedRegister mscratch, bool null_allowed) OVERRIDE;
// src holds a handle scope entry (Object**) load this into dst
void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) OVERRIDE;
diff --git a/compiler/utils/mips64/assembler_mips64.h b/compiler/utils/mips64/assembler_mips64.h
index 36e74d7..b7f6a9e 100644
--- a/compiler/utils/mips64/assembler_mips64.h
+++ b/compiler/utils/mips64/assembler_mips64.h
@@ -235,14 +235,14 @@ class Mips64Assembler FINAL : public Assembler {
void GetCurrentThread(ManagedRegister tr) OVERRIDE;
void GetCurrentThread(FrameOffset dest_offset, ManagedRegister mscratch) OVERRIDE;
- // Set up out_reg to hold a Object** into the handle scope, or to be NULL if the
+ // Set up out_reg to hold a Object** into the handle scope, or to be null if the
// value is null and null_allowed. in_reg holds a possibly stale reference
// that can be used to avoid loading the handle scope entry to see if the value is
- // NULL.
+ // null.
void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset,
ManagedRegister in_reg, bool null_allowed) OVERRIDE;
- // Set up out_off to hold a Object** into the handle scope, or to be NULL if the
+ // Set up out_off to hold a Object** into the handle scope, or to be null if the
// value is null and null_allowed.
void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset, ManagedRegister
mscratch, bool null_allowed) OVERRIDE;
diff --git a/compiler/utils/x86/assembler_x86.h b/compiler/utils/x86/assembler_x86.h
index a933474..7fc8ef0 100644
--- a/compiler/utils/x86/assembler_x86.h
+++ b/compiler/utils/x86/assembler_x86.h
@@ -576,17 +576,17 @@ class X86Assembler FINAL : public Assembler {
void GetCurrentThread(ManagedRegister tr) OVERRIDE;
void GetCurrentThread(FrameOffset dest_offset, ManagedRegister scratch) OVERRIDE;
- // Set up out_reg to hold a Object** into the handle scope, or to be NULL if the
+ // Set up out_reg to hold a Object** into the handle scope, or to be null if the
// value is null and null_allowed. in_reg holds a possibly stale reference
// that can be used to avoid loading the handle scope entry to see if the value is
- // NULL.
- void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset, ManagedRegister in_reg,
- bool null_allowed) OVERRIDE;
+ // null.
+ void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset,
+ ManagedRegister in_reg, bool null_allowed) OVERRIDE;
- // Set up out_off to hold a Object** into the handle scope, or to be NULL if the
+ // Set up out_off to hold a Object** into the handle scope, or to be null if the
// value is null and null_allowed.
- void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset, ManagedRegister scratch,
- bool null_allowed) OVERRIDE;
+ void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset,
+ ManagedRegister scratch, bool null_allowed) OVERRIDE;
// src holds a handle scope entry (Object**) load this into dst
void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) OVERRIDE;
diff --git a/compiler/utils/x86_64/assembler_x86_64.cc b/compiler/utils/x86_64/assembler_x86_64.cc
index 0344f52..c0ca7ef 100644
--- a/compiler/utils/x86_64/assembler_x86_64.cc
+++ b/compiler/utils/x86_64/assembler_x86_64.cc
@@ -2751,7 +2751,7 @@ void X86_64Assembler::CreateHandleScopeEntry(ManagedRegister mout_reg,
X86_64ManagedRegister out_reg = mout_reg.AsX86_64();
X86_64ManagedRegister in_reg = min_reg.AsX86_64();
if (in_reg.IsNoRegister()) { // TODO(64): && null_allowed
- // Use out_reg as indicator of NULL
+ // Use out_reg as indicator of null.
in_reg = out_reg;
// TODO: movzwl
movl(in_reg.AsCpuRegister(), Address(CpuRegister(RSP), handle_scope_offset));
diff --git a/compiler/utils/x86_64/assembler_x86_64.h b/compiler/utils/x86_64/assembler_x86_64.h
index 79ad8f5..f5327a8 100644
--- a/compiler/utils/x86_64/assembler_x86_64.h
+++ b/compiler/utils/x86_64/assembler_x86_64.h
@@ -711,17 +711,17 @@ class X86_64Assembler FINAL : public Assembler {
void GetCurrentThread(ManagedRegister tr) OVERRIDE;
void GetCurrentThread(FrameOffset dest_offset, ManagedRegister scratch) OVERRIDE;
- // Set up out_reg to hold a Object** into the handle scope, or to be NULL if the
+ // Set up out_reg to hold a Object** into the handle scope, or to be null if the
// value is null and null_allowed. in_reg holds a possibly stale reference
// that can be used to avoid loading the handle scope entry to see if the value is
- // NULL.
- void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset, ManagedRegister in_reg,
- bool null_allowed) OVERRIDE;
+ // null.
+ void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset,
+ ManagedRegister in_reg, bool null_allowed) OVERRIDE;
- // Set up out_off to hold a Object** into the handle scope, or to be NULL if the
+ // Set up out_off to hold a Object** into the handle scope, or to be null if the
// value is null and null_allowed.
- void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset, ManagedRegister scratch,
- bool null_allowed) OVERRIDE;
+ void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset,
+ ManagedRegister scratch, bool null_allowed) OVERRIDE;
// src holds a handle scope entry (Object**) load this into dst
virtual void LoadReferenceFromHandleScope(ManagedRegister dst,
diff --git a/dalvikvm/dalvikvm.cc b/dalvikvm/dalvikvm.cc
index fd03002..85debe4 100644
--- a/dalvikvm/dalvikvm.cc
+++ b/dalvikvm/dalvikvm.cc
@@ -31,19 +31,19 @@ namespace art {
// Determine whether or not the specified method is public.
static bool IsMethodPublic(JNIEnv* env, jclass c, jmethodID method_id) {
ScopedLocalRef<jobject> reflected(env, env->ToReflectedMethod(c, method_id, JNI_FALSE));
- if (reflected.get() == NULL) {
+ if (reflected.get() == nullptr) {
fprintf(stderr, "Failed to get reflected method\n");
return false;
}
// We now have a Method instance. We need to call its
// getModifiers() method.
jclass method_class = env->FindClass("java/lang/reflect/Method");
- if (method_class == NULL) {
+ if (method_class == nullptr) {
fprintf(stderr, "Failed to find class java.lang.reflect.Method\n");
return false;
}
jmethodID mid = env->GetMethodID(method_class, "getModifiers", "()I");
- if (mid == NULL) {
+ if (mid == nullptr) {
fprintf(stderr, "Failed to find java.lang.reflect.Method.getModifiers\n");
return false;
}
@@ -61,7 +61,7 @@ static int InvokeMain(JNIEnv* env, char** argv) {
// it. Create an array and populate it. Note argv[0] is not
// included.
ScopedLocalRef<jobjectArray> args(env, toStringArray(env, argv + 1));
- if (args.get() == NULL) {
+ if (args.get() == nullptr) {
env->ExceptionDescribe();
return EXIT_FAILURE;
}
@@ -73,14 +73,14 @@ static int InvokeMain(JNIEnv* env, char** argv) {
std::replace(class_name.begin(), class_name.end(), '.', '/');
ScopedLocalRef<jclass> klass(env, env->FindClass(class_name.c_str()));
- if (klass.get() == NULL) {
+ if (klass.get() == nullptr) {
fprintf(stderr, "Unable to locate class '%s'\n", class_name.c_str());
env->ExceptionDescribe();
return EXIT_FAILURE;
}
jmethodID method = env->GetStaticMethodID(klass.get(), "main", "([Ljava/lang/String;)V");
- if (method == NULL) {
+ if (method == nullptr) {
fprintf(stderr, "Unable to find static main(String[]) in '%s'\n", class_name.c_str());
env->ExceptionDescribe();
return EXIT_FAILURE;
@@ -106,7 +106,7 @@ static int InvokeMain(JNIEnv* env, char** argv) {
// Parse arguments. Most of it just gets passed through to the runtime.
// The JNI spec defines a handful of standard arguments.
static int dalvikvm(int argc, char** argv) {
- setvbuf(stdout, NULL, _IONBF, 0);
+ setvbuf(stdout, nullptr, _IONBF, 0);
// Skip over argv[0].
argv++;
@@ -125,8 +125,8 @@ static int dalvikvm(int argc, char** argv) {
//
// [Do we need to catch & handle "-jar" here?]
bool need_extra = false;
- const char* lib = NULL;
- const char* what = NULL;
+ const char* lib = nullptr;
+ const char* what = nullptr;
int curr_opt, arg_idx;
for (curr_opt = arg_idx = 0; arg_idx < argc; arg_idx++) {
if (argv[arg_idx][0] != '-' && !need_extra) {
@@ -172,8 +172,8 @@ static int dalvikvm(int argc, char** argv) {
init_args.ignoreUnrecognized = JNI_FALSE;
// Start the runtime. The current thread becomes the main thread.
- JavaVM* vm = NULL;
- JNIEnv* env = NULL;
+ JavaVM* vm = nullptr;
+ JNIEnv* env = nullptr;
if (JNI_CreateJavaVM(&vm, &env, &init_args) != JNI_OK) {
fprintf(stderr, "Failed to initialize runtime (check log for details)\n");
return EXIT_FAILURE;
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 9c01a0f..2a3a346 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -1518,7 +1518,7 @@ class Dex2Oat FINAL {
static size_t OpenDexFiles(const std::vector<const char*>& dex_filenames,
const std::vector<const char*>& dex_locations,
std::vector<std::unique_ptr<const DexFile>>* dex_files) {
- DCHECK(dex_files != nullptr) << "OpenDexFiles out-param is NULL";
+ DCHECK(dex_files != nullptr) << "OpenDexFiles out-param is nullptr";
size_t failure_count = 0;
for (size_t i = 0; i < dex_filenames.size(); i++) {
const char* dex_filename = dex_filenames[i];
@@ -1559,7 +1559,7 @@ class Dex2Oat FINAL {
static void OpenClassPathFiles(const std::string& class_path,
std::vector<const DexFile*> dex_files,
std::vector<std::unique_ptr<const DexFile>>* opened_dex_files) {
- DCHECK(opened_dex_files != nullptr) << "OpenClassPathFiles out-param is NULL";
+ DCHECK(opened_dex_files != nullptr) << "OpenClassPathFiles out-param is nullptr";
std::vector<std::string> parsed;
Split(class_path, ':', &parsed);
// Take Locks::mutator_lock_ so that lock ordering on the ClassLinker::dex_lock_ is maintained.
diff --git a/disassembler/disassembler.cc b/disassembler/disassembler.cc
index c05c3ed..6334717 100644
--- a/disassembler/disassembler.cc
+++ b/disassembler/disassembler.cc
@@ -42,7 +42,7 @@ Disassembler* Disassembler::Create(InstructionSet instruction_set, DisassemblerO
return new x86::DisassemblerX86(options, true);
} else {
UNIMPLEMENTED(FATAL) << "no disassembler for " << instruction_set;
- return NULL;
+ return nullptr;
}
}
diff --git a/imgdiag/imgdiag.cc b/imgdiag/imgdiag.cc
index 34a4c14..1056fe1 100644
--- a/imgdiag/imgdiag.cc
+++ b/imgdiag/imgdiag.cc
@@ -97,7 +97,8 @@ class ImgDiagDumper {
{
struct stat sts;
- std::string proc_pid_str = StringPrintf("/proc/%ld", static_cast<long>(image_diff_pid)); // NOLINT [runtime/int]
+ std::string proc_pid_str =
+ StringPrintf("/proc/%ld", static_cast<long>(image_diff_pid)); // NOLINT [runtime/int]
if (stat(proc_pid_str.c_str(), &sts) == -1) {
os << "Process does not exist";
return false;
@@ -144,7 +145,8 @@ class ImgDiagDumper {
const size_t pointer_size = InstructionSetPointerSize(
Runtime::Current()->GetInstructionSet());
- std::string file_name = StringPrintf("/proc/%ld/mem", static_cast<long>(image_diff_pid)); // NOLINT [runtime/int]
+ std::string file_name =
+ StringPrintf("/proc/%ld/mem", static_cast<long>(image_diff_pid)); // NOLINT [runtime/int]
size_t boot_map_size = boot_map.end - boot_map.start;
@@ -197,8 +199,8 @@ class ImgDiagDumper {
return false;
}
- std::string page_map_file_name = StringPrintf("/proc/%ld/pagemap",
- static_cast<long>(image_diff_pid)); // NOLINT [runtime/int]
+ std::string page_map_file_name = StringPrintf(
+ "/proc/%ld/pagemap", static_cast<long>(image_diff_pid)); // NOLINT [runtime/int]
auto page_map_file = std::unique_ptr<File>(OS::OpenFileForReading(page_map_file_name.c_str()));
if (page_map_file == nullptr) {
os << "Failed to open " << page_map_file_name << " for reading: " << strerror(errno);
@@ -226,8 +228,10 @@ class ImgDiagDumper {
return false;
}
- std::set<size_t> dirty_page_set_remote; // Set of the remote virtual page indices that are dirty
- std::set<size_t> dirty_page_set_local; // Set of the local virtual page indices that are dirty
+ // Set of the remote virtual page indices that are dirty
+ std::set<size_t> dirty_page_set_remote;
+ // Set of the local virtual page indices that are dirty
+ std::set<size_t> dirty_page_set_local;
size_t different_int32s = 0;
size_t different_bytes = 0;
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index d6d8808..f2e35af 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -144,7 +144,7 @@ class OatSymbolizer FINAL : public CodeOutput {
std::vector<const OatFile::OatDexFile*> oat_dex_files = oat_file_->GetOatDexFiles();
for (size_t i = 0; i < oat_dex_files.size(); i++) {
const OatFile::OatDexFile* oat_dex_file = oat_dex_files[i];
- CHECK(oat_dex_file != NULL);
+ CHECK(oat_dex_file != nullptr);
WalkOatDexFile(oat_dex_file, callback);
}
}
diff --git a/runtime/arch/arm/context_arm.h b/runtime/arch/arm/context_arm.h
index 5bdeda7..a58aecb 100644
--- a/runtime/arch/arm/context_arm.h
+++ b/runtime/arch/arm/context_arm.h
@@ -80,7 +80,7 @@ class ArmContext : public Context {
NO_RETURN void DoLongJump() OVERRIDE;
private:
- // Pointers to register locations, initialized to NULL or the specific registers below.
+ // Pointers to register locations, initialized to null or the specific registers below.
uintptr_t* gprs_[kNumberOfCoreRegisters];
uint32_t* fprs_[kNumberOfSRegisters];
// Hold values for sp and pc if they are not located within a stack frame.
diff --git a/runtime/arch/arm/entrypoints_init_arm.cc b/runtime/arch/arm/entrypoints_init_arm.cc
index 055b5ab..f14dfc2 100644
--- a/runtime/arch/arm/entrypoints_init_arm.cc
+++ b/runtime/arch/arm/entrypoints_init_arm.cc
@@ -143,11 +143,16 @@ void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
qpoints->pQuickImtConflictTrampoline = art_quick_imt_conflict_trampoline;
qpoints->pQuickResolutionTrampoline = art_quick_resolution_trampoline;
qpoints->pQuickToInterpreterBridge = art_quick_to_interpreter_bridge;
- qpoints->pInvokeDirectTrampolineWithAccessCheck = art_quick_invoke_direct_trampoline_with_access_check;
- qpoints->pInvokeInterfaceTrampolineWithAccessCheck = art_quick_invoke_interface_trampoline_with_access_check;
- qpoints->pInvokeStaticTrampolineWithAccessCheck = art_quick_invoke_static_trampoline_with_access_check;
- qpoints->pInvokeSuperTrampolineWithAccessCheck = art_quick_invoke_super_trampoline_with_access_check;
- qpoints->pInvokeVirtualTrampolineWithAccessCheck = art_quick_invoke_virtual_trampoline_with_access_check;
+ qpoints->pInvokeDirectTrampolineWithAccessCheck =
+ art_quick_invoke_direct_trampoline_with_access_check;
+ qpoints->pInvokeInterfaceTrampolineWithAccessCheck =
+ art_quick_invoke_interface_trampoline_with_access_check;
+ qpoints->pInvokeStaticTrampolineWithAccessCheck =
+ art_quick_invoke_static_trampoline_with_access_check;
+ qpoints->pInvokeSuperTrampolineWithAccessCheck =
+ art_quick_invoke_super_trampoline_with_access_check;
+ qpoints->pInvokeVirtualTrampolineWithAccessCheck =
+ art_quick_invoke_virtual_trampoline_with_access_check;
// Thread
qpoints->pTestSuspend = art_quick_test_suspend;
diff --git a/runtime/arch/arm/fault_handler_arm.cc b/runtime/arch/arm/fault_handler_arm.cc
index 3e8b367..d84cb53 100644
--- a/runtime/arch/arm/fault_handler_arm.cc
+++ b/runtime/arch/arm/fault_handler_arm.cc
@@ -56,7 +56,7 @@ void FaultManager::HandleNestedSignal(int sig ATTRIBUTE_UNUSED, siginfo_t* info
struct ucontext *uc = reinterpret_cast<struct ucontext*>(context);
struct sigcontext *sc = reinterpret_cast<struct sigcontext*>(&uc->uc_mcontext);
Thread* self = Thread::Current();
- CHECK(self != nullptr); // This will cause a SIGABRT if self is nullptr.
+ CHECK(self != nullptr); // This will cause a SIGABRT if self is null.
sc->arm_r0 = reinterpret_cast<uintptr_t>(*self->GetNestedSignalState());
sc->arm_r1 = 1;
diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S
index 9bd8ba7..8f6162f 100644
--- a/runtime/arch/arm/quick_entrypoints_arm.S
+++ b/runtime/arch/arm/quick_entrypoints_arm.S
@@ -320,7 +320,7 @@ ONE_ARG_RUNTIME_EXCEPTION art_quick_throw_no_such_method, artThrowNoSuchMethodFr
* The helper will attempt to locate the target and return a 64-bit result in r0/r1 consisting
* of the target Method* in r0 and method->code_ in r1.
*
- * If unsuccessful, the helper will return NULL/NULL. There will bea pending exception in the
+     * If unsuccessful, the helper will return null/null. There will be a pending exception in the
* thread and we branch to another stub to deliver it.
*
* On success this wrapper will restore arguments and *jump* to the target, leaving the lr
@@ -359,7 +359,7 @@ INVOKE_TRAMPOLINE art_quick_invoke_virtual_trampoline_with_access_check, artInvo
* Quick invocation stub internal.
* On entry:
* r0 = method pointer
- * r1 = argument array or NULL for no argument methods
+ * r1 = argument array or null for no argument methods
* r2 = size of argument array in bytes
* r3 = (managed) thread pointer
* [sp] = JValue* result
@@ -409,7 +409,7 @@ ENTRY art_quick_invoke_stub_internal
add r0, sp, #4 @ pass stack pointer + method ptr as dest for memcpy
bl memcpy @ memcpy (dest, src, bytes)
mov ip, #0 @ set ip to 0
- str ip, [sp] @ store NULL for method* at bottom of frame
+ str ip, [sp] @ store null for method* at bottom of frame
ldr ip, [r11, #48] @ load fp register argument array pointer
vldm ip, {s0-s15} @ copy s0 - s15
diff --git a/runtime/arch/arm64/context_arm64.h b/runtime/arch/arm64/context_arm64.h
index f486779..0383ad6 100644
--- a/runtime/arch/arm64/context_arm64.h
+++ b/runtime/arch/arm64/context_arm64.h
@@ -80,7 +80,7 @@ class Arm64Context : public Context {
NO_RETURN void DoLongJump() OVERRIDE;
private:
- // Pointers to register locations, initialized to NULL or the specific registers below.
+ // Pointers to register locations, initialized to null or the specific registers below.
uintptr_t* gprs_[kNumberOfXRegisters];
uint64_t * fprs_[kNumberOfDRegisters];
// Hold values for sp and pc if they are not located within a stack frame.
diff --git a/runtime/arch/arm64/entrypoints_init_arm64.cc b/runtime/arch/arm64/entrypoints_init_arm64.cc
index 6c787e3..4b12f00 100644
--- a/runtime/arch/arm64/entrypoints_init_arm64.cc
+++ b/runtime/arch/arm64/entrypoints_init_arm64.cc
@@ -105,7 +105,7 @@ void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
qpoints->pUnlockObject = art_quick_unlock_object;
// Math
- // TODO nullptr entrypoints not needed for ARM64 - generate inline.
+ // TODO null entrypoints not needed for ARM64 - generate inline.
qpoints->pCmpgDouble = nullptr;
qpoints->pCmpgFloat = nullptr;
qpoints->pCmplDouble = nullptr;
@@ -135,11 +135,16 @@ void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
qpoints->pQuickImtConflictTrampoline = art_quick_imt_conflict_trampoline;
qpoints->pQuickResolutionTrampoline = art_quick_resolution_trampoline;
qpoints->pQuickToInterpreterBridge = art_quick_to_interpreter_bridge;
- qpoints->pInvokeDirectTrampolineWithAccessCheck = art_quick_invoke_direct_trampoline_with_access_check;
- qpoints->pInvokeInterfaceTrampolineWithAccessCheck = art_quick_invoke_interface_trampoline_with_access_check;
- qpoints->pInvokeStaticTrampolineWithAccessCheck = art_quick_invoke_static_trampoline_with_access_check;
- qpoints->pInvokeSuperTrampolineWithAccessCheck = art_quick_invoke_super_trampoline_with_access_check;
- qpoints->pInvokeVirtualTrampolineWithAccessCheck = art_quick_invoke_virtual_trampoline_with_access_check;
+ qpoints->pInvokeDirectTrampolineWithAccessCheck =
+ art_quick_invoke_direct_trampoline_with_access_check;
+ qpoints->pInvokeInterfaceTrampolineWithAccessCheck =
+ art_quick_invoke_interface_trampoline_with_access_check;
+ qpoints->pInvokeStaticTrampolineWithAccessCheck =
+ art_quick_invoke_static_trampoline_with_access_check;
+ qpoints->pInvokeSuperTrampolineWithAccessCheck =
+ art_quick_invoke_super_trampoline_with_access_check;
+ qpoints->pInvokeVirtualTrampolineWithAccessCheck =
+ art_quick_invoke_virtual_trampoline_with_access_check;
// Thread
qpoints->pTestSuspend = art_quick_test_suspend;
diff --git a/runtime/arch/arm64/fault_handler_arm64.cc b/runtime/arch/arm64/fault_handler_arm64.cc
index c914d85..0448c76 100644
--- a/runtime/arch/arm64/fault_handler_arm64.cc
+++ b/runtime/arch/arm64/fault_handler_arm64.cc
@@ -45,7 +45,7 @@ void FaultManager::HandleNestedSignal(int sig ATTRIBUTE_UNUSED, siginfo_t* info
struct ucontext *uc = reinterpret_cast<struct ucontext*>(context);
struct sigcontext *sc = reinterpret_cast<struct sigcontext*>(&uc->uc_mcontext);
Thread* self = Thread::Current();
- CHECK(self != nullptr); // This will cause a SIGABRT if self is nullptr.
+ CHECK(self != nullptr); // This will cause a SIGABRT if self is null.
sc->regs[0] = reinterpret_cast<uintptr_t>(*self->GetNestedSignalState());
sc->regs[1] = 1;
diff --git a/runtime/arch/arm64/quick_entrypoints_arm64.S b/runtime/arch/arm64/quick_entrypoints_arm64.S
index 4079436..cbd4b7c 100644
--- a/runtime/arch/arm64/quick_entrypoints_arm64.S
+++ b/runtime/arch/arm64/quick_entrypoints_arm64.S
@@ -466,7 +466,7 @@ ONE_ARG_RUNTIME_EXCEPTION art_quick_throw_no_such_method, artThrowNoSuchMethodFr
* The helper will attempt to locate the target and return a 128-bit result in x0/x1 consisting
* of the target Method* in x0 and method->code_ in x1.
*
- * If unsuccessful, the helper will return NULL/????. There will be a pending exception in the
+ * If unsuccessful, the helper will return null/????. There will be a pending exception in the
* thread and we branch to another stub to deliver it.
*
* On success this wrapper will restore arguments and *jump* to the target, leaving the lr
@@ -565,7 +565,7 @@ SAVE_SIZE_AND_METHOD=SAVE_SIZE+STACK_REFERENCE_SIZE
// W2 - args length
// X9 - destination address.
// W10 - temporary
- add x9, sp, #4 // Destination address is bottom of stack + NULL.
+ add x9, sp, #4 // Destination address is bottom of stack + null.
// Use \@ to differentiate between macro invocations.
.LcopyParams\@:
@@ -579,7 +579,7 @@ SAVE_SIZE_AND_METHOD=SAVE_SIZE+STACK_REFERENCE_SIZE
.LendCopyParams\@:
- // Store NULL into StackReference<Method>* at bottom of frame.
+ // Store null into StackReference<Method>* at bottom of frame.
str wzr, [sp]
#if (STACK_REFERENCE_SIZE != 4)
diff --git a/runtime/arch/mips/context_mips.h b/runtime/arch/mips/context_mips.h
index cbad3f963..d01b95e 100644
--- a/runtime/arch/mips/context_mips.h
+++ b/runtime/arch/mips/context_mips.h
@@ -79,7 +79,7 @@ class MipsContext : public Context {
NO_RETURN void DoLongJump() OVERRIDE;
private:
- // Pointers to registers in the stack, initialized to NULL except for the special cases below.
+ // Pointers to registers in the stack, initialized to null except for the special cases below.
uintptr_t* gprs_[kNumberOfCoreRegisters];
uint32_t* fprs_[kNumberOfFRegisters];
// Hold values for sp and ra (return address) if they are not located within a stack frame.
diff --git a/runtime/arch/mips/entrypoints_init_mips.cc b/runtime/arch/mips/entrypoints_init_mips.cc
index e3ec27c..a980a86 100644
--- a/runtime/arch/mips/entrypoints_init_mips.cc
+++ b/runtime/arch/mips/entrypoints_init_mips.cc
@@ -199,7 +199,7 @@ void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
static_assert(IsDirectEntrypoint(kQuickD2iz), "Direct C stub not marked direct.");
qpoints->pF2iz = art_f2i;
static_assert(IsDirectEntrypoint(kQuickF2iz), "Direct C stub not marked direct.");
- qpoints->pIdivmod = NULL;
+ qpoints->pIdivmod = nullptr;
qpoints->pD2l = art_d2l;
static_assert(IsDirectEntrypoint(kQuickD2l), "Direct C stub not marked direct.");
qpoints->pF2l = art_f2l;
@@ -228,19 +228,24 @@ void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
qpoints->pQuickImtConflictTrampoline = art_quick_imt_conflict_trampoline;
qpoints->pQuickResolutionTrampoline = art_quick_resolution_trampoline;
qpoints->pQuickToInterpreterBridge = art_quick_to_interpreter_bridge;
- qpoints->pInvokeDirectTrampolineWithAccessCheck = art_quick_invoke_direct_trampoline_with_access_check;
+ qpoints->pInvokeDirectTrampolineWithAccessCheck =
+ art_quick_invoke_direct_trampoline_with_access_check;
static_assert(!IsDirectEntrypoint(kQuickInvokeDirectTrampolineWithAccessCheck),
"Non-direct C stub marked direct.");
- qpoints->pInvokeInterfaceTrampolineWithAccessCheck = art_quick_invoke_interface_trampoline_with_access_check;
+ qpoints->pInvokeInterfaceTrampolineWithAccessCheck =
+ art_quick_invoke_interface_trampoline_with_access_check;
static_assert(!IsDirectEntrypoint(kQuickInvokeInterfaceTrampolineWithAccessCheck),
"Non-direct C stub marked direct.");
- qpoints->pInvokeStaticTrampolineWithAccessCheck = art_quick_invoke_static_trampoline_with_access_check;
+ qpoints->pInvokeStaticTrampolineWithAccessCheck =
+ art_quick_invoke_static_trampoline_with_access_check;
static_assert(!IsDirectEntrypoint(kQuickInvokeStaticTrampolineWithAccessCheck),
"Non-direct C stub marked direct.");
- qpoints->pInvokeSuperTrampolineWithAccessCheck = art_quick_invoke_super_trampoline_with_access_check;
+ qpoints->pInvokeSuperTrampolineWithAccessCheck =
+ art_quick_invoke_super_trampoline_with_access_check;
static_assert(!IsDirectEntrypoint(kQuickInvokeSuperTrampolineWithAccessCheck),
"Non-direct C stub marked direct.");
- qpoints->pInvokeVirtualTrampolineWithAccessCheck = art_quick_invoke_virtual_trampoline_with_access_check;
+ qpoints->pInvokeVirtualTrampolineWithAccessCheck =
+ art_quick_invoke_virtual_trampoline_with_access_check;
static_assert(!IsDirectEntrypoint(kQuickInvokeVirtualTrampolineWithAccessCheck),
"Non-direct C stub marked direct.");
diff --git a/runtime/arch/mips/quick_entrypoints_mips.S b/runtime/arch/mips/quick_entrypoints_mips.S
index 0c2250e..622c48f 100644
--- a/runtime/arch/mips/quick_entrypoints_mips.S
+++ b/runtime/arch/mips/quick_entrypoints_mips.S
@@ -446,7 +446,7 @@ END art_quick_throw_no_such_method
* The helper will attempt to locate the target and return a 64-bit result in $v0/$v1 consisting
* of the target Method* in $v0 and method->code_ in $v1.
*
- * If unsuccessful, the helper will return NULL/NULL. There will be a pending exception in the
+ * If unsuccessful, the helper will return null/null. There will be a pending exception in the
* thread and we branch to another stub to deliver it.
*
* On success this wrapper will restore arguments and *jump* to the target, leaving the lr
@@ -484,7 +484,7 @@ INVOKE_TRAMPOLINE art_quick_invoke_virtual_trampoline_with_access_check, artInvo
* Invocation stub for quick code.
* On entry:
* a0 = method pointer
- * a1 = argument array or NULL for no argument methods
+ * a1 = argument array or null for no argument methods
* a2 = size of argument array in bytes
* a3 = (managed) thread pointer
* [sp + 16] = JValue* result
@@ -520,7 +520,7 @@ ENTRY art_quick_invoke_stub
lw $a3, 12($sp) # copy arg value for a3
lw $t9, MIRROR_ART_METHOD_QUICK_CODE_OFFSET_32($a0) # get pointer to the code
jalr $t9 # call the method
- sw $zero, 0($sp) # store NULL for method* at bottom of frame
+ sw $zero, 0($sp) # store null for method* at bottom of frame
move $sp, $fp # restore the stack
lw $s0, 0($sp)
.cfi_restore 16
diff --git a/runtime/arch/mips64/context_mips64.h b/runtime/arch/mips64/context_mips64.h
index 2cc2b8d..ebc036c 100644
--- a/runtime/arch/mips64/context_mips64.h
+++ b/runtime/arch/mips64/context_mips64.h
@@ -79,7 +79,7 @@ class Mips64Context : public Context {
NO_RETURN void DoLongJump() OVERRIDE;
private:
- // Pointers to registers in the stack, initialized to NULL except for the special cases below.
+ // Pointers to registers in the stack, initialized to null except for the special cases below.
uintptr_t* gprs_[kNumberOfGpuRegisters];
uint64_t* fprs_[kNumberOfFpuRegisters];
// Hold values for sp and ra (return address) if they are not located within a stack frame.
diff --git a/runtime/arch/mips64/entrypoints_init_mips64.cc b/runtime/arch/mips64/entrypoints_init_mips64.cc
index 4a3bf02..b328708 100644
--- a/runtime/arch/mips64/entrypoints_init_mips64.cc
+++ b/runtime/arch/mips64/entrypoints_init_mips64.cc
@@ -135,15 +135,15 @@ void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
qpoints->pL2f = art_l2f;
qpoints->pD2iz = art_d2i;
qpoints->pF2iz = art_f2i;
- qpoints->pIdivmod = NULL;
+ qpoints->pIdivmod = nullptr;
qpoints->pD2l = art_d2l;
qpoints->pF2l = art_f2l;
qpoints->pLdiv = artLdiv;
qpoints->pLmod = artLmod;
qpoints->pLmul = artLmul;
- qpoints->pShlLong = NULL;
- qpoints->pShrLong = NULL;
- qpoints->pUshrLong = NULL;
+ qpoints->pShlLong = nullptr;
+ qpoints->pShrLong = nullptr;
+ qpoints->pUshrLong = nullptr;
// Intrinsics
qpoints->pIndexOf = art_quick_indexof;
@@ -154,11 +154,16 @@ void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
qpoints->pQuickImtConflictTrampoline = art_quick_imt_conflict_trampoline;
qpoints->pQuickResolutionTrampoline = art_quick_resolution_trampoline;
qpoints->pQuickToInterpreterBridge = art_quick_to_interpreter_bridge;
- qpoints->pInvokeDirectTrampolineWithAccessCheck = art_quick_invoke_direct_trampoline_with_access_check;
- qpoints->pInvokeInterfaceTrampolineWithAccessCheck = art_quick_invoke_interface_trampoline_with_access_check;
- qpoints->pInvokeStaticTrampolineWithAccessCheck = art_quick_invoke_static_trampoline_with_access_check;
- qpoints->pInvokeSuperTrampolineWithAccessCheck = art_quick_invoke_super_trampoline_with_access_check;
- qpoints->pInvokeVirtualTrampolineWithAccessCheck = art_quick_invoke_virtual_trampoline_with_access_check;
+ qpoints->pInvokeDirectTrampolineWithAccessCheck =
+ art_quick_invoke_direct_trampoline_with_access_check;
+ qpoints->pInvokeInterfaceTrampolineWithAccessCheck =
+ art_quick_invoke_interface_trampoline_with_access_check;
+ qpoints->pInvokeStaticTrampolineWithAccessCheck =
+ art_quick_invoke_static_trampoline_with_access_check;
+ qpoints->pInvokeSuperTrampolineWithAccessCheck =
+ art_quick_invoke_super_trampoline_with_access_check;
+ qpoints->pInvokeVirtualTrampolineWithAccessCheck =
+ art_quick_invoke_virtual_trampoline_with_access_check;
// Thread
qpoints->pTestSuspend = art_quick_test_suspend;
diff --git a/runtime/arch/mips64/quick_entrypoints_mips64.S b/runtime/arch/mips64/quick_entrypoints_mips64.S
index 3d502e6..bf18dd5 100644
--- a/runtime/arch/mips64/quick_entrypoints_mips64.S
+++ b/runtime/arch/mips64/quick_entrypoints_mips64.S
@@ -503,7 +503,7 @@ END art_quick_throw_no_such_method
* The helper will attempt to locate the target and return a 128-bit result in $v0/$v1 consisting
* of the target Method* in $v0 and method->code_ in $v1.
*
- * If unsuccessful, the helper will return NULL/NULL. There will be a pending exception in the
+ * If unsuccessful, the helper will return null/null. There will be a pending exception in the
* thread and we branch to another stub to deliver it.
*
* On success this wrapper will restore arguments and *jump* to the target, leaving the ra
@@ -656,7 +656,7 @@ call_fn:
# call method (a0 and a1 have been untouched)
lwu $a1, 0($a1) # make a1 = this ptr
sw $a1, 4($sp) # copy this ptr (skip 4 bytes for method*)
- sw $zero, 0($sp) # store NULL for method* at bottom of frame
+ sw $zero, 0($sp) # store null for method* at bottom of frame
ld $t9, MIRROR_ART_METHOD_QUICK_CODE_OFFSET_64($a0) # get pointer to the code
jalr $t9 # call the method
nop
@@ -758,7 +758,7 @@ ENTRY art_quick_invoke_static_stub
call_sfn:
# call method (a0 has been untouched)
- sw $zero, 0($sp) # store NULL for method* at bottom of frame
+ sw $zero, 0($sp) # store null for method* at bottom of frame
ld $t9, MIRROR_ART_METHOD_QUICK_CODE_OFFSET_64($a0) # get pointer to the code
jalr $t9 # call the method
nop
diff --git a/runtime/arch/stub_test.cc b/runtime/arch/stub_test.cc
index 9cccf7c..0d9a888 100644
--- a/runtime/arch/stub_test.cc
+++ b/runtime/arch/stub_test.cc
@@ -117,7 +117,7 @@ class StubTest : public CommonRuntimeTest {
"add sp, sp, #20\n\t"
"blx r3\n\t" // Call the stub
- "add sp, sp, #12\n\t" // Pop nullptr and padding
+ "add sp, sp, #12\n\t" // Pop null and padding
".cfi_adjust_cfa_offset -12\n\t"
"pop {r1-r12, lr}\n\t" // Restore state
".cfi_adjust_cfa_offset -52\n\t"
@@ -269,7 +269,7 @@ class StubTest : public CommonRuntimeTest {
"pushq (%%rsp)\n\t" // & 16B alignment padding
".cfi_adjust_cfa_offset 16\n\t"
"call *%%rax\n\t" // Call the stub
- "addq $16, %%rsp\n\t" // Pop nullptr and padding
+ "addq $16, %%rsp\n\t" // Pop null and padding
".cfi_adjust_cfa_offset -16\n\t"
: "=a" (result)
// Use the result from rax
@@ -344,7 +344,7 @@ class StubTest : public CommonRuntimeTest {
"add sp, sp, #24\n\t"
"blx r3\n\t" // Call the stub
- "add sp, sp, #12\n\t" // Pop nullptr and padding
+ "add sp, sp, #12\n\t" // Pop null and padding
".cfi_adjust_cfa_offset -12\n\t"
"pop {r1-r12, lr}\n\t" // Restore state
".cfi_adjust_cfa_offset -52\n\t"
@@ -495,7 +495,7 @@ class StubTest : public CommonRuntimeTest {
"pushq (%%rsp)\n\t" // & 16B alignment padding
".cfi_adjust_cfa_offset 16\n\t"
"call *%%rbx\n\t" // Call the stub
- "addq $16, %%rsp\n\t" // Pop nullptr and padding
+ "addq $16, %%rsp\n\t" // Pop null and padding
".cfi_adjust_cfa_offset -16\n\t"
: "=a" (result)
// Use the result from rax
@@ -1032,7 +1032,7 @@ TEST_F(StubTest, AllocObject) {
}
{
- // We can use nullptr in the second argument as we do not need a method here (not used in
+ // We can use null in the second argument as we do not need a method here (not used in
// resolved/initialized cases)
size_t result = Invoke3(reinterpret_cast<size_t>(c.Get()), reinterpret_cast<size_t>(nullptr), 0U,
StubTest::GetEntrypoint(self, kQuickAllocObjectResolved),
@@ -1046,7 +1046,7 @@ TEST_F(StubTest, AllocObject) {
}
{
- // We can use nullptr in the second argument as we do not need a method here (not used in
+ // We can use null in the second argument as we do not need a method here (not used in
// resolved/initialized cases)
size_t result = Invoke3(reinterpret_cast<size_t>(c.Get()), reinterpret_cast<size_t>(nullptr), 0U,
StubTest::GetEntrypoint(self, kQuickAllocObjectInitialized),
@@ -1166,7 +1166,7 @@ TEST_F(StubTest, AllocObjectArray) {
}
{
- // We can use nullptr in the second argument as we do not need a method here (not used in
+ // We can use null in the second argument as we do not need a method here (not used in
// resolved/initialized cases)
size_t result = Invoke3(reinterpret_cast<size_t>(c.Get()), 10U,
reinterpret_cast<size_t>(nullptr),
@@ -1788,9 +1788,9 @@ static void TestFields(Thread* self, StubTest* test, Primitive::Type test_type)
JNIEnv* env = Thread::Current()->GetJniEnv();
jclass jc = env->FindClass("AllFields");
- CHECK(jc != NULL);
+ CHECK(jc != nullptr);
jobject o = env->AllocObject(jc);
- CHECK(o != NULL);
+ CHECK(o != nullptr);
ScopedObjectAccess soa(self);
StackHandleScope<4> hs(self);
diff --git a/runtime/arch/x86/context_x86.h b/runtime/arch/x86/context_x86.h
index ace4670..a783d48 100644
--- a/runtime/arch/x86/context_x86.h
+++ b/runtime/arch/x86/context_x86.h
@@ -92,7 +92,7 @@ class X86Context : public Context {
XMM7_0, XMM7_1,
kNumberOfFloatRegisters};
- // Pointers to register locations. Values are initialized to NULL or the special registers below.
+ // Pointers to register locations. Values are initialized to null or the special registers below.
uintptr_t* gprs_[kNumberOfCpuRegisters];
uint32_t* fprs_[kNumberOfFloatRegisters];
// Hold values for esp and eip if they are not located within a stack frame. EIP is somewhat
diff --git a/runtime/arch/x86/entrypoints_init_x86.cc b/runtime/arch/x86/entrypoints_init_x86.cc
index c012173..a371632 100644
--- a/runtime/arch/x86/entrypoints_init_x86.cc
+++ b/runtime/arch/x86/entrypoints_init_x86.cc
@@ -96,17 +96,6 @@ void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
qpoints->pUnlockObject = art_quick_unlock_object;
// Math
- // points->pCmpgDouble = NULL; // Not needed on x86.
- // points->pCmpgFloat = NULL; // Not needed on x86.
- // points->pCmplDouble = NULL; // Not needed on x86.
- // points->pCmplFloat = NULL; // Not needed on x86.
- // qpoints->pFmod = NULL; // Not needed on x86.
- // qpoints->pL2d = NULL; // Not needed on x86.
- // qpoints->pFmodf = NULL; // Not needed on x86.
- // qpoints->pL2f = NULL; // Not needed on x86.
- // points->pD2iz = NULL; // Not needed on x86.
- // points->pF2iz = NULL; // Not needed on x86.
- // qpoints->pIdivmod = NULL; // Not needed on x86.
qpoints->pD2l = art_quick_d2l;
qpoints->pF2l = art_quick_f2l;
qpoints->pLdiv = art_quick_ldiv;
@@ -125,11 +114,16 @@ void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
qpoints->pQuickImtConflictTrampoline = art_quick_imt_conflict_trampoline;
qpoints->pQuickResolutionTrampoline = art_quick_resolution_trampoline;
qpoints->pQuickToInterpreterBridge = art_quick_to_interpreter_bridge;
- qpoints->pInvokeDirectTrampolineWithAccessCheck = art_quick_invoke_direct_trampoline_with_access_check;
- qpoints->pInvokeInterfaceTrampolineWithAccessCheck = art_quick_invoke_interface_trampoline_with_access_check;
- qpoints->pInvokeStaticTrampolineWithAccessCheck = art_quick_invoke_static_trampoline_with_access_check;
- qpoints->pInvokeSuperTrampolineWithAccessCheck = art_quick_invoke_super_trampoline_with_access_check;
- qpoints->pInvokeVirtualTrampolineWithAccessCheck = art_quick_invoke_virtual_trampoline_with_access_check;
+ qpoints->pInvokeDirectTrampolineWithAccessCheck =
+ art_quick_invoke_direct_trampoline_with_access_check;
+ qpoints->pInvokeInterfaceTrampolineWithAccessCheck =
+ art_quick_invoke_interface_trampoline_with_access_check;
+ qpoints->pInvokeStaticTrampolineWithAccessCheck =
+ art_quick_invoke_static_trampoline_with_access_check;
+ qpoints->pInvokeSuperTrampolineWithAccessCheck =
+ art_quick_invoke_super_trampoline_with_access_check;
+ qpoints->pInvokeVirtualTrampolineWithAccessCheck =
+ art_quick_invoke_virtual_trampoline_with_access_check;
// Thread
qpoints->pTestSuspend = art_quick_test_suspend;
diff --git a/runtime/arch/x86/fault_handler_x86.cc b/runtime/arch/x86/fault_handler_x86.cc
index 8712506..2de69aa 100644
--- a/runtime/arch/x86/fault_handler_x86.cc
+++ b/runtime/arch/x86/fault_handler_x86.cc
@@ -240,7 +240,7 @@ void FaultManager::HandleNestedSignal(int, siginfo_t*, void* context) {
// this code the same for both 32 and 64 bit.
Thread* self = Thread::Current();
- CHECK(self != nullptr); // This will cause a SIGABRT if self is nullptr.
+ CHECK(self != nullptr); // This will cause a SIGABRT if self is null.
struct ucontext* uc = reinterpret_cast<struct ucontext*>(context);
uc->CTX_JMP_BUF = reinterpret_cast<uintptr_t>(*self->GetNestedSignalState());
diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S
index c5a020a..c5d8b8f 100644
--- a/runtime/arch/x86/quick_entrypoints_x86.S
+++ b/runtime/arch/x86/quick_entrypoints_x86.S
@@ -285,7 +285,7 @@ TWO_ARG_RUNTIME_EXCEPTION art_quick_throw_array_bounds, artThrowArrayBoundsFromC
* The helper will attempt to locate the target and return a 64-bit result in r0/r1 consisting
* of the target Method* in r0 and method->code_ in r1.
*
- * If unsuccessful, the helper will return NULL/NULL. There will bea pending exception in the
+     * If unsuccessful, the helper will return null/null. There will be a pending exception in the
* thread and we branch to another stub to deliver it.
*
* On success this wrapper will restore arguments and *jump* to the target, leaving the lr
@@ -408,7 +408,7 @@ END_MACRO
* On entry:
* [sp] = return address
* [sp + 4] = method pointer
- * [sp + 8] = argument array or NULL for no argument methods
+ * [sp + 8] = argument array or null for no argument methods
* [sp + 12] = size of argument array in bytes
* [sp + 16] = (managed) thread pointer
* [sp + 20] = JValue* result
@@ -442,7 +442,7 @@ DEFINE_FUNCTION art_quick_invoke_stub
subl LITERAL(20), %ebx // remove space for return address, ebx, ebp, esi and edi
subl %ebx, %esp // reserve stack space for argument array
- movl LITERAL(0), (%esp) // store NULL for method*
+ movl LITERAL(0), (%esp) // store null for method*
// Copy arg array into stack.
movl 28(%ebp), %ecx // ECX = size of args
@@ -506,7 +506,7 @@ END_FUNCTION art_quick_invoke_stub
* On entry:
* [sp] = return address
* [sp + 4] = method pointer
- * [sp + 8] = argument array or NULL for no argument methods
+ * [sp + 8] = argument array or null for no argument methods
* [sp + 12] = size of argument array in bytes
* [sp + 16] = (managed) thread pointer
* [sp + 20] = JValue* result
@@ -539,7 +539,7 @@ DEFINE_FUNCTION art_quick_invoke_static_stub
subl LITERAL(20), %ebx // remove space for return address, ebx, ebp, esi and edi
subl %ebx, %esp // reserve stack space for argument array
- movl LITERAL(0), (%esp) // store NULL for method*
+ movl LITERAL(0), (%esp) // store null for method*
// Copy arg array into stack.
movl 28(%ebp), %ecx // ECX = size of args
@@ -1352,7 +1352,7 @@ DEFINE_FUNCTION art_quick_resolution_trampoline
call SYMBOL(artQuickResolutionTrampoline) // (Method* called, receiver, Thread*, SP)
movl %eax, %edi // remember code pointer in EDI
addl LITERAL(16), %esp // pop arguments
- test %eax, %eax // if code pointer is NULL goto deliver pending exception
+ test %eax, %eax // if code pointer is null goto deliver pending exception
jz 1f
RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME_AND_JUMP
1:
diff --git a/runtime/arch/x86_64/context_x86_64.h b/runtime/arch/x86_64/context_x86_64.h
index d03aa45..c9b0ff6 100644
--- a/runtime/arch/x86_64/context_x86_64.h
+++ b/runtime/arch/x86_64/context_x86_64.h
@@ -79,7 +79,7 @@ class X86_64Context : public Context {
NO_RETURN void DoLongJump() OVERRIDE;
private:
- // Pointers to register locations. Values are initialized to NULL or the special registers below.
+ // Pointers to register locations. Values are initialized to null or the special registers below.
uintptr_t* gprs_[kNumberOfCpuRegisters];
uint64_t* fprs_[kNumberOfFloatRegisters];
// Hold values for rsp and rip if they are not located within a stack frame. RIP is somewhat
diff --git a/runtime/arch/x86_64/entrypoints_init_x86_64.cc b/runtime/arch/x86_64/entrypoints_init_x86_64.cc
index 3bc0dc4..0cddec4 100644
--- a/runtime/arch/x86_64/entrypoints_init_x86_64.cc
+++ b/runtime/arch/x86_64/entrypoints_init_x86_64.cc
@@ -101,17 +101,6 @@ void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
qpoints->pUnlockObject = art_quick_unlock_object;
// Math
- // points->pCmpgDouble = NULL; // Not needed on x86.
- // points->pCmpgFloat = NULL; // Not needed on x86.
- // points->pCmplDouble = NULL; // Not needed on x86.
- // points->pCmplFloat = NULL; // Not needed on x86.
- // qpoints->pFmod = NULL; // Not needed on x86.
- // qpoints->pL2d = NULL; // Not needed on x86.
- // qpoints->pFmodf = NULL; // Not needed on x86.
- // qpoints->pL2f = NULL; // Not needed on x86.
- // points->pD2iz = NULL; // Not needed on x86.
- // points->pF2iz = NULL; // Not needed on x86.
- // qpoints->pIdivmod = NULL; // Not needed on x86.
qpoints->pD2l = art_d2l;
qpoints->pF2l = art_f2l;
qpoints->pLdiv = art_quick_ldiv;
@@ -122,7 +111,6 @@ void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
qpoints->pUshrLong = art_quick_lushr;
// Intrinsics
- // qpoints->pIndexOf = NULL; // Not needed on x86.
qpoints->pStringCompareTo = art_quick_string_compareto;
qpoints->pMemcpy = art_quick_memcpy;
@@ -130,11 +118,16 @@ void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
qpoints->pQuickImtConflictTrampoline = art_quick_imt_conflict_trampoline;
qpoints->pQuickResolutionTrampoline = art_quick_resolution_trampoline;
qpoints->pQuickToInterpreterBridge = art_quick_to_interpreter_bridge;
- qpoints->pInvokeDirectTrampolineWithAccessCheck = art_quick_invoke_direct_trampoline_with_access_check;
- qpoints->pInvokeInterfaceTrampolineWithAccessCheck = art_quick_invoke_interface_trampoline_with_access_check;
- qpoints->pInvokeStaticTrampolineWithAccessCheck = art_quick_invoke_static_trampoline_with_access_check;
- qpoints->pInvokeSuperTrampolineWithAccessCheck = art_quick_invoke_super_trampoline_with_access_check;
- qpoints->pInvokeVirtualTrampolineWithAccessCheck = art_quick_invoke_virtual_trampoline_with_access_check;
+ qpoints->pInvokeDirectTrampolineWithAccessCheck =
+ art_quick_invoke_direct_trampoline_with_access_check;
+ qpoints->pInvokeInterfaceTrampolineWithAccessCheck =
+ art_quick_invoke_interface_trampoline_with_access_check;
+ qpoints->pInvokeStaticTrampolineWithAccessCheck =
+ art_quick_invoke_static_trampoline_with_access_check;
+ qpoints->pInvokeSuperTrampolineWithAccessCheck =
+ art_quick_invoke_super_trampoline_with_access_check;
+ qpoints->pInvokeVirtualTrampolineWithAccessCheck =
+ art_quick_invoke_virtual_trampoline_with_access_check;
// Thread
qpoints->pTestSuspend = art_quick_test_suspend;
diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
index ce21f01..8185deb 100644
--- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S
+++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
@@ -348,7 +348,7 @@ TWO_ARG_RUNTIME_EXCEPTION art_quick_throw_array_bounds, artThrowArrayBoundsFromC
* The helper will attempt to locate the target and return a 128-bit result in rax/rdx consisting
* of the target Method* in rax and method->code_ in rdx.
*
- * If unsuccessful, the helper will return NULL/????. There will be a pending exception in the
+ * If unsuccessful, the helper will return null/????. There will be a pending exception in the
* thread and we branch to another stub to deliver it.
*
* On success this wrapper will restore arguments and *jump* to the target, leaving the return
@@ -506,7 +506,7 @@ DEFINE_FUNCTION art_quick_invoke_stub
#if (STACK_REFERENCE_SIZE != 4)
#error "STACK_REFERENCE_SIZE(X86_64) size not as expected."
#endif
- movl LITERAL(0), (%rsp) // Store NULL for method*
+ movl LITERAL(0), (%rsp) // Store null for method*
movl %r10d, %ecx // Place size of args in rcx.
movq %rdi, %rax // rax := method to be called
@@ -554,7 +554,7 @@ END_FUNCTION art_quick_invoke_stub
* On entry:
* [sp] = return address
* rdi = method pointer
- * rsi = argument array or NULL if no arguments.
+ * rsi = argument array or null if no arguments.
* rdx = size of argument array in bytes
* rcx = (managed) thread pointer
* r8 = JValue* result
@@ -600,7 +600,7 @@ DEFINE_FUNCTION art_quick_invoke_static_stub
#if (STACK_REFERENCE_SIZE != 4)
#error "STACK_REFERENCE_SIZE(X86_64) size not as expected."
#endif
- movl LITERAL(0), (%rsp) // Store NULL for method*
+ movl LITERAL(0), (%rsp) // Store null for method*
movl %r10d, %ecx // Place size of args in rcx.
movq %rdi, %rax // rax := method to be called
@@ -1302,7 +1302,7 @@ DEFINE_FUNCTION art_quick_resolution_trampoline
movq %rax, %r10 // Remember returned code pointer in R10.
movq (%rsp), %rdi // Load called method into RDI.
RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
- testq %r10, %r10 // If code pointer is NULL goto deliver pending exception.
+ testq %r10, %r10 // If code pointer is null goto deliver pending exception.
jz 1f
jmp *%r10 // Tail call into method.
1:
diff --git a/runtime/art_field-inl.h b/runtime/art_field-inl.h
index a2625e2..4991ad7 100644
--- a/runtime/art_field-inl.h
+++ b/runtime/art_field-inl.h
@@ -80,7 +80,7 @@ inline void ArtField::Set32(mirror::Object* object, uint32_t new_value) {
}
inline uint64_t ArtField::Get64(mirror::Object* object) {
- DCHECK(object != NULL) << PrettyField(this);
+ DCHECK(object != nullptr) << PrettyField(this);
DCHECK(!IsStatic() || (object == GetDeclaringClass()) || !Runtime::Current()->IsStarted());
if (UNLIKELY(IsVolatile())) {
return object->GetField64Volatile(GetOffset());
@@ -90,7 +90,7 @@ inline uint64_t ArtField::Get64(mirror::Object* object) {
template<bool kTransactionActive>
inline void ArtField::Set64(mirror::Object* object, uint64_t new_value) {
- DCHECK(object != NULL) << PrettyField(this);
+ DCHECK(object != nullptr) << PrettyField(this);
DCHECK(!IsStatic() || (object == GetDeclaringClass()) || !Runtime::Current()->IsStarted());
if (UNLIKELY(IsVolatile())) {
object->SetField64Volatile<kTransactionActive>(GetOffset(), new_value);
@@ -100,7 +100,7 @@ inline void ArtField::Set64(mirror::Object* object, uint64_t new_value) {
}
inline mirror::Object* ArtField::GetObj(mirror::Object* object) {
- DCHECK(object != NULL) << PrettyField(this);
+ DCHECK(object != nullptr) << PrettyField(this);
DCHECK(!IsStatic() || (object == GetDeclaringClass()) || !Runtime::Current()->IsStarted());
if (UNLIKELY(IsVolatile())) {
return object->GetFieldObjectVolatile<mirror::Object>(GetOffset());
@@ -110,7 +110,7 @@ inline mirror::Object* ArtField::GetObj(mirror::Object* object) {
template<bool kTransactionActive>
inline void ArtField::SetObj(mirror::Object* object, mirror::Object* new_value) {
- DCHECK(object != NULL) << PrettyField(this);
+ DCHECK(object != nullptr) << PrettyField(this);
DCHECK(!IsStatic() || (object == GetDeclaringClass()) || !Runtime::Current()->IsStarted());
if (UNLIKELY(IsVolatile())) {
object->SetFieldObjectVolatile<kTransactionActive>(GetOffset(), new_value);
diff --git a/runtime/art_field.h b/runtime/art_field.h
index 16c46f0..c0620bf 100644
--- a/runtime/art_field.h
+++ b/runtime/art_field.h
@@ -158,7 +158,7 @@ class ArtField {
return (GetAccessFlags() & kAccVolatile) != 0;
}
- // Returns an instance field with this offset in the given class or nullptr if not found.
+ // Returns an instance field with this offset in the given class or null if not found.
static ArtField* FindInstanceFieldWithOffset(mirror::Class* klass, uint32_t field_offset)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/base/hex_dump.cc b/runtime/base/hex_dump.cc
index 5423ff0..bce6b53 100644
--- a/runtime/base/hex_dump.cc
+++ b/runtime/base/hex_dump.cc
@@ -27,7 +27,7 @@ void HexDump::Dump(std::ostream& os) const {
return;
}
- if (address_ == NULL) {
+ if (address_ == nullptr) {
os << "00000000:";
return;
}
diff --git a/runtime/base/logging.cc b/runtime/base/logging.cc
index 0764b87..0ae7863 100644
--- a/runtime/base/logging.cc
+++ b/runtime/base/logging.cc
@@ -91,7 +91,7 @@ void InitLogging(char* argv[]) {
gProgramInvocationShortName.reset(new std::string((last_slash != nullptr) ? last_slash + 1
: argv[0]));
} else {
- // TODO: fall back to /proc/self/cmdline when argv is NULL on Linux.
+ // TODO: fall back to /proc/self/cmdline when argv is null on Linux.
gCmdLine.reset(new std::string("<unset>"));
}
const char* tags = getenv("ANDROID_LOG_TAGS");
diff --git a/runtime/base/logging.h b/runtime/base/logging.h
index 014f4ab..8b34374 100644
--- a/runtime/base/logging.h
+++ b/runtime/base/logging.h
@@ -72,7 +72,7 @@ extern unsigned int gAborting;
// This can be used to reveal or conceal logs with specific tags.
extern void InitLogging(char* argv[]);
-// Returns the command line used to invoke the current tool or nullptr if InitLogging hasn't been
+// Returns the command line used to invoke the current tool or null if InitLogging hasn't been
// performed.
extern const char* GetCmdLine();
diff --git a/runtime/base/mutex-inl.h b/runtime/base/mutex-inl.h
index cb69817..a727992 100644
--- a/runtime/base/mutex-inl.h
+++ b/runtime/base/mutex-inl.h
@@ -39,13 +39,14 @@
namespace art {
#if ART_USE_FUTEXES
-static inline int futex(volatile int *uaddr, int op, int val, const struct timespec *timeout, volatile int *uaddr2, int val3) {
+static inline int futex(volatile int *uaddr, int op, int val, const struct timespec *timeout,
+ volatile int *uaddr2, int val3) {
return syscall(SYS_futex, uaddr, op, val, timeout, uaddr2, val3);
}
#endif // ART_USE_FUTEXES
static inline uint64_t SafeGetTid(const Thread* self) {
- if (self != NULL) {
+ if (self != nullptr) {
return static_cast<uint64_t>(self->GetTid());
} else {
return static_cast<uint64_t>(GetTid());
@@ -77,7 +78,7 @@ static inline void CheckUnattachedThread(LockLevel level) NO_THREAD_SAFETY_ANALY
}
inline void BaseMutex::RegisterAsLocked(Thread* self) {
- if (UNLIKELY(self == NULL)) {
+ if (UNLIKELY(self == nullptr)) {
CheckUnattachedThread(level_);
return;
}
@@ -86,7 +87,7 @@ inline void BaseMutex::RegisterAsLocked(Thread* self) {
bool bad_mutexes_held = false;
for (int i = level_; i >= 0; --i) {
BaseMutex* held_mutex = self->GetHeldMutex(static_cast<LockLevel>(i));
- if (UNLIKELY(held_mutex != NULL)) {
+ if (UNLIKELY(held_mutex != nullptr)) {
LOG(ERROR) << "Lock level violation: holding \"" << held_mutex->name_ << "\" "
<< "(level " << LockLevel(i) << " - " << i
<< ") while locking \"" << name_ << "\" "
@@ -109,7 +110,7 @@ inline void BaseMutex::RegisterAsLocked(Thread* self) {
}
inline void BaseMutex::RegisterAsUnlocked(Thread* self) {
- if (UNLIKELY(self == NULL)) {
+ if (UNLIKELY(self == nullptr)) {
CheckUnattachedThread(level_);
return;
}
@@ -117,12 +118,12 @@ inline void BaseMutex::RegisterAsUnlocked(Thread* self) {
if (kDebugLocking && gAborting == 0) { // Avoid recursive aborts.
CHECK(self->GetHeldMutex(level_) == this) << "Unlocking on unacquired mutex: " << name_;
}
- self->SetHeldMutex(level_, NULL);
+ self->SetHeldMutex(level_, nullptr);
}
}
inline void ReaderWriterMutex::SharedLock(Thread* self) {
- DCHECK(self == NULL || self == Thread::Current());
+ DCHECK(self == nullptr || self == Thread::Current());
#if ART_USE_FUTEXES
bool done = false;
do {
@@ -143,7 +144,7 @@ inline void ReaderWriterMutex::SharedLock(Thread* self) {
}
inline void ReaderWriterMutex::SharedUnlock(Thread* self) {
- DCHECK(self == NULL || self == Thread::Current());
+ DCHECK(self == nullptr || self == Thread::Current());
DCHECK(exclusive_owner_ == 0U || exclusive_owner_ == -1U);
AssertSharedHeld(self);
RegisterAsUnlocked(self);
@@ -161,7 +162,7 @@ inline void ReaderWriterMutex::SharedUnlock(Thread* self) {
if (num_pending_writers_.LoadRelaxed() > 0 ||
num_pending_readers_.LoadRelaxed() > 0) {
// Wake any exclusive waiters as there are now no readers.
- futex(state_.Address(), FUTEX_WAKE, -1, NULL, NULL, 0);
+ futex(state_.Address(), FUTEX_WAKE, -1, nullptr, nullptr, 0);
}
}
} else {
@@ -174,11 +175,11 @@ inline void ReaderWriterMutex::SharedUnlock(Thread* self) {
}
inline bool Mutex::IsExclusiveHeld(const Thread* self) const {
- DCHECK(self == NULL || self == Thread::Current());
+ DCHECK(self == nullptr || self == Thread::Current());
bool result = (GetExclusiveOwnerTid() == SafeGetTid(self));
if (kDebugLocking) {
// Sanity debug check that if we think it is locked we have it in our held mutexes.
- if (result && self != NULL && level_ != kMonitorLock && !gAborting) {
+ if (result && self != nullptr && level_ != kMonitorLock && !gAborting) {
CHECK_EQ(self->GetHeldMutex(level_), this);
}
}
@@ -190,11 +191,11 @@ inline uint64_t Mutex::GetExclusiveOwnerTid() const {
}
inline bool ReaderWriterMutex::IsExclusiveHeld(const Thread* self) const {
- DCHECK(self == NULL || self == Thread::Current());
+ DCHECK(self == nullptr || self == Thread::Current());
bool result = (GetExclusiveOwnerTid() == SafeGetTid(self));
if (kDebugLocking) {
// Sanity that if the pthread thinks we own the lock the Thread agrees.
- if (self != NULL && result) {
+ if (self != nullptr && result) {
CHECK_EQ(self->GetHeldMutex(level_), this);
}
}
diff --git a/runtime/base/mutex.cc b/runtime/base/mutex.cc
index 13dcb8c..99c7246 100644
--- a/runtime/base/mutex.cc
+++ b/runtime/base/mutex.cc
@@ -67,7 +67,7 @@ struct AllMutexData {
Atomic<const BaseMutex*> all_mutexes_guard;
// All created mutexes guarded by all_mutexes_guard_.
std::set<BaseMutex*>* all_mutexes;
- AllMutexData() : all_mutexes(NULL) {}
+ AllMutexData() : all_mutexes(nullptr) {}
};
static struct AllMutexData gAllMutexData[kAllMutexDataSize];
@@ -114,7 +114,7 @@ class ScopedAllMutexesLock FINAL {
class ScopedContentionRecorder FINAL : public ValueObject {
public:
ScopedContentionRecorder(BaseMutex* mutex, uint64_t blocked_tid, uint64_t owner_tid)
- : mutex_(kLogLockContentions ? mutex : NULL),
+ : mutex_(kLogLockContentions ? mutex : nullptr),
blocked_tid_(kLogLockContentions ? blocked_tid : 0),
owner_tid_(kLogLockContentions ? owner_tid : 0),
start_nano_time_(kLogLockContentions ? NanoTime() : 0) {
@@ -144,7 +144,7 @@ BaseMutex::BaseMutex(const char* name, LockLevel level) : level_(level), name_(n
if (kLogLockContentions) {
ScopedAllMutexesLock mu(this);
std::set<BaseMutex*>** all_mutexes_ptr = &gAllMutexData->all_mutexes;
- if (*all_mutexes_ptr == NULL) {
+ if (*all_mutexes_ptr == nullptr) {
// We leak the global set of all mutexes to avoid ordering issues in global variable
// construction/destruction.
*all_mutexes_ptr = new std::set<BaseMutex*>();
@@ -165,7 +165,7 @@ void BaseMutex::DumpAll(std::ostream& os) {
os << "Mutex logging:\n";
ScopedAllMutexesLock mu(reinterpret_cast<const BaseMutex*>(-1));
std::set<BaseMutex*>* all_mutexes = gAllMutexData->all_mutexes;
- if (all_mutexes == NULL) {
+ if (all_mutexes == nullptr) {
// No mutexes have been created yet during at startup.
return;
}
@@ -190,7 +190,7 @@ void BaseMutex::DumpAll(std::ostream& os) {
}
void BaseMutex::CheckSafeToWait(Thread* self) {
- if (self == NULL) {
+ if (self == nullptr) {
CheckUnattachedThread(level_);
return;
}
@@ -202,7 +202,7 @@ void BaseMutex::CheckSafeToWait(Thread* self) {
if (i != level_) {
BaseMutex* held_mutex = self->GetHeldMutex(static_cast<LockLevel>(i));
// We expect waits to happen while holding the thread list suspend thread lock.
- if (held_mutex != NULL) {
+ if (held_mutex != nullptr) {
LOG(ERROR) << "Holding \"" << held_mutex->name_ << "\" "
<< "(level " << LockLevel(i) << ") while performing wait on "
<< "\"" << name_ << "\" (level " << level_ << ")";
@@ -354,7 +354,7 @@ Mutex::~Mutex() {
}
void Mutex::ExclusiveLock(Thread* self) {
- DCHECK(self == NULL || self == Thread::Current());
+ DCHECK(self == nullptr || self == Thread::Current());
if (kDebugLocking && !recursive_) {
AssertNotHeld(self);
}
@@ -370,7 +370,7 @@ void Mutex::ExclusiveLock(Thread* self) {
// Failed to acquire, hang up.
ScopedContentionRecorder scr(this, SafeGetTid(self), GetExclusiveOwnerTid());
num_contenders_++;
- if (futex(state_.Address(), FUTEX_WAIT, 1, NULL, NULL, 0) != 0) {
+ if (futex(state_.Address(), FUTEX_WAIT, 1, nullptr, nullptr, 0) != 0) {
// EAGAIN and EINTR both indicate a spurious failure, try again from the beginning.
// We don't use TEMP_FAILURE_RETRY so we can intentionally retry to acquire the lock.
if ((errno != EAGAIN) && (errno != EINTR)) {
@@ -397,7 +397,7 @@ void Mutex::ExclusiveLock(Thread* self) {
}
bool Mutex::ExclusiveTryLock(Thread* self) {
- DCHECK(self == NULL || self == Thread::Current());
+ DCHECK(self == nullptr || self == Thread::Current());
if (kDebugLocking && !recursive_) {
AssertNotHeld(self);
}
@@ -474,7 +474,7 @@ void Mutex::ExclusiveUnlock(Thread* self) {
if (LIKELY(done)) { // Spurious fail?
// Wake a contender.
if (UNLIKELY(num_contenders_.LoadRelaxed() > 0)) {
- futex(state_.Address(), FUTEX_WAKE, 1, NULL, NULL, 0);
+ futex(state_.Address(), FUTEX_WAKE, 1, nullptr, nullptr, 0);
}
}
} else {
@@ -537,14 +537,14 @@ ReaderWriterMutex::~ReaderWriterMutex() {
// TODO: should we just not log at all if shutting down? this could be the logging mutex!
MutexLock mu(Thread::Current(), *Locks::runtime_shutdown_lock_);
Runtime* runtime = Runtime::Current();
- bool shutting_down = runtime == NULL || runtime->IsShuttingDownLocked();
+ bool shutting_down = runtime == nullptr || runtime->IsShuttingDownLocked();
PLOG(shutting_down ? WARNING : FATAL) << "pthread_rwlock_destroy failed for " << name_;
}
#endif
}
void ReaderWriterMutex::ExclusiveLock(Thread* self) {
- DCHECK(self == NULL || self == Thread::Current());
+ DCHECK(self == nullptr || self == Thread::Current());
AssertNotExclusiveHeld(self);
#if ART_USE_FUTEXES
bool done = false;
@@ -557,7 +557,7 @@ void ReaderWriterMutex::ExclusiveLock(Thread* self) {
// Failed to acquire, hang up.
ScopedContentionRecorder scr(this, SafeGetTid(self), GetExclusiveOwnerTid());
++num_pending_writers_;
- if (futex(state_.Address(), FUTEX_WAIT, cur_state, NULL, NULL, 0) != 0) {
+ if (futex(state_.Address(), FUTEX_WAIT, cur_state, nullptr, nullptr, 0) != 0) {
// EAGAIN and EINTR both indicate a spurious failure, try again from the beginning.
// We don't use TEMP_FAILURE_RETRY so we can intentionally retry to acquire the lock.
if ((errno != EAGAIN) && (errno != EINTR)) {
@@ -578,7 +578,7 @@ void ReaderWriterMutex::ExclusiveLock(Thread* self) {
}
void ReaderWriterMutex::ExclusiveUnlock(Thread* self) {
- DCHECK(self == NULL || self == Thread::Current());
+ DCHECK(self == nullptr || self == Thread::Current());
AssertExclusiveHeld(self);
RegisterAsUnlocked(self);
DCHECK_NE(exclusive_owner_, 0U);
@@ -598,7 +598,7 @@ void ReaderWriterMutex::ExclusiveUnlock(Thread* self) {
// Wake any waiters.
if (UNLIKELY(num_pending_readers_.LoadRelaxed() > 0 ||
num_pending_writers_.LoadRelaxed() > 0)) {
- futex(state_.Address(), FUTEX_WAKE, -1, NULL, NULL, 0);
+ futex(state_.Address(), FUTEX_WAKE, -1, nullptr, nullptr, 0);
}
}
} else {
@@ -613,7 +613,7 @@ void ReaderWriterMutex::ExclusiveUnlock(Thread* self) {
#if HAVE_TIMED_RWLOCK
bool ReaderWriterMutex::ExclusiveLockWithTimeout(Thread* self, int64_t ms, int32_t ns) {
- DCHECK(self == NULL || self == Thread::Current());
+ DCHECK(self == nullptr || self == Thread::Current());
#if ART_USE_FUTEXES
bool done = false;
timespec end_abs_ts;
@@ -633,7 +633,7 @@ bool ReaderWriterMutex::ExclusiveLockWithTimeout(Thread* self, int64_t ms, int32
}
ScopedContentionRecorder scr(this, SafeGetTid(self), GetExclusiveOwnerTid());
++num_pending_writers_;
- if (futex(state_.Address(), FUTEX_WAIT, cur_state, &rel_ts, NULL, 0) != 0) {
+ if (futex(state_.Address(), FUTEX_WAIT, cur_state, &rel_ts, nullptr, 0) != 0) {
if (errno == ETIMEDOUT) {
--num_pending_writers_;
return false; // Timed out.
@@ -671,7 +671,7 @@ void ReaderWriterMutex::HandleSharedLockContention(Thread* self, int32_t cur_sta
// Owner holds it exclusively, hang up.
ScopedContentionRecorder scr(this, GetExclusiveOwnerTid(), SafeGetTid(self));
++num_pending_readers_;
- if (futex(state_.Address(), FUTEX_WAIT, cur_state, NULL, NULL, 0) != 0) {
+ if (futex(state_.Address(), FUTEX_WAIT, cur_state, nullptr, nullptr, 0) != 0) {
if (errno != EAGAIN) {
PLOG(FATAL) << "futex wait failed for " << name_;
}
@@ -681,7 +681,7 @@ void ReaderWriterMutex::HandleSharedLockContention(Thread* self, int32_t cur_sta
#endif
bool ReaderWriterMutex::SharedTryLock(Thread* self) {
- DCHECK(self == NULL || self == Thread::Current());
+ DCHECK(self == nullptr || self == Thread::Current());
#if ART_USE_FUTEXES
bool done = false;
do {
@@ -710,9 +710,9 @@ bool ReaderWriterMutex::SharedTryLock(Thread* self) {
}
bool ReaderWriterMutex::IsSharedHeld(const Thread* self) const {
- DCHECK(self == NULL || self == Thread::Current());
+ DCHECK(self == nullptr || self == Thread::Current());
bool result;
- if (UNLIKELY(self == NULL)) { // Handle unattached threads.
+ if (UNLIKELY(self == nullptr)) { // Handle unattached threads.
result = IsExclusiveHeld(self); // TODO: a better best effort here.
} else {
result = (self->GetHeldMutex(level_) == this);
@@ -770,14 +770,14 @@ ConditionVariable::~ConditionVariable() {
errno = rc;
MutexLock mu(Thread::Current(), *Locks::runtime_shutdown_lock_);
Runtime* runtime = Runtime::Current();
- bool shutting_down = (runtime == NULL) || runtime->IsShuttingDownLocked();
+ bool shutting_down = (runtime == nullptr) || runtime->IsShuttingDownLocked();
PLOG(shutting_down ? WARNING : FATAL) << "pthread_cond_destroy failed for " << name_;
}
#endif
}
void ConditionVariable::Broadcast(Thread* self) {
- DCHECK(self == NULL || self == Thread::Current());
+ DCHECK(self == nullptr || self == Thread::Current());
// TODO: enable below, there's a race in thread creation that causes false failures currently.
// guard_.AssertExclusiveHeld(self);
DCHECK_EQ(guard_.GetExclusiveOwnerTid(), SafeGetTid(self));
@@ -805,14 +805,14 @@ void ConditionVariable::Broadcast(Thread* self) {
}
void ConditionVariable::Signal(Thread* self) {
- DCHECK(self == NULL || self == Thread::Current());
+ DCHECK(self == nullptr || self == Thread::Current());
guard_.AssertExclusiveHeld(self);
#if ART_USE_FUTEXES
if (num_waiters_ > 0) {
sequence_++; // Indicate a signal occurred.
// Futex wake 1 waiter who will then come and in contend on mutex. It'd be nice to requeue them
// to avoid this, however, requeueing can only move all waiters.
- int num_woken = futex(sequence_.Address(), FUTEX_WAKE, 1, NULL, NULL, 0);
+ int num_woken = futex(sequence_.Address(), FUTEX_WAKE, 1, nullptr, nullptr, 0);
// Check something was woken or else we changed sequence_ before they had chance to wait.
CHECK((num_woken == 0) || (num_woken == 1));
}
@@ -827,7 +827,7 @@ void ConditionVariable::Wait(Thread* self) {
}
void ConditionVariable::WaitHoldingLocks(Thread* self) {
- DCHECK(self == NULL || self == Thread::Current());
+ DCHECK(self == nullptr || self == Thread::Current());
guard_.AssertExclusiveHeld(self);
unsigned int old_recursion_count = guard_.recursion_count_;
#if ART_USE_FUTEXES
@@ -837,7 +837,7 @@ void ConditionVariable::WaitHoldingLocks(Thread* self) {
guard_.recursion_count_ = 1;
int32_t cur_sequence = sequence_.LoadRelaxed();
guard_.ExclusiveUnlock(self);
- if (futex(sequence_.Address(), FUTEX_WAIT, cur_sequence, NULL, NULL, 0) != 0) {
+ if (futex(sequence_.Address(), FUTEX_WAIT, cur_sequence, nullptr, nullptr, 0) != 0) {
// Futex failed, check it is an expected error.
// EAGAIN == EWOULDBLK, so we let the caller try again.
// EINTR implies a signal was sent to this thread.
@@ -862,7 +862,7 @@ void ConditionVariable::WaitHoldingLocks(Thread* self) {
}
bool ConditionVariable::TimedWait(Thread* self, int64_t ms, int32_t ns) {
- DCHECK(self == NULL || self == Thread::Current());
+ DCHECK(self == nullptr || self == Thread::Current());
bool timed_out = false;
guard_.AssertExclusiveHeld(self);
guard_.CheckSafeToWait(self);
@@ -876,7 +876,7 @@ bool ConditionVariable::TimedWait(Thread* self, int64_t ms, int32_t ns) {
guard_.recursion_count_ = 1;
int32_t cur_sequence = sequence_.LoadRelaxed();
guard_.ExclusiveUnlock(self);
- if (futex(sequence_.Address(), FUTEX_WAIT, cur_sequence, &rel_ts, NULL, 0) != 0) {
+ if (futex(sequence_.Address(), FUTEX_WAIT, cur_sequence, &rel_ts, nullptr, 0) != 0) {
if (errno == ETIMEDOUT) {
// Timed out we're done.
timed_out = true;
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index 6e4b96c..f2be85e 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -344,8 +344,8 @@ class LOCKABLE ReaderWriterMutex : public BaseMutex {
// Assert the current thread has shared access to the ReaderWriterMutex.
void AssertSharedHeld(const Thread* self) {
if (kDebugLocking && (gAborting == 0)) {
- // TODO: we can only assert this well when self != NULL.
- CHECK(IsSharedHeld(self) || self == NULL) << *this;
+ // TODO: we can only assert this well when self != null.
+ CHECK(IsSharedHeld(self) || self == nullptr) << *this;
}
}
void AssertReaderHeld(const Thread* self) { AssertSharedHeld(self); }
diff --git a/runtime/base/mutex_test.cc b/runtime/base/mutex_test.cc
index 289d3ef..3750c81 100644
--- a/runtime/base/mutex_test.cc
+++ b/runtime/base/mutex_test.cc
@@ -106,7 +106,7 @@ struct RecursiveLockWait {
state->mu.Lock(Thread::Current());
state->cv.Signal(Thread::Current());
state->mu.Unlock(Thread::Current());
- return NULL;
+ return nullptr;
}
Mutex mu;
@@ -120,14 +120,15 @@ static void RecursiveLockWaitTest() NO_THREAD_SAFETY_ANALYSIS {
state.mu.Lock(Thread::Current());
pthread_t pthread;
- int pthread_create_result = pthread_create(&pthread, NULL, RecursiveLockWait::Callback, &state);
+ int pthread_create_result = pthread_create(&pthread, nullptr, RecursiveLockWait::Callback,
+ &state);
ASSERT_EQ(0, pthread_create_result);
state.cv.Wait(Thread::Current());
state.mu.Unlock(Thread::Current());
state.mu.Unlock(Thread::Current());
- EXPECT_EQ(pthread_join(pthread, NULL), 0);
+ EXPECT_EQ(pthread_join(pthread, nullptr), 0);
}
// This ensures we don't hang when waiting on a recursively locked mutex,
diff --git a/runtime/base/scoped_flock.cc b/runtime/base/scoped_flock.cc
index 0e93eee..71e0590 100644
--- a/runtime/base/scoped_flock.cc
+++ b/runtime/base/scoped_flock.cc
@@ -31,7 +31,7 @@ bool ScopedFlock::Init(const char* filename, std::string* error_msg) {
UNUSED(file_->FlushCloseOrErase()); // Ignore result.
}
file_.reset(OS::OpenFileWithFlags(filename, O_CREAT | O_RDWR));
- if (file_.get() == NULL) {
+ if (file_.get() == nullptr) {
*error_msg = StringPrintf("Failed to open file '%s': %s", filename, strerror(errno));
return false;
}
@@ -71,14 +71,15 @@ bool ScopedFlock::Init(File* file, std::string* error_msg) {
}
if (0 != TEMP_FAILURE_RETRY(flock(file_->Fd(), LOCK_EX))) {
file_.reset();
- *error_msg = StringPrintf("Failed to lock file '%s': %s", file->GetPath().c_str(), strerror(errno));
+ *error_msg = StringPrintf(
+ "Failed to lock file '%s': %s", file->GetPath().c_str(), strerror(errno));
return false;
}
return true;
}
File* ScopedFlock::GetFile() {
- CHECK(file_.get() != NULL);
+ CHECK(file_.get() != nullptr);
return file_.get();
}
@@ -89,7 +90,7 @@ bool ScopedFlock::HasFile() {
ScopedFlock::ScopedFlock() { }
ScopedFlock::~ScopedFlock() {
- if (file_.get() != NULL) {
+ if (file_.get() != nullptr) {
int flock_result = TEMP_FAILURE_RETRY(flock(file_->Fd(), LOCK_UN));
CHECK_EQ(0, flock_result);
if (file_->FlushCloseOrErase() != 0) {
diff --git a/runtime/base/stl_util.h b/runtime/base/stl_util.h
index 3c5565c..901f25f 100644
--- a/runtime/base/stl_util.h
+++ b/runtime/base/stl_util.h
@@ -54,28 +54,30 @@ void STLDeleteContainerPointers(ForwardIterator begin,
// hash_set, or any other STL container which defines sensible begin(), end(),
// and clear() methods.
//
-// If container is NULL, this function is a no-op.
+// If container is null, this function is a no-op.
//
// As an alternative to calling STLDeleteElements() directly, consider
// using a container of std::unique_ptr, which ensures that your container's
// elements are deleted when the container goes out of scope.
template <class T>
void STLDeleteElements(T *container) {
- if (!container) return;
- STLDeleteContainerPointers(container->begin(), container->end());
- container->clear();
+ if (container != nullptr) {
+ STLDeleteContainerPointers(container->begin(), container->end());
+ container->clear();
+ }
}
// Given an STL container consisting of (key, value) pairs, STLDeleteValues
// deletes all the "value" components and clears the container. Does nothing
-// in the case it's given a NULL pointer.
+// in the case it's given a null pointer.
template <class T>
void STLDeleteValues(T *v) {
- if (!v) return;
- for (typename T::iterator i = v->begin(); i != v->end(); ++i) {
- delete i->second;
+ if (v != nullptr) {
+ for (typename T::iterator i = v->begin(); i != v->end(); ++i) {
+ delete i->second;
+ }
+ v->clear();
}
- v->clear();
}
template <class T>
diff --git a/runtime/base/variant_map.h b/runtime/base/variant_map.h
index 8655a9e..1d7596a 100644
--- a/runtime/base/variant_map.h
+++ b/runtime/base/variant_map.h
@@ -31,7 +31,7 @@ namespace art {
//
// struct VariantMap {
// template <typename TValue>
-// TValue* Get(Key<T> key); // nullptr if the value was never set, otherwise the value.
+// TValue* Get(Key<T> key); // null if the value was never set, otherwise the value.
//
// template <typename TValue>
// void Set(Key<T> key, TValue value);
diff --git a/runtime/base/variant_map_test.cc b/runtime/base/variant_map_test.cc
index f306a48..ccb22eb 100644
--- a/runtime/base/variant_map_test.cc
+++ b/runtime/base/variant_map_test.cc
@@ -18,7 +18,7 @@
#include "gtest/gtest.h"
#define EXPECT_NULL(expected) EXPECT_EQ(reinterpret_cast<const void*>(expected), \
- reinterpret_cast<void*>(NULL));
+ static_cast<void*>(nullptr));
namespace art {
diff --git a/runtime/check_jni.cc b/runtime/check_jni.cc
index c6940d3..30084d2 100644
--- a/runtime/check_jni.cc
+++ b/runtime/check_jni.cc
@@ -183,7 +183,7 @@ class ScopedCheck {
}
/*
- * Verify that the pointer value is non-NULL.
+ * Verify that the pointer value is non-null.
*/
bool CheckNonNull(const void* ptr) {
if (UNLIKELY(ptr == nullptr)) {
@@ -612,7 +612,7 @@ class ScopedCheck {
};
/*
- * Verify that "jobj" is a valid non-NULL object reference, and points to
+ * Verify that "jobj" is a valid non-null object reference, and points to
* an instance of expectedClass.
*
* Because we're looking at an object on the GC heap, we have to switch
@@ -941,7 +941,7 @@ class ScopedCheck {
}
}
/*
- * Verify that "array" is non-NULL and points to an Array object.
+ * Verify that "array" is non-null and points to an Array object.
*
* Since we're dealing with objects, switch to "running" mode.
*/
@@ -1277,7 +1277,7 @@ class GuardedCopy {
* Verify the guard area and, if "modOkay" is false, that the data itself
* has not been altered.
*
- * The caller has already checked that "dataBuf" is non-NULL.
+ * The caller has already checked that "dataBuf" is non-null.
*/
static bool Check(const char* function_name, const void* embedded_buf, bool mod_okay) {
const GuardedCopy* copy = FromEmbedded(embedded_buf);
diff --git a/runtime/class_linker-inl.h b/runtime/class_linker-inl.h
index 87d1c4c..1428749 100644
--- a/runtime/class_linker-inl.h
+++ b/runtime/class_linker-inl.h
@@ -60,7 +60,7 @@ inline mirror::String* ClassLinker::ResolveString(uint32_t string_idx,
mirror::ArtMethod* referrer) {
mirror::Class* declaring_class = referrer->GetDeclaringClass();
mirror::String* resolved_string = declaring_class->GetDexCacheStrings()->Get(string_idx);
- if (UNLIKELY(resolved_string == NULL)) {
+ if (UNLIKELY(resolved_string == nullptr)) {
StackHandleScope<1> hs(Thread::Current());
Handle<mirror::DexCache> dex_cache(hs.NewHandle(declaring_class->GetDexCache()));
const DexFile& dex_file = *dex_cache->GetDexFile();
@@ -92,7 +92,7 @@ inline mirror::Class* ClassLinker::ResolveType(uint16_t type_idx, ArtField* refe
mirror::Class* declaring_class = referrer->GetDeclaringClass();
mirror::DexCache* dex_cache_ptr = declaring_class->GetDexCache();
mirror::Class* resolved_type = dex_cache_ptr->GetResolvedType(type_idx);
- if (UNLIKELY(resolved_type == NULL)) {
+ if (UNLIKELY(resolved_type == nullptr)) {
StackHandleScope<2> hs(Thread::Current());
Handle<mirror::DexCache> dex_cache(hs.NewHandle(dex_cache_ptr));
Handle<mirror::ClassLoader> class_loader(hs.NewHandle(declaring_class->GetClassLoader()));
@@ -146,7 +146,7 @@ inline ArtField* ClassLinker::ResolveField(uint32_t field_idx, mirror::ArtMethod
bool is_static) {
mirror::Class* declaring_class = referrer->GetDeclaringClass();
ArtField* resolved_field = GetResolvedField(field_idx, declaring_class);
- if (UNLIKELY(resolved_field == NULL)) {
+ if (UNLIKELY(resolved_field == nullptr)) {
StackHandleScope<2> hs(Thread::Current());
Handle<mirror::DexCache> dex_cache(hs.NewHandle(declaring_class->GetDexCache()));
Handle<mirror::ClassLoader> class_loader(hs.NewHandle(declaring_class->GetClassLoader()));
@@ -196,7 +196,7 @@ inline mirror::Class* ClassLinker::GetClassRoot(ClassRoot class_root)
DCHECK(!class_roots_.IsNull());
mirror::ObjectArray<mirror::Class>* class_roots = class_roots_.Read();
mirror::Class* klass = class_roots->Get(class_root);
- DCHECK(klass != NULL);
+ DCHECK(klass != nullptr);
return klass;
}
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 85b245f..c344eb4 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -110,7 +110,7 @@ static void ThrowEarlierClassFailure(mirror::Class* c)
mirror::Throwable* pre_allocated = runtime->GetPreAllocatedNoClassDefFoundError();
self->SetException(pre_allocated);
} else {
- if (c->GetVerifyErrorClass() != NULL) {
+ if (c->GetVerifyErrorClass() != nullptr) {
// TODO: change the verifier to store an _instance_, with a useful detail message?
std::string temp;
self->ThrowNewException(c->GetVerifyErrorClass()->GetDescriptor(&temp),
@@ -2271,7 +2271,7 @@ mirror::Class* ClassLinker::InitializePrimitiveClass(mirror::Class* primitive_cl
// the right context. It does NOT become the class loader for the
// array class; that always comes from the base element class.
//
-// Returns nullptr with an exception raised on failure.
+// Returns null with an exception raised on failure.
mirror::Class* ClassLinker::CreateArrayClass(Thread* self, const char* descriptor, size_t hash,
Handle<mirror::ClassLoader> class_loader) {
// Identify the underlying component type
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index d7c625d..8e27413 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -152,7 +152,7 @@ class ClassLinker {
const DexFile& dex_file, const DexFile::ClassDef& dex_class_def)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- // Finds a class by its descriptor, returning NULL if it isn't wasn't loaded
+ // Finds a class by its descriptor, returning null if it wasn't loaded
// by the given 'class_loader'.
mirror::Class* LookupClass(Thread* self, const char* descriptor, size_t hash,
mirror::ClassLoader* class_loader)
@@ -432,7 +432,7 @@ class ClassLinker {
void SetEntryPointsToInterpreter(mirror::ArtMethod* method) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- // Attempts to insert a class into a class table. Returns NULL if
+ // Attempts to insert a class into a class table. Returns null if
// the class was inserted, otherwise returns an existing class with
// the same descriptor and ClassLoader.
mirror::Class* InsertClass(const char* descriptor, mirror::Class* klass, size_t hash)
@@ -444,7 +444,7 @@ class ClassLinker {
mirror::ObjectArray<mirror::Class>* GetClassRoots() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
mirror::ObjectArray<mirror::Class>* class_roots = class_roots_.Read();
- DCHECK(class_roots != NULL);
+ DCHECK(class_roots != nullptr);
return class_roots;
}
diff --git a/runtime/common_runtime_test.cc b/runtime/common_runtime_test.cc
index e17b885..7a711cc 100644
--- a/runtime/common_runtime_test.cc
+++ b/runtime/common_runtime_test.cc
@@ -81,7 +81,7 @@ ScratchFile::ScratchFile(const ScratchFile& other, const char* suffix) {
}
ScratchFile::ScratchFile(File* file) {
- CHECK(file != NULL);
+ CHECK(file != nullptr);
filename_ = file->GetPath();
file_.reset(file);
}
@@ -559,7 +559,7 @@ std::string CommonRuntimeTest::GetCoreFileLocation(const char* suffix) {
std::string location;
if (IsHost()) {
const char* host_dir = getenv("ANDROID_HOST_OUT");
- CHECK(host_dir != NULL);
+ CHECK(host_dir != nullptr);
location = StringPrintf("%s/framework/core.%s", host_dir, suffix);
} else {
location = StringPrintf("/data/art-test/core.%s", suffix);
diff --git a/runtime/common_throws.cc b/runtime/common_throws.cc
index 407746f..0808999 100644
--- a/runtime/common_throws.cc
+++ b/runtime/common_throws.cc
@@ -35,7 +35,7 @@ namespace art {
static void AddReferrerLocation(std::ostream& os, mirror::Class* referrer)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- if (referrer != NULL) {
+ if (referrer != nullptr) {
std::string location(referrer->GetLocation());
if (!location.empty()) {
os << " (declaration of '" << PrettyDescriptor(referrer)
@@ -45,10 +45,10 @@ static void AddReferrerLocation(std::ostream& os, mirror::Class* referrer)
}
static void ThrowException(const char* exception_descriptor,
- mirror::Class* referrer, const char* fmt, va_list* args = NULL)
+ mirror::Class* referrer, const char* fmt, va_list* args = nullptr)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
std::ostringstream msg;
- if (args != NULL) {
+ if (args != nullptr) {
std::string vmsg;
StringAppendV(&vmsg, fmt, *args);
msg << vmsg;
@@ -61,10 +61,10 @@ static void ThrowException(const char* exception_descriptor,
}
static void ThrowWrappedException(const char* exception_descriptor,
- mirror::Class* referrer, const char* fmt, va_list* args = NULL)
+ mirror::Class* referrer, const char* fmt, va_list* args = nullptr)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
std::ostringstream msg;
- if (args != NULL) {
+ if (args != nullptr) {
std::string vmsg;
StringAppendV(&vmsg, fmt, *args);
msg << vmsg;
@@ -79,7 +79,7 @@ static void ThrowWrappedException(const char* exception_descriptor,
// AbstractMethodError
void ThrowAbstractMethodError(mirror::ArtMethod* method) {
- ThrowException("Ljava/lang/AbstractMethodError;", NULL,
+ ThrowException("Ljava/lang/AbstractMethodError;", nullptr,
StringPrintf("abstract method \"%s\"",
PrettyMethod(method).c_str()).c_str());
}
@@ -87,20 +87,20 @@ void ThrowAbstractMethodError(mirror::ArtMethod* method) {
// ArithmeticException
void ThrowArithmeticExceptionDivideByZero() {
- ThrowException("Ljava/lang/ArithmeticException;", NULL, "divide by zero");
+ ThrowException("Ljava/lang/ArithmeticException;", nullptr, "divide by zero");
}
// ArrayIndexOutOfBoundsException
void ThrowArrayIndexOutOfBoundsException(int index, int length) {
- ThrowException("Ljava/lang/ArrayIndexOutOfBoundsException;", NULL,
+ ThrowException("Ljava/lang/ArrayIndexOutOfBoundsException;", nullptr,
StringPrintf("length=%d; index=%d", length, index).c_str());
}
// ArrayStoreException
void ThrowArrayStoreException(mirror::Class* element_class, mirror::Class* array_class) {
- ThrowException("Ljava/lang/ArrayStoreException;", NULL,
+ ThrowException("Ljava/lang/ArrayStoreException;", nullptr,
StringPrintf("%s cannot be stored in an array of type %s",
PrettyDescriptor(element_class).c_str(),
PrettyDescriptor(array_class).c_str()).c_str());
@@ -109,14 +109,14 @@ void ThrowArrayStoreException(mirror::Class* element_class, mirror::Class* array
// ClassCastException
void ThrowClassCastException(mirror::Class* dest_type, mirror::Class* src_type) {
- ThrowException("Ljava/lang/ClassCastException;", NULL,
+ ThrowException("Ljava/lang/ClassCastException;", nullptr,
StringPrintf("%s cannot be cast to %s",
PrettyDescriptor(src_type).c_str(),
PrettyDescriptor(dest_type).c_str()).c_str());
}
void ThrowClassCastException(const char* msg) {
- ThrowException("Ljava/lang/ClassCastException;", NULL, msg);
+ ThrowException("Ljava/lang/ClassCastException;", nullptr, msg);
}
// ClassCircularityError
@@ -174,7 +174,7 @@ void ThrowIllegalAccessErrorFinalField(mirror::ArtMethod* referrer,
msg << "Final field '" << PrettyField(accessed, false) << "' cannot be written to by method '"
<< PrettyMethod(referrer) << "'";
ThrowException("Ljava/lang/IllegalAccessError;",
- referrer != NULL ? referrer->GetClass() : NULL,
+ referrer != nullptr ? referrer->GetClass() : nullptr,
msg.str().c_str());
}
@@ -188,13 +188,13 @@ void ThrowIllegalAccessError(mirror::Class* referrer, const char* fmt, ...) {
// IllegalAccessException
void ThrowIllegalAccessException(const char* msg) {
- ThrowException("Ljava/lang/IllegalAccessException;", NULL, msg);
+ ThrowException("Ljava/lang/IllegalAccessException;", nullptr, msg);
}
// IllegalArgumentException
void ThrowIllegalArgumentException(const char* msg) {
- ThrowException("Ljava/lang/IllegalArgumentException;", NULL, msg);
+ ThrowException("Ljava/lang/IllegalArgumentException;", nullptr, msg);
}
@@ -207,7 +207,7 @@ void ThrowIncompatibleClassChangeError(InvokeType expected_type, InvokeType foun
msg << "The method '" << PrettyMethod(method) << "' was expected to be of type "
<< expected_type << " but instead was found to be of type " << found_type;
ThrowException("Ljava/lang/IncompatibleClassChangeError;",
- referrer != NULL ? referrer->GetClass() : NULL,
+ referrer != nullptr ? referrer->GetClass() : nullptr,
msg.str().c_str());
}
@@ -216,14 +216,14 @@ void ThrowIncompatibleClassChangeErrorClassForInterfaceDispatch(mirror::ArtMetho
mirror::ArtMethod* referrer) {
// Referrer is calling interface_method on this_object, however, the interface_method isn't
// implemented by this_object.
- CHECK(this_object != NULL);
+ CHECK(this_object != nullptr);
std::ostringstream msg;
msg << "Class '" << PrettyDescriptor(this_object->GetClass())
<< "' does not implement interface '"
<< PrettyDescriptor(interface_method->GetDeclaringClass())
<< "' in call to '" << PrettyMethod(interface_method) << "'";
ThrowException("Ljava/lang/IncompatibleClassChangeError;",
- referrer != NULL ? referrer->GetClass() : NULL,
+ referrer != nullptr ? referrer->GetClass() : nullptr,
msg.str().c_str());
}
@@ -249,14 +249,14 @@ void ThrowIncompatibleClassChangeError(mirror::Class* referrer, const char* fmt,
void ThrowIOException(const char* fmt, ...) {
va_list args;
va_start(args, fmt);
- ThrowException("Ljava/io/IOException;", NULL, fmt, &args);
+ ThrowException("Ljava/io/IOException;", nullptr, fmt, &args);
va_end(args);
}
void ThrowWrappedIOException(const char* fmt, ...) {
va_list args;
va_start(args, fmt);
- ThrowWrappedException("Ljava/io/IOException;", NULL, fmt, &args);
+ ThrowWrappedException("Ljava/io/IOException;", nullptr, fmt, &args);
va_end(args);
}
@@ -272,12 +272,12 @@ void ThrowLinkageError(mirror::Class* referrer, const char* fmt, ...) {
// NegativeArraySizeException
void ThrowNegativeArraySizeException(int size) {
- ThrowException("Ljava/lang/NegativeArraySizeException;", NULL,
+ ThrowException("Ljava/lang/NegativeArraySizeException;", nullptr,
StringPrintf("%d", size).c_str());
}
void ThrowNegativeArraySizeException(const char* msg) {
- ThrowException("Ljava/lang/NegativeArraySizeException;", NULL, msg);
+ ThrowException("Ljava/lang/NegativeArraySizeException;", nullptr, msg);
}
// NoSuchFieldError
@@ -319,7 +319,7 @@ void ThrowNullPointerExceptionForFieldAccess(ArtField* field, bool is_read) {
std::ostringstream msg;
msg << "Attempt to " << (is_read ? "read from" : "write to")
<< " field '" << PrettyField(field, true) << "' on a null object reference";
- ThrowException("Ljava/lang/NullPointerException;", NULL, msg.str().c_str());
+ ThrowException("Ljava/lang/NullPointerException;", nullptr, msg.str().c_str());
}
static void ThrowNullPointerExceptionForMethodAccessImpl(uint32_t method_idx,
@@ -329,7 +329,7 @@ static void ThrowNullPointerExceptionForMethodAccessImpl(uint32_t method_idx,
std::ostringstream msg;
msg << "Attempt to invoke " << type << " method '"
<< PrettyMethod(method_idx, dex_file, true) << "' on a null object reference";
- ThrowException("Ljava/lang/NullPointerException;", NULL, msg.str().c_str());
+ ThrowException("Ljava/lang/NullPointerException;", nullptr, msg.str().c_str());
}
void ThrowNullPointerExceptionForMethodAccess(uint32_t method_idx,
@@ -379,7 +379,7 @@ void ThrowNullPointerExceptionFromDexPC() {
// method is invoked at this location.
mirror::ArtMethod* invoked_method =
verifier::MethodVerifier::FindInvokedMethodAtDexPc(method, throw_dex_pc);
- if (invoked_method != NULL) {
+ if (invoked_method != nullptr) {
// NPE with precise message.
ThrowNullPointerExceptionForMethodAccess(invoked_method, kVirtual);
} else {
@@ -411,7 +411,7 @@ void ThrowNullPointerExceptionFromDexPC() {
// field is accessed at this location.
ArtField* field =
verifier::MethodVerifier::FindAccessedFieldAtDexPc(method, throw_dex_pc);
- if (field != NULL) {
+ if (field != nullptr) {
// NPE with precise message.
ThrowNullPointerExceptionForFieldAccess(field, true /* read */);
} else {
@@ -443,7 +443,7 @@ void ThrowNullPointerExceptionFromDexPC() {
// field is accessed at this location.
ArtField* field =
verifier::MethodVerifier::FindAccessedFieldAtDexPc(method, throw_dex_pc);
- if (field != NULL) {
+ if (field != nullptr) {
// NPE with precise message.
ThrowNullPointerExceptionForFieldAccess(field, false /* write */);
} else {
@@ -459,7 +459,7 @@ void ThrowNullPointerExceptionFromDexPC() {
case Instruction::AGET_BYTE:
case Instruction::AGET_CHAR:
case Instruction::AGET_SHORT:
- ThrowException("Ljava/lang/NullPointerException;", NULL,
+ ThrowException("Ljava/lang/NullPointerException;", nullptr,
"Attempt to read from null array");
break;
case Instruction::APUT:
@@ -469,11 +469,11 @@ void ThrowNullPointerExceptionFromDexPC() {
case Instruction::APUT_BYTE:
case Instruction::APUT_CHAR:
case Instruction::APUT_SHORT:
- ThrowException("Ljava/lang/NullPointerException;", NULL,
+ ThrowException("Ljava/lang/NullPointerException;", nullptr,
"Attempt to write to null array");
break;
case Instruction::ARRAY_LENGTH:
- ThrowException("Ljava/lang/NullPointerException;", NULL,
+ ThrowException("Ljava/lang/NullPointerException;", nullptr,
"Attempt to get length of null array");
break;
default: {
@@ -481,7 +481,7 @@ void ThrowNullPointerExceptionFromDexPC() {
// message/logging is so we can improve any cases we've missed in the future.
const DexFile* dex_file =
method->GetDeclaringClass()->GetDexCache()->GetDexFile();
- ThrowException("Ljava/lang/NullPointerException;", NULL,
+ ThrowException("Ljava/lang/NullPointerException;", nullptr,
StringPrintf("Null pointer exception during instruction '%s'",
instr->DumpString(dex_file).c_str()).c_str());
break;
@@ -490,7 +490,7 @@ void ThrowNullPointerExceptionFromDexPC() {
}
void ThrowNullPointerException(const char* msg) {
- ThrowException("Ljava/lang/NullPointerException;", NULL, msg);
+ ThrowException("Ljava/lang/NullPointerException;", nullptr, msg);
}
// RuntimeException
@@ -498,7 +498,7 @@ void ThrowNullPointerException(const char* msg) {
void ThrowRuntimeException(const char* fmt, ...) {
va_list args;
va_start(args, fmt);
- ThrowException("Ljava/lang/RuntimeException;", NULL, fmt, &args);
+ ThrowException("Ljava/lang/RuntimeException;", nullptr, fmt, &args);
va_end(args);
}
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index c074b54..f3ce552 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -174,7 +174,8 @@ class AllocRecord {
jobject type_; // This is a weak global.
size_t byte_count_;
uint16_t thin_lock_id_;
- AllocRecordStackTraceElement stack_[kMaxAllocRecordStackDepth]; // Unused entries have nullptr method.
+ // Unused entries have null method.
+ AllocRecordStackTraceElement stack_[kMaxAllocRecordStackDepth];
};
class Breakpoint {
@@ -714,7 +715,7 @@ std::string Dbg::GetClassName(JDWP::RefTypeId class_id) {
mirror::Object* o = gRegistry->Get<mirror::Object*>(class_id, &error);
if (o == nullptr) {
if (error == JDWP::ERR_NONE) {
- return "NULL";
+ return "null";
} else {
return StringPrintf("invalid object %p", reinterpret_cast<void*>(class_id));
}
@@ -727,7 +728,7 @@ std::string Dbg::GetClassName(JDWP::RefTypeId class_id) {
std::string Dbg::GetClassName(mirror::Class* klass) {
if (klass == nullptr) {
- return "NULL";
+ return "null";
}
std::string temp;
return DescriptorToName(klass->GetDescriptor(&temp));
@@ -1409,7 +1410,7 @@ void Dbg::SetJdwpLocation(JDWP::JdwpLocation* location, mirror::ArtMethod* m, ui
std::string Dbg::GetMethodName(JDWP::MethodId method_id) {
mirror::ArtMethod* m = FromMethodId(method_id);
if (m == nullptr) {
- return "NULL";
+ return "null";
}
return m->GetName();
}
@@ -1417,7 +1418,7 @@ std::string Dbg::GetMethodName(JDWP::MethodId method_id) {
std::string Dbg::GetFieldName(JDWP::FieldId field_id) {
ArtField* f = FromFieldId(field_id);
if (f == nullptr) {
- return "NULL";
+ return "null";
}
return f->GetName();
}
@@ -1721,7 +1722,7 @@ static JDWP::JdwpError GetFieldValueImpl(JDWP::RefTypeId ref_type_id, JDWP::Obje
if (receiver_class == nullptr && o != nullptr) {
receiver_class = o->GetClass();
}
- // TODO: should we give up now if receiver_class is nullptr?
+ // TODO: should we give up now if receiver_class is null?
if (receiver_class != nullptr && !f->GetDeclaringClass()->IsAssignableFrom(receiver_class)) {
LOG(INFO) << "ERR_INVALID_FIELDID: " << PrettyField(f) << " " << PrettyClass(receiver_class);
return JDWP::ERR_INVALID_FIELDID;
@@ -2176,7 +2177,7 @@ void Dbg::GetThreads(mirror::Object* thread_group, std::vector<JDWP::ObjectId>*
}
mirror::Object* peer = t->GetPeer();
if (peer == nullptr) {
- // peer might be NULL if the thread is still starting up. We can't tell the debugger about
+ // peer might be null if the thread is still starting up. We can't tell the debugger about
// this thread yet.
// TODO: if we identified threads to the debugger by their Thread*
// rather than their peer's mirror::Object*, we could fix this.
@@ -3390,7 +3391,7 @@ bool Dbg::IsForcedInterpreterNeededForResolutionImpl(Thread* thread, mirror::Art
}
bool Dbg::IsForcedInstrumentationNeededForResolutionImpl(Thread* thread, mirror::ArtMethod* m) {
- // The upcall can be nullptr and in that case we don't need to do anything.
+ // The upcall can be null and in that case we don't need to do anything.
if (m == nullptr) {
return false;
}
@@ -3427,7 +3428,7 @@ bool Dbg::IsForcedInstrumentationNeededForResolutionImpl(Thread* thread, mirror:
}
bool Dbg::IsForcedInterpreterNeededForUpcallImpl(Thread* thread, mirror::ArtMethod* m) {
- // The upcall can be nullptr and in that case we don't need to do anything.
+ // The upcall can be null and in that case we don't need to do anything.
if (m == nullptr) {
return false;
}
diff --git a/runtime/debugger.h b/runtime/debugger.h
index c287121..fe90eb6 100644
--- a/runtime/debugger.h
+++ b/runtime/debugger.h
@@ -68,7 +68,7 @@ struct DebugInvokeReq {
GcRoot<mirror::Class> klass;
GcRoot<mirror::ArtMethod> method;
const uint32_t arg_count;
- uint64_t* const arg_values; // will be NULL if arg_count_ == 0
+ uint64_t* const arg_values; // will be null if arg_count_ == 0
const uint32_t options;
/* result */
diff --git a/runtime/dex_file-inl.h b/runtime/dex_file-inl.h
index c68fdca..760006a 100644
--- a/runtime/dex_file-inl.h
+++ b/runtime/dex_file-inl.h
@@ -32,7 +32,7 @@ inline int32_t DexFile::GetStringLength(const StringId& string_id) const {
inline const char* DexFile::GetStringDataAndUtf16Length(const StringId& string_id,
uint32_t* utf16_length) const {
- DCHECK(utf16_length != NULL) << GetLocation();
+ DCHECK(utf16_length != nullptr) << GetLocation();
const uint8_t* ptr = begin_ + string_id.string_data_off_;
*utf16_length = DecodeUnsignedLeb128(&ptr);
return reinterpret_cast<const char*>(ptr);
diff --git a/runtime/dex_file.cc b/runtime/dex_file.cc
index 03a47a3..0589cdd 100644
--- a/runtime/dex_file.cc
+++ b/runtime/dex_file.cc
@@ -57,7 +57,7 @@ const uint8_t DexFile::kDexMagic[] = { 'd', 'e', 'x', '\n' };
const uint8_t DexFile::kDexMagicVersion[] = { '0', '3', '5', '\0' };
static int OpenAndReadMagic(const char* filename, uint32_t* magic, std::string* error_msg) {
- CHECK(magic != NULL);
+ CHECK(magic != nullptr);
ScopedFd fd(open(filename, O_RDONLY, 0));
if (fd.get() == -1) {
*error_msg = StringPrintf("Unable to open '%s' : %s", filename, strerror(errno));
@@ -77,7 +77,7 @@ static int OpenAndReadMagic(const char* filename, uint32_t* magic, std::string*
}
bool DexFile::GetChecksum(const char* filename, uint32_t* checksum, std::string* error_msg) {
- CHECK(checksum != NULL);
+ CHECK(checksum != nullptr);
uint32_t magic;
// Strip ":...", which is the location
@@ -98,14 +98,15 @@ bool DexFile::GetChecksum(const char* filename, uint32_t* checksum, std::string*
return false;
}
if (IsZipMagic(magic)) {
- std::unique_ptr<ZipArchive> zip_archive(ZipArchive::OpenFromFd(fd.release(), filename, error_msg));
- if (zip_archive.get() == NULL) {
+ std::unique_ptr<ZipArchive> zip_archive(
+ ZipArchive::OpenFromFd(fd.release(), filename, error_msg));
+ if (zip_archive.get() == nullptr) {
*error_msg = StringPrintf("Failed to open zip archive '%s' (error msg: %s)", file_part,
error_msg->c_str());
return false;
}
std::unique_ptr<ZipEntry> zip_entry(zip_archive->Find(zip_entry_name, error_msg));
- if (zip_entry.get() == NULL) {
+ if (zip_entry.get() == nullptr) {
*error_msg = StringPrintf("Zip archive '%s' doesn't contain %s (error msg: %s)", file_part,
zip_entry_name, error_msg->c_str());
return false;
@@ -114,8 +115,9 @@ bool DexFile::GetChecksum(const char* filename, uint32_t* checksum, std::string*
return true;
}
if (IsDexMagic(magic)) {
- std::unique_ptr<const DexFile> dex_file(DexFile::OpenFile(fd.release(), filename, false, error_msg));
- if (dex_file.get() == NULL) {
+ std::unique_ptr<const DexFile> dex_file(
+ DexFile::OpenFile(fd.release(), filename, false, error_msg));
+ if (dex_file.get() == nullptr) {
return false;
}
*checksum = dex_file->GetHeader().checksum_;
@@ -127,7 +129,7 @@ bool DexFile::GetChecksum(const char* filename, uint32_t* checksum, std::string*
bool DexFile::Open(const char* filename, const char* location, std::string* error_msg,
std::vector<std::unique_ptr<const DexFile>>* dex_files) {
- DCHECK(dex_files != nullptr) << "DexFile::Open: out-param is NULL";
+ DCHECK(dex_files != nullptr) << "DexFile::Open: out-param is nullptr";
uint32_t magic;
ScopedFd fd(OpenAndReadMagic(filename, &magic, error_msg));
if (fd.get() == -1) {
@@ -152,7 +154,7 @@ bool DexFile::Open(const char* filename, const char* location, std::string* erro
}
int DexFile::GetPermissions() const {
- if (mem_map_.get() == NULL) {
+ if (mem_map_.get() == nullptr) {
return 0;
} else {
return mem_map_->GetProtect();
@@ -165,7 +167,7 @@ bool DexFile::IsReadOnly() const {
bool DexFile::EnableWrite() const {
CHECK(IsReadOnly());
- if (mem_map_.get() == NULL) {
+ if (mem_map_.get() == nullptr) {
return false;
} else {
return mem_map_->Protect(PROT_READ | PROT_WRITE);
@@ -174,7 +176,7 @@ bool DexFile::EnableWrite() const {
bool DexFile::DisableWrite() const {
CHECK(!IsReadOnly());
- if (mem_map_.get() == NULL) {
+ if (mem_map_.get() == nullptr) {
return false;
} else {
return mem_map_->Protect(PROT_READ);
@@ -233,7 +235,7 @@ const char* DexFile::kClassesDex = "classes.dex";
bool DexFile::OpenZip(int fd, const std::string& location, std::string* error_msg,
std::vector<std::unique_ptr<const DexFile>>* dex_files) {
- DCHECK(dex_files != nullptr) << "DexFile::OpenZip: out-param is NULL";
+ DCHECK(dex_files != nullptr) << "DexFile::OpenZip: out-param is nullptr";
std::unique_ptr<ZipArchive> zip_archive(ZipArchive::OpenFromFd(fd, location.c_str(), error_msg));
if (zip_archive.get() == nullptr) {
DCHECK(!error_msg->empty());
@@ -260,12 +262,12 @@ std::unique_ptr<const DexFile> DexFile::Open(const ZipArchive& zip_archive, cons
ZipOpenErrorCode* error_code) {
CHECK(!location.empty());
std::unique_ptr<ZipEntry> zip_entry(zip_archive.Find(entry_name, error_msg));
- if (zip_entry.get() == NULL) {
+ if (zip_entry.get() == nullptr) {
*error_code = ZipOpenErrorCode::kEntryNotFound;
return nullptr;
}
std::unique_ptr<MemMap> map(zip_entry->ExtractToMemMap(location.c_str(), entry_name, error_msg));
- if (map.get() == NULL) {
+ if (map.get() == nullptr) {
*error_msg = StringPrintf("Failed to extract '%s' from '%s': %s", entry_name, location.c_str(),
error_msg->c_str());
*error_code = ZipOpenErrorCode::kExtractToMemoryError;
@@ -297,7 +299,7 @@ std::unique_ptr<const DexFile> DexFile::Open(const ZipArchive& zip_archive, cons
bool DexFile::OpenFromZip(const ZipArchive& zip_archive, const std::string& location,
std::string* error_msg,
std::vector<std::unique_ptr<const DexFile>>* dex_files) {
- DCHECK(dex_files != nullptr) << "DexFile::OpenFromZip: out-param is NULL";
+ DCHECK(dex_files != nullptr) << "DexFile::OpenFromZip: out-param is nullptr";
ZipOpenErrorCode error_code;
std::unique_ptr<const DexFile> dex_file(Open(zip_archive, kClassesDex, location, error_msg,
&error_code));
@@ -371,7 +373,7 @@ DexFile::DexFile(const uint8_t* base, size_t size,
find_class_def_misses_(0),
class_def_index_(nullptr),
oat_dex_file_(oat_dex_file) {
- CHECK(begin_ != NULL) << GetLocation();
+ CHECK(begin_ != nullptr) << GetLocation();
CHECK_GT(size_, 0U) << GetLocation();
}
@@ -487,7 +489,7 @@ const DexFile::ClassDef* DexFile::FindClassDef(uint16_t type_idx) const {
return &class_def;
}
}
- return NULL;
+ return nullptr;
}
const DexFile::FieldId* DexFile::FindFieldId(const DexFile::TypeId& declaring_klass,
@@ -522,7 +524,7 @@ const DexFile::FieldId* DexFile::FindFieldId(const DexFile::TypeId& declaring_kl
}
}
}
- return NULL;
+ return nullptr;
}
const DexFile::MethodId* DexFile::FindMethodId(const DexFile::TypeId& declaring_klass,
@@ -557,7 +559,7 @@ const DexFile::MethodId* DexFile::FindMethodId(const DexFile::TypeId& declaring_
}
}
}
- return NULL;
+ return nullptr;
}
const DexFile::StringId* DexFile::FindStringId(const char* string) const {
@@ -576,7 +578,7 @@ const DexFile::StringId* DexFile::FindStringId(const char* string) const {
return &str_id;
}
}
- return NULL;
+ return nullptr;
}
const DexFile::StringId* DexFile::FindStringId(const uint16_t* string, size_t length) const {
@@ -595,7 +597,7 @@ const DexFile::StringId* DexFile::FindStringId(const uint16_t* string, size_t le
return &str_id;
}
}
- return NULL;
+ return nullptr;
}
const DexFile::TypeId* DexFile::FindTypeId(uint32_t string_idx) const {
@@ -612,7 +614,7 @@ const DexFile::TypeId* DexFile::FindTypeId(uint32_t string_idx) const {
return &type_id;
}
}
- return NULL;
+ return nullptr;
}
const DexFile::ProtoId* DexFile::FindProtoId(uint16_t return_type_idx,
@@ -648,7 +650,7 @@ const DexFile::ProtoId* DexFile::FindProtoId(uint16_t return_type_idx,
return &proto;
}
}
- return NULL;
+ return nullptr;
}
// Given a signature place the type ids into the given vector
@@ -687,11 +689,11 @@ bool DexFile::CreateTypeList(const StringPiece& signature, uint16_t* return_type
// TODO: avoid creating a std::string just to get a 0-terminated char array
std::string descriptor(signature.data() + start_offset, offset - start_offset);
const DexFile::StringId* string_id = FindStringId(descriptor.c_str());
- if (string_id == NULL) {
+ if (string_id == nullptr) {
return false;
}
const DexFile::TypeId* type_id = FindTypeId(GetIndexForStringId(*string_id));
- if (type_id == NULL) {
+ if (type_id == nullptr) {
return false;
}
uint16_t type_idx = GetIndexForTypeId(*type_id);
@@ -713,7 +715,7 @@ const Signature DexFile::CreateSignature(const StringPiece& signature) const {
return Signature::NoSignature();
}
const ProtoId* proto_id = FindProtoId(return_type_idx, param_type_indices);
- if (proto_id == NULL) {
+ if (proto_id == nullptr) {
return Signature::NoSignature();
}
return Signature(this, *proto_id);
@@ -727,12 +729,12 @@ int32_t DexFile::GetLineNumFromPC(mirror::ArtMethod* method, uint32_t rel_pc) co
}
const CodeItem* code_item = GetCodeItem(method->GetCodeItemOffset());
- DCHECK(code_item != NULL) << PrettyMethod(method) << " " << GetLocation();
+ DCHECK(code_item != nullptr) << PrettyMethod(method) << " " << GetLocation();
// A method with no line number info should return -1
LineNumFromPcContext context(rel_pc, -1);
DecodeDebugInfo(code_item, method->IsStatic(), method->GetDexMethodIndex(), LineNumForPcCb,
- NULL, &context);
+ nullptr, &context);
return context.line_num_;
}
@@ -771,19 +773,20 @@ int32_t DexFile::FindCatchHandlerOffset(const CodeItem &code_item, uint32_t addr
void DexFile::DecodeDebugInfo0(const CodeItem* code_item, bool is_static, uint32_t method_idx,
DexDebugNewPositionCb position_cb, DexDebugNewLocalCb local_cb,
- void* context, const uint8_t* stream, LocalInfo* local_in_reg) const {
+ void* context, const uint8_t* stream, LocalInfo* local_in_reg)
+ const {
uint32_t line = DecodeUnsignedLeb128(&stream);
uint32_t parameters_size = DecodeUnsignedLeb128(&stream);
uint16_t arg_reg = code_item->registers_size_ - code_item->ins_size_;
uint32_t address = 0;
- bool need_locals = (local_cb != NULL);
+ bool need_locals = (local_cb != nullptr);
if (!is_static) {
if (need_locals) {
const char* descriptor = GetMethodDeclaringClassDescriptor(GetMethodId(method_idx));
local_in_reg[arg_reg].name_ = "this";
local_in_reg[arg_reg].descriptor_ = descriptor;
- local_in_reg[arg_reg].signature_ = NULL;
+ local_in_reg[arg_reg].signature_ = nullptr;
local_in_reg[arg_reg].start_address_ = 0;
local_in_reg[arg_reg].is_live_ = true;
}
@@ -803,7 +806,7 @@ void DexFile::DecodeDebugInfo0(const CodeItem* code_item, bool is_static, uint32
const char* name = StringDataByIdx(id);
local_in_reg[arg_reg].name_ = name;
local_in_reg[arg_reg].descriptor_ = descriptor;
- local_in_reg[arg_reg].signature_ = NULL;
+ local_in_reg[arg_reg].signature_ = nullptr;
local_in_reg[arg_reg].start_address_ = address;
local_in_reg[arg_reg].is_live_ = true;
}
@@ -895,7 +898,7 @@ void DexFile::DecodeDebugInfo0(const CodeItem* code_item, bool is_static, uint32
}
if (need_locals) {
- if (local_in_reg[reg].name_ == NULL || local_in_reg[reg].descriptor_ == NULL) {
+ if (local_in_reg[reg].name_ == nullptr || local_in_reg[reg].descriptor_ == nullptr) {
LOG(ERROR) << "invalid stream - no name or descriptor in " << GetLocation();
return;
}
@@ -920,7 +923,7 @@ void DexFile::DecodeDebugInfo0(const CodeItem* code_item, bool is_static, uint32
address += adjopcode / DBG_LINE_RANGE;
line += DBG_LINE_BASE + (adjopcode % DBG_LINE_RANGE);
- if (position_cb != NULL) {
+ if (position_cb != nullptr) {
if (position_cb(context, address, line)) {
// early exit
return;
@@ -937,14 +940,16 @@ void DexFile::DecodeDebugInfo(const CodeItem* code_item, bool is_static, uint32_
void* context) const {
DCHECK(code_item != nullptr);
const uint8_t* stream = GetDebugInfoStream(code_item);
- std::unique_ptr<LocalInfo[]> local_in_reg(local_cb != NULL ?
+ std::unique_ptr<LocalInfo[]> local_in_reg(local_cb != nullptr ?
new LocalInfo[code_item->registers_size_] :
- NULL);
- if (stream != NULL) {
- DecodeDebugInfo0(code_item, is_static, method_idx, position_cb, local_cb, context, stream, &local_in_reg[0]);
+ nullptr);
+ if (stream != nullptr) {
+ DecodeDebugInfo0(code_item, is_static, method_idx, position_cb, local_cb, context, stream,
+ &local_in_reg[0]);
}
for (int reg = 0; reg < code_item->registers_size_; reg++) {
- InvokeLocalCbIfLive(context, reg, code_item->insns_size_in_code_units_, &local_in_reg[0], local_cb);
+ InvokeLocalCbIfLive(context, reg, code_item->insns_size_in_code_units_, &local_in_reg[0],
+ local_cb);
}
}
@@ -1051,7 +1056,7 @@ std::ostream& operator<<(std::ostream& os, const Signature& sig) {
// Decodes the header section from the class data bytes.
void ClassDataItemIterator::ReadClassDataHeader() {
- CHECK(ptr_pos_ != NULL);
+ CHECK(ptr_pos_ != nullptr);
header_.static_fields_size_ = DecodeUnsignedLeb128(&ptr_pos_);
header_.instance_fields_size_ = DecodeUnsignedLeb128(&ptr_pos_);
header_.direct_methods_size_ = DecodeUnsignedLeb128(&ptr_pos_);
@@ -1129,17 +1134,16 @@ static uint64_t ReadUnsignedLong(const uint8_t* ptr, int zwidth, bool fill_on_ri
return val;
}
-EncodedStaticFieldValueIterator::EncodedStaticFieldValueIterator(const DexFile& dex_file,
- Handle<mirror::DexCache>* dex_cache,
- Handle<mirror::ClassLoader>* class_loader,
- ClassLinker* linker,
- const DexFile::ClassDef& class_def)
+EncodedStaticFieldValueIterator::EncodedStaticFieldValueIterator(
+ const DexFile& dex_file, Handle<mirror::DexCache>* dex_cache,
+ Handle<mirror::ClassLoader>* class_loader, ClassLinker* linker,
+ const DexFile::ClassDef& class_def)
: dex_file_(dex_file), dex_cache_(dex_cache), class_loader_(class_loader), linker_(linker),
array_size_(), pos_(-1), type_(kByte) {
DCHECK(dex_cache != nullptr);
DCHECK(class_loader != nullptr);
ptr_ = dex_file.GetEncodedStaticFieldValuesArray(class_def);
- if (ptr_ == NULL) {
+ if (ptr_ == nullptr) {
array_size_ = 0;
} else {
array_size_ = DecodeUnsignedLeb128(&ptr_);
@@ -1199,7 +1203,7 @@ void EncodedStaticFieldValueIterator::Next() {
UNIMPLEMENTED(FATAL) << ": type " << type_;
UNREACHABLE();
case kNull:
- jval_.l = NULL;
+ jval_.l = nullptr;
width = 0;
break;
default:
@@ -1212,7 +1216,8 @@ void EncodedStaticFieldValueIterator::Next() {
template<bool kTransactionActive>
void EncodedStaticFieldValueIterator::ReadValueToField(ArtField* field) const {
switch (type_) {
- case kBoolean: field->SetBoolean<kTransactionActive>(field->GetDeclaringClass(), jval_.z); break;
+ case kBoolean: field->SetBoolean<kTransactionActive>(field->GetDeclaringClass(), jval_.z);
+ break;
case kByte: field->SetByte<kTransactionActive>(field->GetDeclaringClass(), jval_.b); break;
case kShort: field->SetShort<kTransactionActive>(field->GetDeclaringClass(), jval_.s); break;
case kChar: field->SetChar<kTransactionActive>(field->GetDeclaringClass(), jval_.c); break;
@@ -1220,7 +1225,7 @@ void EncodedStaticFieldValueIterator::ReadValueToField(ArtField* field) const {
case kLong: field->SetLong<kTransactionActive>(field->GetDeclaringClass(), jval_.j); break;
case kFloat: field->SetFloat<kTransactionActive>(field->GetDeclaringClass(), jval_.f); break;
case kDouble: field->SetDouble<kTransactionActive>(field->GetDeclaringClass(), jval_.d); break;
- case kNull: field->SetObject<kTransactionActive>(field->GetDeclaringClass(), NULL); break;
+ case kNull: field->SetObject<kTransactionActive>(field->GetDeclaringClass(), nullptr); break;
case kString: {
mirror::String* resolved = linker_->ResolveString(dex_file_, jval_.i, *dex_cache_);
field->SetObject<kTransactionActive>(field->GetDeclaringClass(), resolved);
@@ -1275,7 +1280,7 @@ void CatchHandlerIterator::Init(const DexFile::CodeItem& code_item,
Init(DexFile::GetCatchHandlerData(code_item, offset));
} else {
// Not found, initialize as empty
- current_data_ = NULL;
+ current_data_ = nullptr;
remaining_count_ = -1;
catch_all_ = false;
DCHECK(!HasNext());
diff --git a/runtime/dex_file.h b/runtime/dex_file.h
index 5bdd9b6..0d07358 100644
--- a/runtime/dex_file.h
+++ b/runtime/dex_file.h
@@ -394,7 +394,7 @@ class DexFile {
uint32_t location_checksum,
const OatDexFile* oat_dex_file,
std::string* error_msg) {
- return OpenMemory(base, size, location, location_checksum, NULL, oat_dex_file, error_msg);
+ return OpenMemory(base, size, location, location_checksum, nullptr, oat_dex_file, error_msg);
}
// Open all classesXXX.dex files from a zip archive.
@@ -448,7 +448,7 @@ class DexFile {
}
const Header& GetHeader() const {
- DCHECK(header_ != NULL) << GetLocation();
+ DCHECK(header_ != nullptr) << GetLocation();
return *header_;
}
@@ -463,7 +463,7 @@ class DexFile {
// Returns the number of string identifiers in the .dex file.
size_t NumStringIds() const {
- DCHECK(header_ != NULL) << GetLocation();
+ DCHECK(header_ != nullptr) << GetLocation();
return header_->string_ids_size_;
}
@@ -495,7 +495,7 @@ class DexFile {
const char* StringDataAndUtf16LengthByIdx(uint32_t idx, uint32_t* utf16_length) const {
if (idx == kDexNoIndex) {
*utf16_length = 0;
- return NULL;
+ return nullptr;
}
const StringId& string_id = GetStringId(idx);
return GetStringDataAndUtf16Length(string_id, utf16_length);
@@ -514,7 +514,7 @@ class DexFile {
// Returns the number of type identifiers in the .dex file.
uint32_t NumTypeIds() const {
- DCHECK(header_ != NULL) << GetLocation();
+ DCHECK(header_ != nullptr) << GetLocation();
return header_->type_ids_size_;
}
@@ -553,7 +553,7 @@ class DexFile {
// Returns the number of field identifiers in the .dex file.
size_t NumFieldIds() const {
- DCHECK(header_ != NULL) << GetLocation();
+ DCHECK(header_ != nullptr) << GetLocation();
return header_->field_ids_size_;
}
@@ -593,7 +593,7 @@ class DexFile {
// Returns the number of method identifiers in the .dex file.
size_t NumMethodIds() const {
- DCHECK(header_ != NULL) << GetLocation();
+ DCHECK(header_ != nullptr) << GetLocation();
return header_->method_ids_size_;
}
@@ -643,7 +643,7 @@ class DexFile {
}
// Returns the number of class definitions in the .dex file.
uint32_t NumClassDefs() const {
- DCHECK(header_ != NULL) << GetLocation();
+ DCHECK(header_ != nullptr) << GetLocation();
return header_->class_defs_size_;
}
@@ -673,7 +673,7 @@ class DexFile {
const TypeList* GetInterfacesList(const ClassDef& class_def) const {
if (class_def.interfaces_off_ == 0) {
- return NULL;
+ return nullptr;
} else {
const uint8_t* addr = begin_ + class_def.interfaces_off_;
return reinterpret_cast<const TypeList*>(addr);
@@ -683,7 +683,7 @@ class DexFile {
// Returns a pointer to the raw memory mapped class_data_item
const uint8_t* GetClassData(const ClassDef& class_def) const {
if (class_def.class_data_off_ == 0) {
- return NULL;
+ return nullptr;
} else {
return begin_ + class_def.class_data_off_;
}
@@ -692,7 +692,7 @@ class DexFile {
//
const CodeItem* GetCodeItem(const uint32_t code_off) const {
if (code_off == 0) {
- return NULL; // native or abstract method
+ return nullptr; // native or abstract method
} else {
const uint8_t* addr = begin_ + code_off;
return reinterpret_cast<const CodeItem*>(addr);
@@ -705,7 +705,7 @@ class DexFile {
// Returns the number of prototype identifiers in the .dex file.
size_t NumProtoIds() const {
- DCHECK(header_ != NULL) << GetLocation();
+ DCHECK(header_ != nullptr) << GetLocation();
return header_->proto_ids_size_;
}
@@ -745,7 +745,7 @@ class DexFile {
const TypeList* GetProtoParameters(const ProtoId& proto_id) const {
if (proto_id.parameters_off_ == 0) {
- return NULL;
+ return nullptr;
} else {
const uint8_t* addr = begin_ + proto_id.parameters_off_;
return reinterpret_cast<const TypeList*>(addr);
@@ -778,7 +778,7 @@ class DexFile {
// Get the pointer to the start of the debugging data
const uint8_t* GetDebugInfoStream(const CodeItem* code_item) const {
if (code_item->debug_info_off_ == 0) {
- return NULL;
+ return nullptr;
} else {
return begin_ + code_item->debug_info_off_;
}
@@ -818,7 +818,8 @@ class DexFile {
struct LocalInfo {
LocalInfo()
- : name_(NULL), descriptor_(NULL), signature_(NULL), start_address_(0), is_live_(false) {}
+ : name_(nullptr), descriptor_(nullptr), signature_(nullptr), start_address_(0),
+ is_live_(false) {}
const char* name_; // E.g., list
const char* descriptor_; // E.g., Ljava/util/LinkedList;
@@ -841,10 +842,10 @@ class DexFile {
void InvokeLocalCbIfLive(void* context, int reg, uint32_t end_address,
LocalInfo* local_in_reg, DexDebugNewLocalCb local_cb) const {
- if (local_cb != NULL && local_in_reg[reg].is_live_) {
+ if (local_cb != nullptr && local_in_reg[reg].is_live_) {
local_cb(context, reg, local_in_reg[reg].start_address_, end_address,
local_in_reg[reg].name_, local_in_reg[reg].descriptor_,
- local_in_reg[reg].signature_ != NULL ? local_in_reg[reg].signature_ : "");
+ local_in_reg[reg].signature_ != nullptr ? local_in_reg[reg].signature_ : "");
}
}
@@ -865,7 +866,7 @@ class DexFile {
const char* GetSourceFile(const ClassDef& class_def) const {
if (class_def.source_file_idx_ == 0xffffffff) {
- return NULL;
+ return nullptr;
} else {
return StringDataByIdx(class_def.source_file_idx_);
}
@@ -926,7 +927,7 @@ class DexFile {
kVerifyError
};
- // Opens .dex file from the entry_name in a zip archive. error_code is undefined when non-nullptr
+ // Opens .dex file from the entry_name in a zip archive. error_code is undefined when non-null
// return.
static std::unique_ptr<const DexFile> Open(const ZipArchive& zip_archive, const char* entry_name,
const std::string& location, std::string* error_msg,
@@ -1055,7 +1056,7 @@ class DexFileParameterIterator {
DexFileParameterIterator(const DexFile& dex_file, const DexFile::ProtoId& proto_id)
: dex_file_(dex_file), size_(0), pos_(0) {
type_list_ = dex_file_.GetProtoParameters(proto_id);
- if (type_list_ != NULL) {
+ if (type_list_ != nullptr) {
size_ = type_list_->Size();
}
}
diff --git a/runtime/dex_file_test.cc b/runtime/dex_file_test.cc
index 09ef3ee..4d099e1 100644
--- a/runtime/dex_file_test.cc
+++ b/runtime/dex_file_test.cc
@@ -33,7 +33,7 @@ class DexFileTest : public CommonRuntimeTest {};
TEST_F(DexFileTest, Open) {
ScopedObjectAccess soa(Thread::Current());
std::unique_ptr<const DexFile> dex(OpenTestDexFile("Nested"));
- ASSERT_TRUE(dex.get() != NULL);
+ ASSERT_TRUE(dex.get() != nullptr);
}
static const uint8_t kBase64Map[256] = {
@@ -136,14 +136,14 @@ static const char kRawDex[] =
static std::unique_ptr<const DexFile> OpenDexFileBase64(const char* base64,
const char* location) {
// decode base64
- CHECK(base64 != NULL);
+ CHECK(base64 != nullptr);
size_t length;
std::unique_ptr<uint8_t[]> dex_bytes(DecodeBase64(base64, &length));
- CHECK(dex_bytes.get() != NULL);
+ CHECK(dex_bytes.get() != nullptr);
// write to provided file
std::unique_ptr<File> file(OS::CreateEmptyFile(location));
- CHECK(file.get() != NULL);
+ CHECK(file.get() != nullptr);
if (!file->WriteFully(dex_bytes.get(), length)) {
PLOG(FATAL) << "Failed to write base64 as dex file";
}
@@ -168,7 +168,7 @@ static std::unique_ptr<const DexFile> OpenDexFileBase64(const char* base64,
TEST_F(DexFileTest, Header) {
ScratchFile tmp;
std::unique_ptr<const DexFile> raw(OpenDexFileBase64(kRawDex, tmp.GetFilename().c_str()));
- ASSERT_TRUE(raw.get() != NULL);
+ ASSERT_TRUE(raw.get() != nullptr);
const DexFile::Header& header = raw->GetHeader();
// TODO: header.magic_
@@ -234,7 +234,7 @@ TEST_F(DexFileTest, GetMethodSignature) {
ASSERT_STREQ("LGetMethodSignature;", raw->GetClassDescriptor(class_def));
const uint8_t* class_data = raw->GetClassData(class_def);
- ASSERT_TRUE(class_data != NULL);
+ ASSERT_TRUE(class_data != nullptr);
ClassDataItemIterator it(*raw, class_data);
EXPECT_EQ(1u, it.NumDirectMethods());
@@ -281,8 +281,8 @@ TEST_F(DexFileTest, FindStringId) {
EXPECT_EQ(1U, raw->NumClassDefs());
const char* strings[] = { "LGetMethodSignature;", "Ljava/lang/Float;", "Ljava/lang/Object;",
- "D", "I", "J", NULL };
- for (size_t i = 0; strings[i] != NULL; i++) {
+ "D", "I", "J", nullptr };
+ for (size_t i = 0; strings[i] != nullptr; i++) {
const char* str = strings[i];
const DexFile::StringId* str_id = raw->FindStringId(str);
const char* dex_str = raw->GetStringData(*str_id);
@@ -294,10 +294,10 @@ TEST_F(DexFileTest, FindTypeId) {
for (size_t i = 0; i < java_lang_dex_file_->NumTypeIds(); i++) {
const char* type_str = java_lang_dex_file_->StringByTypeIdx(i);
const DexFile::StringId* type_str_id = java_lang_dex_file_->FindStringId(type_str);
- ASSERT_TRUE(type_str_id != NULL);
+ ASSERT_TRUE(type_str_id != nullptr);
uint32_t type_str_idx = java_lang_dex_file_->GetIndexForStringId(*type_str_id);
const DexFile::TypeId* type_id = java_lang_dex_file_->FindTypeId(type_str_idx);
- ASSERT_TRUE(type_id != NULL);
+ ASSERT_TRUE(type_id != nullptr);
EXPECT_EQ(java_lang_dex_file_->GetIndexForTypeId(*type_id), i);
}
}
@@ -307,14 +307,14 @@ TEST_F(DexFileTest, FindProtoId) {
const DexFile::ProtoId& to_find = java_lang_dex_file_->GetProtoId(i);
const DexFile::TypeList* to_find_tl = java_lang_dex_file_->GetProtoParameters(to_find);
std::vector<uint16_t> to_find_types;
- if (to_find_tl != NULL) {
+ if (to_find_tl != nullptr) {
for (size_t j = 0; j < to_find_tl->Size(); j++) {
to_find_types.push_back(to_find_tl->GetTypeItem(j).type_idx_);
}
}
const DexFile::ProtoId* found =
java_lang_dex_file_->FindProtoId(to_find.return_type_idx_, to_find_types);
- ASSERT_TRUE(found != NULL);
+ ASSERT_TRUE(found != nullptr);
EXPECT_EQ(java_lang_dex_file_->GetIndexForProtoId(*found), i);
}
}
@@ -326,7 +326,7 @@ TEST_F(DexFileTest, FindMethodId) {
const DexFile::StringId& name = java_lang_dex_file_->GetStringId(to_find.name_idx_);
const DexFile::ProtoId& signature = java_lang_dex_file_->GetProtoId(to_find.proto_idx_);
const DexFile::MethodId* found = java_lang_dex_file_->FindMethodId(klass, name, signature);
- ASSERT_TRUE(found != NULL) << "Didn't find method " << i << ": "
+ ASSERT_TRUE(found != nullptr) << "Didn't find method " << i << ": "
<< java_lang_dex_file_->StringByTypeIdx(to_find.class_idx_) << "."
<< java_lang_dex_file_->GetStringData(name)
<< java_lang_dex_file_->GetMethodSignature(to_find);
@@ -341,7 +341,7 @@ TEST_F(DexFileTest, FindFieldId) {
const DexFile::StringId& name = java_lang_dex_file_->GetStringId(to_find.name_idx_);
const DexFile::TypeId& type = java_lang_dex_file_->GetTypeId(to_find.type_idx_);
const DexFile::FieldId* found = java_lang_dex_file_->FindFieldId(klass, name, type);
- ASSERT_TRUE(found != NULL) << "Didn't find field " << i << ": "
+ ASSERT_TRUE(found != nullptr) << "Didn't find field " << i << ": "
<< java_lang_dex_file_->StringByTypeIdx(to_find.type_idx_) << " "
<< java_lang_dex_file_->StringByTypeIdx(to_find.class_idx_) << "."
<< java_lang_dex_file_->GetStringData(name);
diff --git a/runtime/dex_file_verifier.cc b/runtime/dex_file_verifier.cc
index a3f3de8..2603975 100644
--- a/runtime/dex_file_verifier.cc
+++ b/runtime/dex_file_verifier.cc
@@ -1473,7 +1473,7 @@ bool DexFileVerifier::CheckInterStringIdItem() {
}
// Check ordering between items.
- if (previous_item_ != NULL) {
+ if (previous_item_ != nullptr) {
const DexFile::StringId* prev_item = reinterpret_cast<const DexFile::StringId*>(previous_item_);
const char* prev_str = dex_file_->GetStringData(*prev_item);
const char* str = dex_file_->GetStringData(*item);
@@ -1499,7 +1499,7 @@ bool DexFileVerifier::CheckInterTypeIdItem() {
}
// Check ordering between items.
- if (previous_item_ != NULL) {
+ if (previous_item_ != nullptr) {
const DexFile::TypeId* prev_item = reinterpret_cast<const DexFile::TypeId*>(previous_item_);
if (UNLIKELY(prev_item->descriptor_idx_ >= item->descriptor_idx_)) {
ErrorStringPrintf("Out-of-order type_ids: %x then %x",
@@ -1548,7 +1548,7 @@ bool DexFileVerifier::CheckInterProtoIdItem() {
}
// Check ordering between items. This relies on type_ids being in order.
- if (previous_item_ != NULL) {
+ if (previous_item_ != nullptr) {
const DexFile::ProtoId* prev = reinterpret_cast<const DexFile::ProtoId*>(previous_item_);
if (UNLIKELY(prev->return_type_idx_ > item->return_type_idx_)) {
ErrorStringPrintf("Out-of-order proto_id return types");
@@ -1610,7 +1610,7 @@ bool DexFileVerifier::CheckInterFieldIdItem() {
}
// Check ordering between items. This relies on the other sections being in order.
- if (previous_item_ != NULL) {
+ if (previous_item_ != nullptr) {
const DexFile::FieldId* prev_item = reinterpret_cast<const DexFile::FieldId*>(previous_item_);
if (UNLIKELY(prev_item->class_idx_ > item->class_idx_)) {
ErrorStringPrintf("Out-of-order field_ids");
@@ -1657,7 +1657,7 @@ bool DexFileVerifier::CheckInterMethodIdItem() {
}
// Check ordering between items. This relies on the other sections being in order.
- if (previous_item_ != NULL) {
+ if (previous_item_ != nullptr) {
const DexFile::MethodId* prev_item = reinterpret_cast<const DexFile::MethodId*>(previous_item_);
if (UNLIKELY(prev_item->class_idx_ > item->class_idx_)) {
ErrorStringPrintf("Out-of-order method_ids");
@@ -1728,7 +1728,7 @@ bool DexFileVerifier::CheckInterClassDefItem() {
}
const DexFile::TypeList* interfaces = dex_file_->GetInterfacesList(*item);
- if (interfaces != NULL) {
+ if (interfaces != nullptr) {
uint32_t size = interfaces->Size();
// Ensure that all interfaces refer to classes (not arrays or primitives).
@@ -1952,7 +1952,7 @@ bool DexFileVerifier::CheckInterSectionIterate(size_t offset, uint32_t count, ui
}
// Iterate through the items in the section.
- previous_item_ = NULL;
+ previous_item_ = nullptr;
for (uint32_t i = 0; i < count; i++) {
uint32_t new_offset = (offset + alignment_mask) & ~alignment_mask;
ptr_ = begin_ + new_offset;
diff --git a/runtime/dex_file_verifier.h b/runtime/dex_file_verifier.h
index 18bf2e7..877dfc2 100644
--- a/runtime/dex_file_verifier.h
+++ b/runtime/dex_file_verifier.h
@@ -36,7 +36,7 @@ class DexFileVerifier {
private:
DexFileVerifier(const DexFile* dex_file, const uint8_t* begin, size_t size, const char* location)
: dex_file_(dex_file), begin_(begin), size_(size), location_(location),
- header_(&dex_file->GetHeader()), ptr_(NULL), previous_item_(NULL) {
+ header_(&dex_file->GetHeader()), ptr_(nullptr), previous_item_(nullptr) {
}
bool Verify();
@@ -99,12 +99,12 @@ class DexFileVerifier {
bool CheckInterSection();
// Load a string by (type) index. Checks whether the index is in bounds, printing the error if
- // not. If there is an error, nullptr is returned.
+ // not. If there is an error, null is returned.
const char* CheckLoadStringByIdx(uint32_t idx, const char* error_fmt);
const char* CheckLoadStringByTypeIdx(uint32_t type_idx, const char* error_fmt);
// Load a field/method Id by index. Checks whether the index is in bounds, printing the error if
- // not. If there is an error, nullptr is returned.
+ // not. If there is an error, null is returned.
const DexFile::FieldId* CheckLoadFieldId(uint32_t idx, const char* error_fmt);
const DexFile::MethodId* CheckLoadMethodId(uint32_t idx, const char* error_fmt);
diff --git a/runtime/dex_file_verifier_test.cc b/runtime/dex_file_verifier_test.cc
index 00ca8a9..95a47cc 100644
--- a/runtime/dex_file_verifier_test.cc
+++ b/runtime/dex_file_verifier_test.cc
@@ -105,14 +105,14 @@ static std::unique_ptr<const DexFile> OpenDexFileBase64(const char* base64,
const char* location,
std::string* error_msg) {
// decode base64
- CHECK(base64 != NULL);
+ CHECK(base64 != nullptr);
size_t length;
std::unique_ptr<uint8_t[]> dex_bytes(DecodeBase64(base64, &length));
- CHECK(dex_bytes.get() != NULL);
+ CHECK(dex_bytes.get() != nullptr);
// write to provided file
std::unique_ptr<File> file(OS::CreateEmptyFile(location));
- CHECK(file.get() != NULL);
+ CHECK(file.get() != nullptr);
if (!file->WriteFully(dex_bytes.get(), length)) {
PLOG(FATAL) << "Failed to write base64 as dex file";
}
@@ -178,7 +178,7 @@ static std::unique_ptr<const DexFile> FixChecksumAndOpen(uint8_t* bytes, size_t
// write to provided file
std::unique_ptr<File> file(OS::CreateEmptyFile(location));
- CHECK(file.get() != NULL);
+ CHECK(file.get() != nullptr);
if (!file->WriteFully(bytes, length)) {
PLOG(FATAL) << "Failed to write base64 as dex file";
}
@@ -205,7 +205,7 @@ static bool ModifyAndLoad(const char* location, size_t offset, uint8_t new_val,
// Decode base64.
size_t length;
std::unique_ptr<uint8_t[]> dex_bytes(DecodeBase64(kGoodTestDex, &length));
- CHECK(dex_bytes.get() != NULL);
+ CHECK(dex_bytes.get() != nullptr);
// Make modifications.
dex_bytes.get()[offset] = new_val;
diff --git a/runtime/dex_instruction.h b/runtime/dex_instruction.h
index d3b9eb4..c64c21e 100644
--- a/runtime/dex_instruction.h
+++ b/runtime/dex_instruction.h
@@ -183,7 +183,7 @@ class Instruction {
// Reads an instruction out of the stream at the specified address.
static const Instruction* At(const uint16_t* code) {
- DCHECK(code != NULL);
+ DCHECK(code != nullptr);
return reinterpret_cast<const Instruction*>(code);
}
diff --git a/runtime/dex_method_iterator.h b/runtime/dex_method_iterator.h
index 14e316f..7fae277 100644
--- a/runtime/dex_method_iterator.h
+++ b/runtime/dex_method_iterator.h
@@ -30,8 +30,8 @@ class DexMethodIterator {
found_next_(false),
dex_file_index_(0),
class_def_index_(0),
- class_def_(NULL),
- class_data_(NULL),
+ class_def_(nullptr),
+ class_data_(nullptr),
direct_method_(false) {
CHECK_NE(0U, dex_files_.size());
}
@@ -51,20 +51,20 @@ class DexMethodIterator {
dex_file_index_++;
continue;
}
- if (class_def_ == NULL) {
+ if (class_def_ == nullptr) {
class_def_ = &GetDexFileInternal().GetClassDef(class_def_index_);
}
- if (class_data_ == NULL) {
+ if (class_data_ == nullptr) {
class_data_ = GetDexFileInternal().GetClassData(*class_def_);
- if (class_data_ == NULL) {
+ if (class_data_ == nullptr) {
// empty class, such as a marker interface
// End of this class, advance and retry.
- class_def_ = NULL;
+ class_def_ = nullptr;
class_def_index_++;
continue;
}
}
- if (it_.get() == NULL) {
+ if (it_.get() == nullptr) {
it_.reset(new ClassDataItemIterator(GetDexFileInternal(), class_data_));
// Skip fields
while (GetIterator().HasNextStaticField()) {
@@ -88,16 +88,16 @@ class DexMethodIterator {
}
// End of this class, advance and retry.
DCHECK(!GetIterator().HasNext());
- it_.reset(NULL);
- class_data_ = NULL;
- class_def_ = NULL;
+ it_.reset(nullptr);
+ class_data_ = nullptr;
+ class_def_ = nullptr;
class_def_index_++;
}
}
void Next() {
found_next_ = false;
- if (it_.get() != NULL) {
+ if (it_.get() != nullptr) {
// Advance to next method if we currently are looking at a class.
GetIterator().Next();
}
@@ -115,20 +115,20 @@ class DexMethodIterator {
InvokeType GetInvokeType() {
CHECK(HasNext());
- CHECK(class_def_ != NULL);
+ CHECK(class_def_ != nullptr);
return GetIterator().GetMethodInvokeType(*class_def_);
}
private:
ClassDataItemIterator& GetIterator() const {
- CHECK(it_.get() != NULL);
+ CHECK(it_.get() != nullptr);
return *it_.get();
}
const DexFile& GetDexFileInternal() const {
CHECK_LT(dex_file_index_, dex_files_.size());
const DexFile* dex_file = dex_files_[dex_file_index_];
- CHECK(dex_file != NULL);
+ CHECK(dex_file != nullptr);
return *dex_file;
}
diff --git a/runtime/elf_file.cc b/runtime/elf_file.cc
index 8969e29..e909e64 100644
--- a/runtime/elf_file.cc
+++ b/runtime/elf_file.cc
@@ -1026,13 +1026,13 @@ typename ElfTypes::Dyn* ElfFileImpl<ElfTypes>::FindDynamicByType(Elf_Sword type)
return dyn;
}
}
- return NULL;
+ return nullptr;
}
template <typename ElfTypes>
typename ElfTypes::Word ElfFileImpl<ElfTypes>::FindDynamicValueByType(Elf_Sword type) const {
Elf_Dyn* dyn = FindDynamicByType(type);
- if (dyn == NULL) {
+ if (dyn == nullptr) {
return 0;
} else {
return dyn->d_un.d_val;
@@ -1567,7 +1567,7 @@ bool ElfFileImpl<ElfTypes>::Strip(std::string* error_msg) {
CHECK_NE(0U, section_headers.size());
CHECK_EQ(section_headers.size(), section_headers_original_indexes.size());
- // section 0 is the NULL section, sections start at offset of first section
+ // section 0 is the null section, sections start at offset of first section
CHECK(GetSectionHeader(1) != nullptr);
Elf_Off offset = GetSectionHeader(1)->sh_offset;
for (size_t i = 1; i < section_headers.size(); i++) {
diff --git a/runtime/elf_file_impl.h b/runtime/elf_file_impl.h
index 047849a..80950c6 100644
--- a/runtime/elf_file_impl.h
+++ b/runtime/elf_file_impl.h
@@ -94,8 +94,7 @@ class ElfFileImpl {
const std::string& symbol_name,
bool build_map);
- // Lookup a string given string section and offset. Returns nullptr for
- // special 0 offset.
+ // Lookup a string given string section and offset. Returns null for special 0 offset.
const char* GetString(Elf_Shdr&, Elf_Word) const;
Elf_Word GetDynamicNum() const;
@@ -167,7 +166,7 @@ class ElfFileImpl {
// Check whether the offset is in range, and set to target to Begin() + offset if OK.
bool CheckAndSet(Elf32_Off offset, const char* label, uint8_t** target, std::string* error_msg);
- // Find symbol in specified table, returning nullptr if it is not found.
+ // Find symbol in specified table, returning null if it is not found.
//
// If build_map is true, builds a map to speed repeated access. The
// map does not included untyped symbol values (aka STT_NOTYPE)
@@ -184,7 +183,7 @@ class ElfFileImpl {
Elf_Dyn* FindDynamicByType(Elf_Sword type) const;
Elf_Word FindDynamicValueByType(Elf_Sword type) const;
- // Lookup a string by section type. Returns nullptr for special 0 offset.
+ // Lookup a string by section type. Returns null for special 0 offset.
const char* GetString(Elf_Word section_type, Elf_Word) const;
const File* const file_;
diff --git a/runtime/entrypoints/entrypoint_utils-inl.h b/runtime/entrypoints/entrypoint_utils-inl.h
index cbfba12..64b7ecd 100644
--- a/runtime/entrypoints/entrypoint_utils-inl.h
+++ b/runtime/entrypoints/entrypoint_utils-inl.h
@@ -41,10 +41,10 @@ inline mirror::Class* CheckObjectAlloc(uint32_t type_idx,
mirror::ArtMethod* method,
Thread* self, bool* slow_path) {
mirror::Class* klass = method->GetDexCacheResolvedType<false>(type_idx);
- if (UNLIKELY(klass == NULL)) {
+ if (UNLIKELY(klass == nullptr)) {
klass = Runtime::Current()->GetClassLinker()->ResolveType(type_idx, method);
*slow_path = true;
- if (klass == NULL) {
+ if (klass == nullptr) {
DCHECK(self->IsExceptionPending());
return nullptr; // Failure
} else {
@@ -526,19 +526,19 @@ inline mirror::ArtMethod* FindMethodFast(uint32_t method_idx,
mirror::Object* this_object,
mirror::ArtMethod* referrer,
bool access_check, InvokeType type) {
- if (UNLIKELY(this_object == NULL && type != kStatic)) {
- return NULL;
+ if (UNLIKELY(this_object == nullptr && type != kStatic)) {
+ return nullptr;
}
mirror::ArtMethod* resolved_method =
referrer->GetDeclaringClass()->GetDexCache()->GetResolvedMethod(method_idx);
- if (UNLIKELY(resolved_method == NULL)) {
- return NULL;
+ if (UNLIKELY(resolved_method == nullptr)) {
+ return nullptr;
}
if (access_check) {
// Check for incompatible class change errors and access.
bool icce = resolved_method->CheckIncompatibleClassChange(type);
if (UNLIKELY(icce)) {
- return NULL;
+ return nullptr;
}
mirror::Class* methods_class = resolved_method->GetDeclaringClass();
mirror::Class* referring_class = referrer->GetDeclaringClass();
@@ -546,7 +546,7 @@ inline mirror::ArtMethod* FindMethodFast(uint32_t method_idx,
!referring_class->CanAccessMember(methods_class,
resolved_method->GetAccessFlags()))) {
// Potential illegal access, may need to refine the method's class.
- return NULL;
+ return nullptr;
}
}
if (type == kInterface) { // Most common form of slow path dispatch.
@@ -606,7 +606,7 @@ inline mirror::String* ResolveStringFromCode(mirror::ArtMethod* referrer,
inline void UnlockJniSynchronizedMethod(jobject locked, Thread* self) {
// Save any pending exception over monitor exit call.
- mirror::Throwable* saved_exception = NULL;
+ mirror::Throwable* saved_exception = nullptr;
if (UNLIKELY(self->IsExceptionPending())) {
saved_exception = self->GetException();
self->ClearException();
@@ -620,7 +620,7 @@ inline void UnlockJniSynchronizedMethod(jobject locked, Thread* self) {
<< self->GetException()->Dump();
}
// Restore pending exception.
- if (saved_exception != NULL) {
+ if (saved_exception != nullptr) {
self->SetException(saved_exception);
}
}
diff --git a/runtime/entrypoints/entrypoint_utils.cc b/runtime/entrypoints/entrypoint_utils.cc
index 768f505..ce56739 100644
--- a/runtime/entrypoints/entrypoint_utils.cc
+++ b/runtime/entrypoints/entrypoint_utils.cc
@@ -44,9 +44,9 @@ static inline mirror::Class* CheckFilledNewArrayAlloc(uint32_t type_idx,
return nullptr; // Failure
}
mirror::Class* klass = referrer->GetDexCacheResolvedType<false>(type_idx);
- if (UNLIKELY(klass == NULL)) { // Not in dex cache so try to resolve
+ if (UNLIKELY(klass == nullptr)) { // Not in dex cache so try to resolve
klass = Runtime::Current()->GetClassLinker()->ResolveType(type_idx, referrer);
- if (klass == NULL) { // Error
+ if (klass == nullptr) { // Error
DCHECK(self->IsExceptionPending());
return nullptr; // Failure
}
@@ -231,13 +231,13 @@ JValue InvokeProxyInvocationHandler(ScopedObjectAccessAlreadyRunnable& soa, cons
// Build argument array possibly triggering GC.
soa.Self()->AssertThreadSuspensionIsAllowable();
- jobjectArray args_jobj = NULL;
+ jobjectArray args_jobj = nullptr;
const JValue zero;
int32_t target_sdk_version = Runtime::Current()->GetTargetSdkVersion();
// Do not create empty arrays unless needed to maintain Dalvik bug compatibility.
if (args.size() > 0 || (target_sdk_version > 0 && target_sdk_version <= 21)) {
- args_jobj = soa.Env()->NewObjectArray(args.size(), WellKnownClasses::java_lang_Object, NULL);
- if (args_jobj == NULL) {
+ args_jobj = soa.Env()->NewObjectArray(args.size(), WellKnownClasses::java_lang_Object, nullptr);
+ if (args_jobj == nullptr) {
CHECK(soa.Self()->IsExceptionPending());
return zero;
}
@@ -249,7 +249,7 @@ JValue InvokeProxyInvocationHandler(ScopedObjectAccessAlreadyRunnable& soa, cons
JValue jv;
jv.SetJ(args.at(i).j);
mirror::Object* val = BoxPrimitive(Primitive::GetType(shorty[i + 1]), jv);
- if (val == NULL) {
+ if (val == nullptr) {
CHECK(soa.Self()->IsExceptionPending());
return zero;
}
@@ -270,7 +270,7 @@ JValue InvokeProxyInvocationHandler(ScopedObjectAccessAlreadyRunnable& soa, cons
// Unbox result and handle error conditions.
if (LIKELY(!soa.Self()->IsExceptionPending())) {
- if (shorty[0] == 'V' || (shorty[0] == 'L' && result == NULL)) {
+ if (shorty[0] == 'V' || (shorty[0] == 'L' && result == nullptr)) {
// Do nothing.
return zero;
} else {
@@ -315,7 +315,7 @@ JValue InvokeProxyInvocationHandler(ScopedObjectAccessAlreadyRunnable& soa, cons
}
if (!declares_exception) {
soa.Self()->ThrowNewWrappedException("Ljava/lang/reflect/UndeclaredThrowableException;",
- NULL);
+ nullptr);
}
}
return zero;
diff --git a/runtime/entrypoints/interpreter/interpreter_entrypoints.cc b/runtime/entrypoints/interpreter/interpreter_entrypoints.cc
index 28e19d4..d4844c2 100644
--- a/runtime/entrypoints/interpreter/interpreter_entrypoints.cc
+++ b/runtime/entrypoints/interpreter/interpreter_entrypoints.cc
@@ -47,7 +47,7 @@ extern "C" void artInterpreterToCompiledCodeBridge(Thread* self, const DexFile::
method = shadow_frame->GetMethod();
}
}
- uint16_t arg_offset = (code_item == NULL) ? 0 : code_item->registers_size_ - code_item->ins_size_;
+ uint16_t arg_offset = (code_item == nullptr) ? 0 : code_item->registers_size_ - code_item->ins_size_;
method->Invoke(self, shadow_frame->GetVRegArgs(arg_offset),
(shadow_frame->NumberOfVRegs() - arg_offset) * sizeof(uint32_t),
result, method->GetShorty());
diff --git a/runtime/entrypoints/jni/jni_entrypoints.cc b/runtime/entrypoints/jni/jni_entrypoints.cc
index 2752407..a68eeeb 100644
--- a/runtime/entrypoints/jni/jni_entrypoints.cc
+++ b/runtime/entrypoints/jni/jni_entrypoints.cc
@@ -34,15 +34,15 @@ extern "C" void* artFindNativeMethod(Thread* self) {
Locks::mutator_lock_->AssertNotHeld(self); // We come here as Native.
ScopedObjectAccess soa(self);
- mirror::ArtMethod* method = self->GetCurrentMethod(NULL);
- DCHECK(method != NULL);
+ mirror::ArtMethod* method = self->GetCurrentMethod(nullptr);
+ DCHECK(method != nullptr);
- // Lookup symbol address for method, on failure we'll return NULL with an exception set,
+ // Lookup symbol address for method, on failure we'll return null with an exception set,
// otherwise we return the address of the method we found.
void* native_code = soa.Vm()->FindCodeForNativeMethod(method);
- if (native_code == NULL) {
+ if (native_code == nullptr) {
DCHECK(self->IsExceptionPending());
- return NULL;
+ return nullptr;
} else {
// Register so that future calls don't come here
method->RegisterNative(native_code, false);
diff --git a/runtime/entrypoints/quick/quick_cast_entrypoints.cc b/runtime/entrypoints/quick/quick_cast_entrypoints.cc
index a6ab69b..37de380 100644
--- a/runtime/entrypoints/quick/quick_cast_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_cast_entrypoints.cc
@@ -22,8 +22,8 @@ namespace art {
// Assignable test for code, won't throw. Null and equality tests already performed
extern "C" uint32_t artIsAssignableFromCode(mirror::Class* klass, mirror::Class* ref_class)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- DCHECK(klass != NULL);
- DCHECK(ref_class != NULL);
+ DCHECK(klass != nullptr);
+ DCHECK(ref_class != nullptr);
return klass->IsAssignableFrom(ref_class) ? 1 : 0;
}
diff --git a/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc b/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc
index 54dbd8c..eb1b105 100644
--- a/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc
@@ -41,7 +41,7 @@ extern "C" const void* artInstrumentationMethodEntryFromCode(mirror::ArtMethod*
bool interpreter_entry = (result == GetQuickToInterpreterBridge());
instrumentation->PushInstrumentationStackFrame(self, method->IsStatic() ? nullptr : this_object,
method, lr, interpreter_entry);
- CHECK(result != NULL) << PrettyMethod(method);
+ CHECK(result != nullptr) << PrettyMethod(method);
return result;
}
diff --git a/runtime/entrypoints/quick/quick_throw_entrypoints.cc b/runtime/entrypoints/quick/quick_throw_entrypoints.cc
index 9644b98..f22edc1 100644
--- a/runtime/entrypoints/quick/quick_throw_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_throw_entrypoints.cc
@@ -34,10 +34,10 @@ extern "C" NO_RETURN void artDeliverPendingExceptionFromCode(Thread* self)
extern "C" NO_RETURN void artDeliverExceptionFromCode(mirror::Throwable* exception, Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
/*
- * exception may be NULL, in which case this routine should
+ * exception may be null, in which case this routine should
* throw NPE. NOTE: this is a convenience for generated code,
* which previously did the null check inline and constructed
- * and threw a NPE if NULL. This routine responsible for setting
+ * and threw a NPE if null. This routine responsible for setting
* exception_ in thread and delivering the exception.
*/
ScopedQuickEntrypointChecks sqec(self);
diff --git a/runtime/exception_test.cc b/runtime/exception_test.cc
index 1770658..6808000 100644
--- a/runtime/exception_test.cc
+++ b/runtime/exception_test.cc
@@ -200,7 +200,7 @@ TEST_F(ExceptionTest, StackTraceElement) {
fake_stack.push_back(0);
fake_stack.push_back(0xEBAD6070); // return pc
- // Push Method* of NULL to terminate the trace
+ // Push Method* of null to terminate the trace
fake_stack.push_back(0);
// Push null values which will become null incoming arguments.
diff --git a/runtime/gc/accounting/atomic_stack.h b/runtime/gc/accounting/atomic_stack.h
index 5224d64..399832a 100644
--- a/runtime/gc/accounting/atomic_stack.h
+++ b/runtime/gc/accounting/atomic_stack.h
@@ -238,9 +238,9 @@ class AtomicStack {
std::string error_msg;
mem_map_.reset(MemMap::MapAnonymous(name_.c_str(), nullptr, capacity_ * sizeof(begin_[0]),
PROT_READ | PROT_WRITE, false, false, &error_msg));
- CHECK(mem_map_.get() != NULL) << "couldn't allocate mark stack.\n" << error_msg;
+ CHECK(mem_map_.get() != nullptr) << "couldn't allocate mark stack.\n" << error_msg;
uint8_t* addr = mem_map_->Begin();
- CHECK(addr != NULL);
+ CHECK(addr != nullptr);
debug_is_sorted_ = true;
begin_ = reinterpret_cast<StackReference<T>*>(addr);
Reset();
diff --git a/runtime/gc/accounting/card_table.cc b/runtime/gc/accounting/card_table.cc
index 7879632..1a7b1a3 100644
--- a/runtime/gc/accounting/card_table.cc
+++ b/runtime/gc/accounting/card_table.cc
@@ -36,7 +36,7 @@ constexpr uint8_t CardTable::kCardDirty;
/*
* Maintain a card table from the write barrier. All writes of
- * non-NULL values to heap addresses should go through an entry in
+ * non-null values to heap addresses should go through an entry in
* WriteBarrier, and from there to here.
*
* The heap is divided into "cards" of GC_CARD_SIZE bytes, as
@@ -44,7 +44,7 @@ constexpr uint8_t CardTable::kCardDirty;
* data per card, to be used by the GC. The value of the byte will be
* one of GC_CARD_CLEAN or GC_CARD_DIRTY.
*
- * After any store of a non-NULL object pointer into a heap object,
+ * After any store of a non-null object pointer into a heap object,
* code is obliged to mark the card dirty. The setters in
* object.h [such as SetFieldObject] do this for you. The
* compiler also contains code to mark cards as dirty.
@@ -64,13 +64,13 @@ CardTable* CardTable::Create(const uint8_t* heap_begin, size_t heap_capacity) {
std::unique_ptr<MemMap> mem_map(
MemMap::MapAnonymous("card table", nullptr, capacity + 256, PROT_READ | PROT_WRITE,
false, false, &error_msg));
- CHECK(mem_map.get() != NULL) << "couldn't allocate card table: " << error_msg;
+ CHECK(mem_map.get() != nullptr) << "couldn't allocate card table: " << error_msg;
// All zeros is the correct initial value; all clean. Anonymous mmaps are initialized to zero, we
// don't clear the card table to avoid unnecessary pages being allocated
static_assert(kCardClean == 0, "kCardClean must be 0");
uint8_t* cardtable_begin = mem_map->Begin();
- CHECK(cardtable_begin != NULL);
+ CHECK(cardtable_begin != nullptr);
// We allocated up to a bytes worth of extra space to allow biased_begin's byte value to equal
// kCardDirty, compute a offset value to make this the case
diff --git a/runtime/gc/accounting/card_table.h b/runtime/gc/accounting/card_table.h
index 896cce5..75ef58a 100644
--- a/runtime/gc/accounting/card_table.h
+++ b/runtime/gc/accounting/card_table.h
@@ -43,7 +43,7 @@ namespace accounting {
template<size_t kAlignment> class SpaceBitmap;
// Maintain a card table from the the write barrier. All writes of
-// non-NULL values to heap addresses should go through an entry in
+// non-null values to heap addresses should go through an entry in
// WriteBarrier, and from there to here.
class CardTable {
public:
diff --git a/runtime/gc/accounting/space_bitmap-inl.h b/runtime/gc/accounting/space_bitmap-inl.h
index 11347a5..ae91200 100644
--- a/runtime/gc/accounting/space_bitmap-inl.h
+++ b/runtime/gc/accounting/space_bitmap-inl.h
@@ -55,7 +55,7 @@ template<size_t kAlignment>
inline bool SpaceBitmap<kAlignment>::Test(const mirror::Object* obj) const {
uintptr_t addr = reinterpret_cast<uintptr_t>(obj);
DCHECK(HasAddress(obj)) << obj;
- DCHECK(bitmap_begin_ != NULL);
+ DCHECK(bitmap_begin_ != nullptr);
DCHECK_GE(addr, heap_begin_);
const uintptr_t offset = addr - heap_begin_;
return (bitmap_begin_[OffsetToIndex(offset)] & OffsetToMask(offset)) != 0;
diff --git a/runtime/gc/accounting/space_bitmap.cc b/runtime/gc/accounting/space_bitmap.cc
index 2da8325..84dadea 100644
--- a/runtime/gc/accounting/space_bitmap.cc
+++ b/runtime/gc/accounting/space_bitmap.cc
@@ -104,8 +104,8 @@ void SpaceBitmap<kAlignment>::CopyFrom(SpaceBitmap* source_bitmap) {
template<size_t kAlignment>
void SpaceBitmap<kAlignment>::Walk(ObjectCallback* callback, void* arg) {
- CHECK(bitmap_begin_ != NULL);
- CHECK(callback != NULL);
+ CHECK(bitmap_begin_ != nullptr);
+ CHECK(callback != nullptr);
uintptr_t end = OffsetToIndex(HeapLimit() - heap_begin_ - 1);
uintptr_t* bitmap_begin = bitmap_begin_;
@@ -132,7 +132,7 @@ void SpaceBitmap<kAlignment>::SweepWalk(const SpaceBitmap<kAlignment>& live_bitm
CHECK(mark_bitmap.bitmap_begin_ != nullptr);
CHECK_EQ(live_bitmap.heap_begin_, mark_bitmap.heap_begin_);
CHECK_EQ(live_bitmap.bitmap_size_, mark_bitmap.bitmap_size_);
- CHECK(callback != NULL);
+ CHECK(callback != nullptr);
CHECK_LE(sweep_begin, sweep_end);
CHECK_GE(sweep_begin, live_bitmap.heap_begin_);
@@ -186,7 +186,7 @@ void SpaceBitmap<kAlignment>::WalkInstanceFields(SpaceBitmap<kAlignment>* visite
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// Visit fields of parent classes first.
mirror::Class* super = klass->GetSuperClass();
- if (super != NULL) {
+ if (super != nullptr) {
WalkInstanceFields(visited, callback, obj, super, arg);
}
// Walk instance fields
@@ -233,7 +233,7 @@ void SpaceBitmap<kAlignment>::WalkFieldsInOrder(SpaceBitmap<kAlignment>* visited
int32_t length = obj_array->GetLength();
for (int32_t i = 0; i < length; i++) {
mirror::Object* value = obj_array->Get(i);
- if (value != NULL) {
+ if (value != nullptr) {
WalkFieldsInOrder(visited, callback, value, arg);
}
}
diff --git a/runtime/gc/accounting/space_bitmap_test.cc b/runtime/gc/accounting/space_bitmap_test.cc
index 850325a..edb08ef 100644
--- a/runtime/gc/accounting/space_bitmap_test.cc
+++ b/runtime/gc/accounting/space_bitmap_test.cc
@@ -34,7 +34,7 @@ TEST_F(SpaceBitmapTest, Init) {
size_t heap_capacity = 16 * MB;
std::unique_ptr<ContinuousSpaceBitmap> space_bitmap(
ContinuousSpaceBitmap::Create("test bitmap", heap_begin, heap_capacity));
- EXPECT_TRUE(space_bitmap.get() != NULL);
+ EXPECT_TRUE(space_bitmap.get() != nullptr);
}
class BitmapVerify {
@@ -62,7 +62,7 @@ TEST_F(SpaceBitmapTest, ScanRange) {
std::unique_ptr<ContinuousSpaceBitmap> space_bitmap(
ContinuousSpaceBitmap::Create("test bitmap", heap_begin, heap_capacity));
- EXPECT_TRUE(space_bitmap.get() != NULL);
+ EXPECT_TRUE(space_bitmap.get() != nullptr);
// Set all the odd bits in the first BitsPerIntPtrT * 3 to one.
for (size_t j = 0; j < kBitsPerIntPtrT * 3; ++j) {
diff --git a/runtime/gc/allocator/rosalloc.cc b/runtime/gc/allocator/rosalloc.cc
index 515f124..85234dc 100644
--- a/runtime/gc/allocator/rosalloc.cc
+++ b/runtime/gc/allocator/rosalloc.cc
@@ -115,7 +115,7 @@ RosAlloc::~RosAlloc() {
void* RosAlloc::AllocPages(Thread* self, size_t num_pages, uint8_t page_map_type) {
lock_.AssertHeld(self);
DCHECK(page_map_type == kPageMapRun || page_map_type == kPageMapLargeObject);
- FreePageRun* res = NULL;
+ FreePageRun* res = nullptr;
const size_t req_byte_size = num_pages * kPageSize;
// Find the lowest address free page run that's large enough.
for (auto it = free_page_runs_.begin(); it != free_page_runs_.end(); ) {
@@ -157,8 +157,8 @@ void* RosAlloc::AllocPages(Thread* self, size_t num_pages, uint8_t page_map_type
}
// Failed to allocate pages. Grow the footprint, if possible.
- if (UNLIKELY(res == NULL && capacity_ > footprint_)) {
- FreePageRun* last_free_page_run = NULL;
+ if (UNLIKELY(res == nullptr && capacity_ > footprint_)) {
+ FreePageRun* last_free_page_run = nullptr;
size_t last_free_page_run_size;
auto it = free_page_runs_.rbegin();
if (it != free_page_runs_.rend() && (last_free_page_run = *it)->End(this) == base_ + footprint_) {
@@ -218,7 +218,7 @@ void* RosAlloc::AllocPages(Thread* self, size_t num_pages, uint8_t page_map_type
DCHECK(it != free_page_runs_.rend());
FreePageRun* fpr = *it;
if (kIsDebugBuild && last_free_page_run_size > 0) {
- DCHECK(last_free_page_run != NULL);
+ DCHECK(last_free_page_run != nullptr);
DCHECK_EQ(last_free_page_run, fpr);
}
size_t fpr_byte_size = fpr->ByteSize(this);
@@ -249,7 +249,7 @@ void* RosAlloc::AllocPages(Thread* self, size_t num_pages, uint8_t page_map_type
res = fpr;
}
}
- if (LIKELY(res != NULL)) {
+ if (LIKELY(res != nullptr)) {
// Update the page map.
size_t page_map_idx = ToPageMapIndex(res);
for (size_t i = 0; i < num_pages; i++) {
@@ -286,7 +286,7 @@ void* RosAlloc::AllocPages(Thread* self, size_t num_pages, uint8_t page_map_type
// Fail.
if (kTraceRosAlloc) {
- LOG(INFO) << "RosAlloc::AllocPages() : NULL";
+ LOG(INFO) << "RosAlloc::AllocPages() : nullptr";
}
return nullptr;
}
@@ -468,7 +468,7 @@ void* RosAlloc::AllocLargeObject(Thread* self, size_t size, size_t* bytes_alloca
}
if (UNLIKELY(r == nullptr)) {
if (kTraceRosAlloc) {
- LOG(INFO) << "RosAlloc::AllocLargeObject() : NULL";
+ LOG(INFO) << "RosAlloc::AllocLargeObject() : nullptr";
}
return nullptr;
}
@@ -824,7 +824,7 @@ size_t RosAlloc::FreeFromRun(Thread* self, void* ptr, Run* run) {
// already in the non-full run set (i.e., it was full) insert it
// into the non-full run set.
if (run != current_runs_[idx]) {
- auto* full_runs = kIsDebugBuild ? &full_runs_[idx] : NULL;
+ auto* full_runs = kIsDebugBuild ? &full_runs_[idx] : nullptr;
auto pos = non_full_runs->find(run);
if (pos == non_full_runs->end()) {
DCHECK(run_was_full);
@@ -1275,7 +1275,7 @@ size_t RosAlloc::BulkFree(Thread* self, void** ptrs, size_t num_ptrs) {
// Check if the run should be moved to non_full_runs_ or
// free_page_runs_.
auto* non_full_runs = &non_full_runs_[idx];
- auto* full_runs = kIsDebugBuild ? &full_runs_[idx] : NULL;
+ auto* full_runs = kIsDebugBuild ? &full_runs_[idx] : nullptr;
if (run->IsAllFree()) {
// It has just become completely free. Free the pages of the
// run.
@@ -1358,7 +1358,7 @@ std::string RosAlloc::DumpPageMap() {
stream << "RosAlloc PageMap: " << std::endl;
lock_.AssertHeld(Thread::Current());
size_t end = page_map_size_;
- FreePageRun* curr_fpr = NULL;
+ FreePageRun* curr_fpr = nullptr;
size_t curr_fpr_size = 0;
size_t remaining_curr_fpr_size = 0;
size_t num_running_empty_pages = 0;
@@ -1373,7 +1373,7 @@ std::string RosAlloc::DumpPageMap() {
// Encountered a fresh free page run.
DCHECK_EQ(remaining_curr_fpr_size, static_cast<size_t>(0));
DCHECK(fpr->IsFree());
- DCHECK(curr_fpr == NULL);
+ DCHECK(curr_fpr == nullptr);
DCHECK_EQ(curr_fpr_size, static_cast<size_t>(0));
curr_fpr = fpr;
curr_fpr_size = fpr->ByteSize(this);
@@ -1384,7 +1384,7 @@ std::string RosAlloc::DumpPageMap() {
<< " remaining_fpr_size=" << remaining_curr_fpr_size << std::endl;
if (remaining_curr_fpr_size == 0) {
// Reset at the end of the current free page run.
- curr_fpr = NULL;
+ curr_fpr = nullptr;
curr_fpr_size = 0;
}
stream << "curr_fpr=0x" << std::hex << reinterpret_cast<intptr_t>(curr_fpr) << std::endl;
@@ -1392,7 +1392,7 @@ std::string RosAlloc::DumpPageMap() {
} else {
// Still part of the current free page run.
DCHECK_NE(num_running_empty_pages, static_cast<size_t>(0));
- DCHECK(curr_fpr != NULL && curr_fpr_size > 0 && remaining_curr_fpr_size > 0);
+ DCHECK(curr_fpr != nullptr && curr_fpr_size > 0 && remaining_curr_fpr_size > 0);
DCHECK_EQ(remaining_curr_fpr_size % kPageSize, static_cast<size_t>(0));
DCHECK_GE(remaining_curr_fpr_size, static_cast<size_t>(kPageSize));
remaining_curr_fpr_size -= kPageSize;
@@ -1400,7 +1400,7 @@ std::string RosAlloc::DumpPageMap() {
<< " remaining_fpr_size=" << remaining_curr_fpr_size << std::endl;
if (remaining_curr_fpr_size == 0) {
// Reset at the end of the current free page run.
- curr_fpr = NULL;
+ curr_fpr = nullptr;
curr_fpr_size = 0;
}
}
@@ -1546,7 +1546,7 @@ bool RosAlloc::Trim() {
void RosAlloc::InspectAll(void (*handler)(void* start, void* end, size_t used_bytes, void* callback_arg),
void* arg) {
// Note: no need to use this to release pages as we already do so in FreePages().
- if (handler == NULL) {
+ if (handler == nullptr) {
return;
}
MutexLock mu(Thread::Current(), lock_);
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 14eb80b..f0e8d14 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -578,7 +578,7 @@ class MarkStackTask : public Task {
mark_stack_pos_(mark_stack_size) {
// We may have to copy part of an existing mark stack when another mark stack overflows.
if (mark_stack_size != 0) {
- DCHECK(mark_stack != NULL);
+ DCHECK(mark_stack != nullptr);
// TODO: Check performance?
std::copy(mark_stack, mark_stack + mark_stack_size, mark_stack_);
}
@@ -850,7 +850,7 @@ class RecursiveMarkTask : public MarkStackTask<false> {
public:
RecursiveMarkTask(ThreadPool* thread_pool, MarkSweep* mark_sweep,
accounting::ContinuousSpaceBitmap* bitmap, uintptr_t begin, uintptr_t end)
- : MarkStackTask<false>(thread_pool, mark_sweep, 0, NULL), bitmap_(bitmap), begin_(begin),
+ : MarkStackTask<false>(thread_pool, mark_sweep, 0, nullptr), bitmap_(bitmap), begin_(begin),
end_(end) {
}
@@ -1260,11 +1260,11 @@ void MarkSweep::ProcessMarkStack(bool paused) {
static const size_t kFifoSize = 4;
BoundedFifoPowerOfTwo<Object*, kFifoSize> prefetch_fifo;
for (;;) {
- Object* obj = NULL;
+ Object* obj = nullptr;
if (kUseMarkStackPrefetch) {
while (!mark_stack_->IsEmpty() && prefetch_fifo.size() < kFifoSize) {
Object* mark_stack_obj = mark_stack_->PopBack();
- DCHECK(mark_stack_obj != NULL);
+ DCHECK(mark_stack_obj != nullptr);
__builtin_prefetch(mark_stack_obj);
prefetch_fifo.push_back(mark_stack_obj);
}
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index dbf01d8..82d02e7 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -750,7 +750,7 @@ inline Object* SemiSpace::GetMarkedForwardAddress(mirror::Object* obj) const
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
// All immune objects are assumed marked.
if (from_space_->HasAddress(obj)) {
- // Returns either the forwarding address or nullptr.
+ // Returns either the forwarding address or null.
return GetForwardingAddressInFromSpace(obj);
} else if (collect_from_space_only_ || immune_region_.ContainsObject(obj) ||
to_space_->HasAddress(obj)) {
diff --git a/runtime/gc/heap-inl.h b/runtime/gc/heap-inl.h
index b770096..3e56205 100644
--- a/runtime/gc/heap-inl.h
+++ b/runtime/gc/heap-inl.h
@@ -293,7 +293,7 @@ inline mirror::Object* Heap::TryToAllocate(Thread* self, AllocatorType allocator
return nullptr;
}
// Try allocating a new thread local buffer, if the allocaiton fails the space must be
- // full so return nullptr.
+ // full so return null.
if (!bump_pointer_space_->AllocNewTlab(self, new_tlab_size)) {
return nullptr;
}
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index beaf067..b80c4b6 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -412,7 +412,7 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max
}
// Allocate the card table.
card_table_.reset(accounting::CardTable::Create(heap_begin, heap_capacity));
- CHECK(card_table_.get() != NULL) << "Failed to create card table";
+ CHECK(card_table_.get() != nullptr) << "Failed to create card table";
if (foreground_collector_type_ == kCollectorTypeCC && kUseTableLookupReadBarrier) {
rb_table_.reset(new accounting::ReadBarrierTable());
@@ -1052,7 +1052,7 @@ space::ContinuousSpace* Heap::FindContinuousSpaceFromObject(const mirror::Object
if (!fail_ok) {
LOG(FATAL) << "object " << reinterpret_cast<const void*>(obj) << " not inside any spaces!";
}
- return NULL;
+ return nullptr;
}
space::DiscontinuousSpace* Heap::FindDiscontinuousSpaceFromObject(const mirror::Object* obj,
@@ -1065,12 +1065,12 @@ space::DiscontinuousSpace* Heap::FindDiscontinuousSpaceFromObject(const mirror::
if (!fail_ok) {
LOG(FATAL) << "object " << reinterpret_cast<const void*>(obj) << " not inside any spaces!";
}
- return NULL;
+ return nullptr;
}
space::Space* Heap::FindSpaceFromObject(const mirror::Object* obj, bool fail_ok) const {
space::Space* result = FindContinuousSpaceFromObject(obj, true);
- if (result != NULL) {
+ if (result != nullptr) {
return result;
}
return FindDiscontinuousSpaceFromObject(obj, fail_ok);
@@ -1082,7 +1082,7 @@ space::ImageSpace* Heap::GetImageSpace() const {
return space->AsImageSpace();
}
}
- return NULL;
+ return nullptr;
}
void Heap::ThrowOutOfMemoryError(Thread* self, size_t byte_count, AllocatorType allocator_type) {
@@ -2204,7 +2204,7 @@ void Heap::PreZygoteFork() {
// Turn the current alloc space into a zygote space and obtain the new alloc space composed of
// the remaining available space.
// Remove the old space before creating the zygote space since creating the zygote space sets
- // the old alloc space's bitmaps to nullptr.
+ // the old alloc space's bitmaps to null.
RemoveSpace(old_alloc_space);
if (collector::SemiSpace::kUseRememberedSet) {
// Sanity bound check.
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 066b4c5..565687c 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -396,7 +396,7 @@ class Heap {
void RecordFreeRevoke();
// Must be called if a field of an Object in the heap changes, and before any GC safe-point.
- // The call is not needed if NULL is stored in the field.
+ // The call is not needed if null is stored in the field.
ALWAYS_INLINE void WriteBarrierField(const mirror::Object* dst, MemberOffset /*offset*/,
const mirror::Object* /*new_value*/) {
card_table_->MarkCard(dst);
@@ -991,7 +991,7 @@ class Heap {
// programs it is "cleared" making it the same as capacity.
size_t growth_limit_;
- // When the number of bytes allocated exceeds the footprint TryAllocate returns NULL indicating
+ // When the number of bytes allocated exceeds the footprint TryAllocate returns null indicating
// a GC should be triggered.
size_t max_allowed_footprint_;
diff --git a/runtime/gc/reference_queue.cc b/runtime/gc/reference_queue.cc
index 7be0704..4c93a4c 100644
--- a/runtime/gc/reference_queue.cc
+++ b/runtime/gc/reference_queue.cc
@@ -30,7 +30,7 @@ ReferenceQueue::ReferenceQueue(Mutex* lock) : lock_(lock), list_(nullptr) {
}
void ReferenceQueue::AtomicEnqueueIfNotEnqueued(Thread* self, mirror::Reference* ref) {
- DCHECK(ref != NULL);
+ DCHECK(ref != nullptr);
MutexLock mu(self, *lock_);
if (!ref->IsEnqueued()) {
EnqueuePendingReference(ref);
@@ -43,7 +43,7 @@ void ReferenceQueue::EnqueueReference(mirror::Reference* ref) {
}
void ReferenceQueue::EnqueuePendingReference(mirror::Reference* ref) {
- DCHECK(ref != NULL);
+ DCHECK(ref != nullptr);
if (IsEmpty()) {
// 1 element cyclic queue, ie: Reference ref = ..; ref.pendingNext = ref;
list_ = ref;
diff --git a/runtime/gc/space/bump_pointer_space.h b/runtime/gc/space/bump_pointer_space.h
index c496a42..df43606 100644
--- a/runtime/gc/space/bump_pointer_space.h
+++ b/runtime/gc/space/bump_pointer_space.h
@@ -45,7 +45,7 @@ class BumpPointerSpace FINAL : public ContinuousMemMapAllocSpace {
static BumpPointerSpace* Create(const std::string& name, size_t capacity, uint8_t* requested_begin);
static BumpPointerSpace* CreateFromMemMap(const std::string& name, MemMap* mem_map);
- // Allocate num_bytes, returns nullptr if the space is full.
+ // Allocate num_bytes, returns null if the space is full.
mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
size_t* usable_size, size_t* bytes_tl_bulk_allocated) OVERRIDE;
// Thread-unsafe allocation for when mutators are suspended, used by the semispace collector.
diff --git a/runtime/gc/space/dlmalloc_space-inl.h b/runtime/gc/space/dlmalloc_space-inl.h
index 9eace89..4fc4ada 100644
--- a/runtime/gc/space/dlmalloc_space-inl.h
+++ b/runtime/gc/space/dlmalloc_space-inl.h
@@ -35,7 +35,7 @@ inline mirror::Object* DlMallocSpace::AllocNonvirtual(Thread* self, size_t num_b
obj = AllocWithoutGrowthLocked(self, num_bytes, bytes_allocated, usable_size,
bytes_tl_bulk_allocated);
}
- if (LIKELY(obj != NULL)) {
+ if (LIKELY(obj != nullptr)) {
// Zero freshly allocated memory, done while not holding the space's lock.
memset(obj, 0, num_bytes);
}
@@ -57,13 +57,13 @@ inline mirror::Object* DlMallocSpace::AllocWithoutGrowthLocked(
size_t* usable_size,
size_t* bytes_tl_bulk_allocated) {
mirror::Object* result = reinterpret_cast<mirror::Object*>(mspace_malloc(mspace_, num_bytes));
- if (LIKELY(result != NULL)) {
+ if (LIKELY(result != nullptr)) {
if (kDebugSpaces) {
CHECK(Contains(result)) << "Allocation (" << reinterpret_cast<void*>(result)
<< ") not in bounds of allocation space " << *this;
}
size_t allocation_size = AllocationSizeNonvirtual(result, usable_size);
- DCHECK(bytes_allocated != NULL);
+ DCHECK(bytes_allocated != nullptr);
*bytes_allocated = allocation_size;
*bytes_tl_bulk_allocated = allocation_size;
}
diff --git a/runtime/gc/space/dlmalloc_space.cc b/runtime/gc/space/dlmalloc_space.cc
index 225861d..7b1a421 100644
--- a/runtime/gc/space/dlmalloc_space.cc
+++ b/runtime/gc/space/dlmalloc_space.cc
@@ -39,7 +39,7 @@ DlMallocSpace::DlMallocSpace(MemMap* mem_map, size_t initial_size, const std::st
: MallocSpace(name, mem_map, begin, end, limit, growth_limit, true, can_move_objects,
starting_size, initial_size),
mspace_(mspace) {
- CHECK(mspace != NULL);
+ CHECK(mspace != nullptr);
}
DlMallocSpace* DlMallocSpace::CreateFromMemMap(MemMap* mem_map, const std::string& name,
@@ -176,7 +176,7 @@ size_t DlMallocSpace::Free(Thread* self, mirror::Object* ptr) {
}
size_t DlMallocSpace::FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) {
- DCHECK(ptrs != NULL);
+ DCHECK(ptrs != nullptr);
// Don't need the lock to calculate the size of the freed pointers.
size_t bytes_freed = 0;
@@ -232,7 +232,7 @@ void DlMallocSpace::Walk(void(*callback)(void *start, void *end, size_t num_byte
void* arg) {
MutexLock mu(Thread::Current(), lock_);
mspace_inspect_all(mspace_, callback, arg);
- callback(NULL, NULL, 0, arg); // Indicate end of a space.
+ callback(nullptr, nullptr, 0, arg); // Indicate end of a space.
}
size_t DlMallocSpace::GetFootprint() {
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index e28e8d7..f350038 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -666,7 +666,7 @@ ImageSpace* ImageSpace::Init(const char* image_filename, const char* image_locat
}
std::unique_ptr<File> file(OS::OpenFileForReading(image_filename));
- if (file.get() == NULL) {
+ if (file.get() == nullptr) {
*error_msg = StringPrintf("Failed to open '%s'", image_filename);
return nullptr;
}
@@ -695,7 +695,7 @@ ImageSpace* ImageSpace::Init(const char* image_filename, const char* image_locat
std::unique_ptr<MemMap> map(MemMap::MapFileAtAddress(
image_header.GetImageBegin(), image_header.GetImageSize() + image_header.GetArtFieldsSize(),
PROT_READ | PROT_WRITE, MAP_PRIVATE, file->Fd(), 0, false, image_filename, error_msg));
- if (map.get() == NULL) {
+ if (map.get() == nullptr) {
DCHECK(!error_msg->empty());
return nullptr;
}
@@ -786,7 +786,7 @@ OatFile* ImageSpace::OpenOatFile(const char* image_path, std::string* error_msg)
image_header.GetOatFileBegin(),
!Runtime::Current()->IsAotCompiler(),
nullptr, error_msg);
- if (oat_file == NULL) {
+ if (oat_file == nullptr) {
*error_msg = StringPrintf("Failed to open oat file '%s' referenced from image %s: %s",
oat_filename.c_str(), GetName(), error_msg->c_str());
return nullptr;
@@ -811,7 +811,7 @@ OatFile* ImageSpace::OpenOatFile(const char* image_path, std::string* error_msg)
}
bool ImageSpace::ValidateOatFile(std::string* error_msg) const {
- CHECK(oat_file_.get() != NULL);
+ CHECK(oat_file_.get() != nullptr);
for (const OatFile::OatDexFile* oat_dex_file : oat_file_->GetOatDexFiles()) {
const std::string& dex_file_location = oat_dex_file->GetDexFileLocation();
uint32_t dex_file_location_checksum;
@@ -837,7 +837,7 @@ const OatFile* ImageSpace::GetOatFile() const {
}
OatFile* ImageSpace::ReleaseOatFile() {
- CHECK(oat_file_.get() != NULL);
+ CHECK(oat_file_.get() != nullptr);
return oat_file_.release();
}
diff --git a/runtime/gc/space/image_space.h b/runtime/gc/space/image_space.h
index 9ae2af4..54dc7a6 100644
--- a/runtime/gc/space/image_space.h
+++ b/runtime/gc/space/image_space.h
@@ -52,7 +52,7 @@ class ImageSpace : public MemMapSpace {
InstructionSet image_isa);
// Reads the image header from the specified image location for the
- // instruction set image_isa. Returns nullptr on failure, with
+ // instruction set image_isa. Returns null on failure, with
// reason in error_msg.
static ImageHeader* ReadImageHeader(const char* image_location,
InstructionSet image_isa,
@@ -122,7 +122,7 @@ class ImageSpace : public MemMapSpace {
private:
// Tries to initialize an ImageSpace from the given image path,
- // returning NULL on error.
+ // returning null on error.
//
// If validate_oat_file is false (for /system), do not verify that
// image's OatFile is up-to-date relative to its DexFile
diff --git a/runtime/gc/space/large_object_space.cc b/runtime/gc/space/large_object_space.cc
index 7353c83..4dfdaa5 100644
--- a/runtime/gc/space/large_object_space.cc
+++ b/runtime/gc/space/large_object_space.cc
@@ -124,9 +124,9 @@ mirror::Object* LargeObjectMapSpace::Alloc(Thread* self, size_t num_bytes,
std::string error_msg;
MemMap* mem_map = MemMap::MapAnonymous("large object space allocation", nullptr, num_bytes,
PROT_READ | PROT_WRITE, true, false, &error_msg);
- if (UNLIKELY(mem_map == NULL)) {
+ if (UNLIKELY(mem_map == nullptr)) {
LOG(WARNING) << "Large object allocation failed: " << error_msg;
- return NULL;
+ return nullptr;
}
mirror::Object* const obj = reinterpret_cast<mirror::Object*>(mem_map->Begin());
if (kIsDebugBuild) {
@@ -206,7 +206,7 @@ void LargeObjectMapSpace::Walk(DlMallocSpace::WalkCallback callback, void* arg)
for (auto it = mem_maps_.begin(); it != mem_maps_.end(); ++it) {
MemMap* mem_map = it->second;
callback(mem_map->Begin(), mem_map->End(), mem_map->Size(), arg);
- callback(NULL, NULL, 0, arg);
+ callback(nullptr, nullptr, 0, arg);
}
}
@@ -316,7 +316,7 @@ FreeListSpace* FreeListSpace::Create(const std::string& name, uint8_t* requested
std::string error_msg;
MemMap* mem_map = MemMap::MapAnonymous(name.c_str(), requested_begin, size,
PROT_READ | PROT_WRITE, true, false, &error_msg);
- CHECK(mem_map != NULL) << "Failed to allocate large object space mem map: " << error_msg;
+ CHECK(mem_map != nullptr) << "Failed to allocate large object space mem map: " << error_msg;
return new FreeListSpace(name, mem_map, mem_map->Begin(), mem_map->End());
}
diff --git a/runtime/gc/space/malloc_space.cc b/runtime/gc/space/malloc_space.cc
index 9195b06..b014217 100644
--- a/runtime/gc/space/malloc_space.cc
+++ b/runtime/gc/space/malloc_space.cc
@@ -75,13 +75,13 @@ MemMap* MallocSpace::CreateMemMap(const std::string& name, size_t starting_size,
LOG(ERROR) << "Failed to create alloc space (" << name << ") where the initial size ("
<< PrettySize(*initial_size) << ") is larger than its capacity ("
<< PrettySize(*growth_limit) << ")";
- return NULL;
+ return nullptr;
}
if (*growth_limit > *capacity) {
LOG(ERROR) << "Failed to create alloc space (" << name << ") where the growth limit capacity ("
<< PrettySize(*growth_limit) << ") is larger than the capacity ("
<< PrettySize(*capacity) << ")";
- return NULL;
+ return nullptr;
}
// Page align growth limit and capacity which will be used to manage mmapped storage
diff --git a/runtime/gc/space/malloc_space.h b/runtime/gc/space/malloc_space.h
index bbf1bbb..5f3a1db 100644
--- a/runtime/gc/space/malloc_space.h
+++ b/runtime/gc/space/malloc_space.h
@@ -60,7 +60,7 @@ class MallocSpace : public ContinuousMemMapAllocSpace {
// Allocate num_bytes without allowing the underlying space to grow.
virtual mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
size_t* usable_size, size_t* bytes_tl_bulk_allocated) = 0;
- // Return the storage space required by obj. If usable_size isn't nullptr then it is set to the
+ // Return the storage space required by obj. If usable_size isn't null then it is set to the
// amount of the storage space that may be used by obj.
virtual size_t AllocationSize(mirror::Object* obj, size_t* usable_size) = 0;
virtual size_t Free(Thread* self, mirror::Object* ptr)
diff --git a/runtime/gc/space/region_space.h b/runtime/gc/space/region_space.h
index b88ce24..19109f0 100644
--- a/runtime/gc/space/region_space.h
+++ b/runtime/gc/space/region_space.h
@@ -40,7 +40,7 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
// space to confirm the request was granted.
static RegionSpace* Create(const std::string& name, size_t capacity, uint8_t* requested_begin);
- // Allocate num_bytes, returns nullptr if the space is full.
+ // Allocate num_bytes, returns null if the space is full.
mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
size_t* usable_size, size_t* bytes_tl_bulk_allocated) OVERRIDE;
// Thread-unsafe allocation for when mutators are suspended, used by the semispace collector.
diff --git a/runtime/gc/space/rosalloc_space-inl.h b/runtime/gc/space/rosalloc_space-inl.h
index 9d582a3..25d4445 100644
--- a/runtime/gc/space/rosalloc_space-inl.h
+++ b/runtime/gc/space/rosalloc_space-inl.h
@@ -73,18 +73,18 @@ inline mirror::Object* RosAllocSpace::AllocCommon(Thread* self, size_t num_bytes
rosalloc_->Alloc<kThreadSafe>(self, num_bytes, &rosalloc_bytes_allocated,
&rosalloc_usable_size,
&rosalloc_bytes_tl_bulk_allocated));
- if (LIKELY(result != NULL)) {
+ if (LIKELY(result != nullptr)) {
if (kDebugSpaces) {
CHECK(Contains(result)) << "Allocation (" << reinterpret_cast<void*>(result)
<< ") not in bounds of allocation space " << *this;
}
- DCHECK(bytes_allocated != NULL);
+ DCHECK(bytes_allocated != nullptr);
*bytes_allocated = rosalloc_bytes_allocated;
DCHECK_EQ(rosalloc_usable_size, rosalloc_->UsableSize(result));
if (usable_size != nullptr) {
*usable_size = rosalloc_usable_size;
}
- DCHECK(bytes_tl_bulk_allocated != NULL);
+ DCHECK(bytes_tl_bulk_allocated != nullptr);
*bytes_tl_bulk_allocated = rosalloc_bytes_tl_bulk_allocated;
}
return result;
diff --git a/runtime/gc/space/rosalloc_space.cc b/runtime/gc/space/rosalloc_space.cc
index eb1d5f4..2c7d93e 100644
--- a/runtime/gc/space/rosalloc_space.cc
+++ b/runtime/gc/space/rosalloc_space.cc
@@ -64,9 +64,9 @@ RosAllocSpace* RosAllocSpace::CreateFromMemMap(MemMap* mem_map, const std::strin
allocator::RosAlloc* rosalloc = CreateRosAlloc(mem_map->Begin(), starting_size, initial_size,
capacity, low_memory_mode, running_on_valgrind);
- if (rosalloc == NULL) {
+ if (rosalloc == nullptr) {
LOG(ERROR) << "Failed to initialize rosalloc for alloc space (" << name << ")";
- return NULL;
+ return nullptr;
}
// Protect memory beyond the starting size. MoreCore will add r/w permissions when necessory
@@ -113,10 +113,10 @@ RosAllocSpace* RosAllocSpace::Create(const std::string& name, size_t initial_siz
size_t starting_size = Heap::kDefaultStartingSize;
MemMap* mem_map = CreateMemMap(name, starting_size, &initial_size, &growth_limit, &capacity,
requested_begin);
- if (mem_map == NULL) {
+ if (mem_map == nullptr) {
LOG(ERROR) << "Failed to create mem map for alloc space (" << name << ") of size "
<< PrettySize(capacity);
- return NULL;
+ return nullptr;
}
RosAllocSpace* space = CreateFromMemMap(mem_map, name, starting_size, initial_size,
@@ -145,7 +145,7 @@ allocator::RosAlloc* RosAllocSpace::CreateRosAlloc(void* begin, size_t morecore_
art::gc::allocator::RosAlloc::kPageReleaseModeAll :
art::gc::allocator::RosAlloc::kPageReleaseModeSizeAndEnd,
running_on_valgrind);
- if (rosalloc != NULL) {
+ if (rosalloc != nullptr) {
rosalloc->SetFootprintLimit(initial_size);
} else {
PLOG(ERROR) << "RosAlloc::Create failed";
@@ -170,7 +170,7 @@ mirror::Object* RosAllocSpace::AllocWithGrowth(Thread* self, size_t num_bytes,
rosalloc_->SetFootprintLimit(footprint);
}
// Note RosAlloc zeroes memory internally.
- // Return the new allocation or NULL.
+ // Return the new allocation or null.
CHECK(!kDebugSpaces || result == nullptr || Contains(result));
return result;
}
@@ -192,7 +192,7 @@ MallocSpace* RosAllocSpace::CreateInstance(MemMap* mem_map, const std::string& n
size_t RosAllocSpace::Free(Thread* self, mirror::Object* ptr) {
if (kDebugSpaces) {
- CHECK(ptr != NULL);
+ CHECK(ptr != nullptr);
CHECK(Contains(ptr)) << "Free (" << ptr << ") not in bounds of heap " << *this;
}
if (kRecentFreeCount > 0) {
@@ -309,7 +309,7 @@ void RosAllocSpace::InspectAllRosAllocWithSuspendAll(
MutexLock mu2(self, *Locks::thread_list_lock_);
rosalloc_->InspectAll(callback, arg);
if (do_null_callback_at_end) {
- callback(NULL, NULL, 0, arg); // Indicate end of a space.
+ callback(nullptr, nullptr, 0, arg); // Indicate end of a space.
}
}
tl->ResumeAll();
@@ -324,7 +324,7 @@ void RosAllocSpace::InspectAllRosAlloc(void (*callback)(void *start, void *end,
// from SignalCatcher::HandleSigQuit().
rosalloc_->InspectAll(callback, arg);
if (do_null_callback_at_end) {
- callback(NULL, NULL, 0, arg); // Indicate end of a space.
+ callback(nullptr, nullptr, 0, arg); // Indicate end of a space.
}
} else if (Locks::mutator_lock_->IsSharedHeld(self)) {
// The mutators are not suspended yet and we have a shared access
diff --git a/runtime/gc_map.h b/runtime/gc_map.h
index ffe54c4..b4ccdd6 100644
--- a/runtime/gc_map.h
+++ b/runtime/gc_map.h
@@ -28,7 +28,7 @@ namespace art {
class NativePcOffsetToReferenceMap {
public:
explicit NativePcOffsetToReferenceMap(const uint8_t* data) : data_(data) {
- CHECK(data_ != NULL);
+ CHECK(data_ != nullptr);
}
// The number of entries in the table.
diff --git a/runtime/hprof/hprof.cc b/runtime/hprof/hprof.cc
index 23af25d..fb7ff54 100644
--- a/runtime/hprof/hprof.cc
+++ b/runtime/hprof/hprof.cc
@@ -538,7 +538,7 @@ class Hprof : public SingleRootVisitor {
// STRING format:
// ID: ID for this string
- // U1*: UTF8 characters for string (NOT NULL terminated)
+ // U1*: UTF8 characters for string (NOT null terminated)
// (the record format encodes the length)
__ AddU4(id);
__ AddUtf8String(string.c_str());
@@ -931,7 +931,7 @@ void Hprof::DumpHeapObject(mirror::Object* obj) {
mirror::Class* c = obj->GetClass();
if (c == nullptr) {
- // This object will bother HprofReader, because it has a NULL
+ // This object will bother HprofReader, because it has a null
// class, so just don't dump it. It could be
// gDvm.unlinkedJavaLangClass or it could be an object just
// allocated which hasn't been initialized yet.
@@ -1057,7 +1057,7 @@ void Hprof::DumpHeapArray(mirror::Array* obj, mirror::Class* klass) {
__ AddU4(length);
__ AddClassId(LookupClassId(klass));
- // Dump the elements, which are always objects or NULL.
+ // Dump the elements, which are always objects or null.
__ AddIdList(obj->AsObjectArray<mirror::Object>());
} else {
size_t size;
diff --git a/runtime/indirect_reference_table-inl.h b/runtime/indirect_reference_table-inl.h
index e571a0e..639be51 100644
--- a/runtime/indirect_reference_table-inl.h
+++ b/runtime/indirect_reference_table-inl.h
@@ -31,7 +31,7 @@ class Object;
// Returns "false" if something looks bad.
inline bool IndirectReferenceTable::GetChecked(IndirectRef iref) const {
if (UNLIKELY(iref == nullptr)) {
- LOG(WARNING) << "Attempt to look up NULL " << kind_;
+ LOG(WARNING) << "Attempt to look up nullptr " << kind_;
return false;
}
if (UNLIKELY(GetIndirectRefKind(iref) == kHandleScopeOrInvalid)) {
diff --git a/runtime/indirect_reference_table.cc b/runtime/indirect_reference_table.cc
index d6f9682..e2b9559 100644
--- a/runtime/indirect_reference_table.cc
+++ b/runtime/indirect_reference_table.cc
@@ -103,9 +103,9 @@ IndirectRef IndirectReferenceTable::Add(uint32_t cookie, mirror::Object* obj) {
prevState.all = cookie;
size_t topIndex = segment_state_.parts.topIndex;
- CHECK(obj != NULL);
+ CHECK(obj != nullptr);
VerifyObject(obj);
- DCHECK(table_ != NULL);
+ DCHECK(table_ != nullptr);
DCHECK_GE(segment_state_.parts.numHoles, prevState.parts.numHoles);
if (topIndex == max_entries_) {
@@ -144,7 +144,7 @@ IndirectRef IndirectReferenceTable::Add(uint32_t cookie, mirror::Object* obj) {
<< " holes=" << segment_state_.parts.numHoles;
}
- DCHECK(result != NULL);
+ DCHECK(result != nullptr);
return result;
}
@@ -172,7 +172,7 @@ bool IndirectReferenceTable::Remove(uint32_t cookie, IndirectRef iref) {
int topIndex = segment_state_.parts.topIndex;
int bottomIndex = prevState.parts.topIndex;
- DCHECK(table_ != NULL);
+ DCHECK(table_ != nullptr);
DCHECK_GE(segment_state_.parts.numHoles, prevState.parts.numHoles);
if (GetIndirectRefKind(iref) == kHandleScopeOrInvalid &&
@@ -227,9 +227,8 @@ bool IndirectReferenceTable::Remove(uint32_t cookie, IndirectRef iref) {
}
}
} else {
- // Not the top-most entry. This creates a hole. We NULL out the
- // entry to prevent somebody from deleting it twice and screwing up
- // the hole count.
+ // Not the top-most entry. This creates a hole. We null out the entry to prevent somebody
+ // from deleting it twice and screwing up the hole count.
if (table_[idx].GetReference()->IsNull()) {
LOG(INFO) << "--- WEIRD: removing null entry " << idx;
return false;
@@ -270,9 +269,7 @@ void IndirectReferenceTable::Dump(std::ostream& os) const {
ReferenceTable::Table entries;
for (size_t i = 0; i < Capacity(); ++i) {
mirror::Object* obj = table_[i].GetReference()->Read<kWithoutReadBarrier>();
- if (UNLIKELY(obj == nullptr)) {
- // Remove NULLs.
- } else {
+ if (obj != nullptr) {
obj = table_[i].GetReference()->Read();
entries.push_back(GcRoot<mirror::Object>(obj));
}
diff --git a/runtime/indirect_reference_table.h b/runtime/indirect_reference_table.h
index 0072184..a0e53af 100644
--- a/runtime/indirect_reference_table.h
+++ b/runtime/indirect_reference_table.h
@@ -268,9 +268,9 @@ class IndirectReferenceTable {
bool IsValid() const;
/*
- * Add a new entry. "obj" must be a valid non-NULL object reference.
+ * Add a new entry. "obj" must be a valid non-nullptr object reference.
*
- * Returns NULL if the table is full (max entries reached, or alloc
+ * Returns nullptr if the table is full (max entries reached, or alloc
* failed during expansion).
*/
IndirectRef Add(uint32_t cookie, mirror::Object* obj)
diff --git a/runtime/indirect_reference_table_test.cc b/runtime/indirect_reference_table_test.cc
index fe1b8f0..c20002b 100644
--- a/runtime/indirect_reference_table_test.cc
+++ b/runtime/indirect_reference_table_test.cc
@@ -216,7 +216,7 @@ TEST_F(IndirectReferenceTableTest, BasicTest) {
ASSERT_EQ(0U, irt.Capacity()) << "temporal del not empty";
CheckDump(&irt, 0, 0);
- // nullptr isn't a valid iref.
+ // null isn't a valid iref.
ASSERT_TRUE(irt.Get(nullptr) == nullptr);
// Stale lookup.
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index 51600f7..e6c333d 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -177,14 +177,14 @@ static void InstrumentationInstallStack(Thread* thread, void* arg)
virtual bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
mirror::ArtMethod* m = GetMethod();
- if (m == NULL) {
+ if (m == nullptr) {
if (kVerboseInstrumentation) {
LOG(INFO) << " Skipping upcall. Frame " << GetFrameId();
}
last_return_pc_ = 0;
return true; // Ignore upcalls.
}
- if (GetCurrentQuickFrame() == NULL) {
+ if (GetCurrentQuickFrame() == nullptr) {
bool interpreter_frame = true;
InstrumentationStackFrame instrumentation_frame(GetThisObject(), m, 0, GetFrameId(),
interpreter_frame);
@@ -309,7 +309,7 @@ static void InstrumentationRestoreStack(Thread* thread, void* arg)
struct RestoreStackVisitor : public StackVisitor {
RestoreStackVisitor(Thread* thread_in, uintptr_t instrumentation_exit_pc,
Instrumentation* instrumentation)
- : StackVisitor(thread_in, NULL), thread_(thread_in),
+ : StackVisitor(thread_in, nullptr), thread_(thread_in),
instrumentation_exit_pc_(instrumentation_exit_pc),
instrumentation_(instrumentation),
instrumentation_stack_(thread_in->GetInstrumentationStack()),
@@ -320,14 +320,14 @@ static void InstrumentationRestoreStack(Thread* thread, void* arg)
return false; // Stop.
}
mirror::ArtMethod* m = GetMethod();
- if (GetCurrentQuickFrame() == NULL) {
+ if (GetCurrentQuickFrame() == nullptr) {
if (kVerboseInstrumentation) {
LOG(INFO) << " Ignoring a shadow frame. Frame " << GetFrameId()
<< " Method=" << PrettyMethod(m);
}
return true; // Ignore shadow frames.
}
- if (m == NULL) {
+ if (m == nullptr) {
if (kVerboseInstrumentation) {
LOG(INFO) << " Skipping upcall. Frame " << GetFrameId();
}
@@ -645,7 +645,7 @@ void Instrumentation::ResetQuickAllocEntryPoints() {
Runtime* runtime = Runtime::Current();
if (runtime->IsStarted()) {
MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
- runtime->GetThreadList()->ForEach(ResetQuickAllocEntryPointsForThread, NULL);
+ runtime->GetThreadList()->ForEach(ResetQuickAllocEntryPointsForThread, nullptr);
}
}
diff --git a/runtime/intern_table.cc b/runtime/intern_table.cc
index 4c5fc81..a85d10f 100644
--- a/runtime/intern_table.cc
+++ b/runtime/intern_table.cc
@@ -194,7 +194,7 @@ mirror::String* InternTable::LookupStringFromImage(mirror::String* s)
uint32_t string_idx = dex_file->GetIndexForStringId(*string_id);
// GetResolvedString() contains a RB.
mirror::String* image_string = dex_cache->GetResolvedString(string_idx);
- if (image_string != NULL) {
+ if (image_string != nullptr) {
return image_string;
}
}
diff --git a/runtime/intern_table.h b/runtime/intern_table.h
index 200a764..1e5d3c2 100644
--- a/runtime/intern_table.h
+++ b/runtime/intern_table.h
@@ -150,7 +150,7 @@ class InternTable {
UnorderedSet post_zygote_table_;
};
- // Insert if non null, otherwise return nullptr.
+ // Insert if non null, otherwise return null.
mirror::String* Insert(mirror::String* s, bool is_strong)
LOCKS_EXCLUDED(Locks::intern_table_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/intern_table_test.cc b/runtime/intern_table_test.cc
index d462e14..194d0af 100644
--- a/runtime/intern_table_test.cc
+++ b/runtime/intern_table_test.cc
@@ -38,8 +38,8 @@ TEST_F(InternTableTest, Intern) {
EXPECT_TRUE(foo_1->Equals("foo"));
EXPECT_TRUE(foo_2->Equals("foo"));
EXPECT_TRUE(foo_3->Equals("foo"));
- EXPECT_TRUE(foo_1.Get() != NULL);
- EXPECT_TRUE(foo_2.Get() != NULL);
+ EXPECT_TRUE(foo_1.Get() != nullptr);
+ EXPECT_TRUE(foo_2.Get() != nullptr);
EXPECT_EQ(foo_1.Get(), foo_2.Get());
EXPECT_NE(foo_1.Get(), bar.Get());
EXPECT_NE(foo_2.Get(), bar.Get());
diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc
index 4801124..423b952 100644
--- a/runtime/interpreter/interpreter.cc
+++ b/runtime/interpreter/interpreter.cc
@@ -78,7 +78,8 @@ static void InterpreterJni(Thread* self, ArtMethod* method, const StringPiece& s
ScopedLocalRef<jclass> klass(soa.Env(),
soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
ScopedLocalRef<jobject> arg0(soa.Env(),
- soa.AddLocalReference<jobject>(reinterpret_cast<Object*>(args[0])));
+ soa.AddLocalReference<jobject>(
+ reinterpret_cast<Object*>(args[0])));
jobject jresult;
{
ScopedThreadStateChange tsc(self, kNative);
@@ -99,12 +100,14 @@ static void InterpreterJni(Thread* self, ArtMethod* method, const StringPiece& s
ScopedLocalRef<jclass> klass(soa.Env(),
soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
ScopedLocalRef<jobject> arg0(soa.Env(),
- soa.AddLocalReference<jobject>(reinterpret_cast<Object*>(args[0])));
+ soa.AddLocalReference<jobject>(
+ reinterpret_cast<Object*>(args[0])));
ScopedThreadStateChange tsc(self, kNative);
result->SetI(fn(soa.Env(), klass.get(), arg0.get(), args[1]));
} else if (shorty == "SIZ") {
typedef jshort (fntype)(JNIEnv*, jclass, jint, jboolean);
- fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(method->GetEntryPointFromJni()));
+ fntype* const fn =
+ reinterpret_cast<fntype*>(const_cast<void*>(method->GetEntryPointFromJni()));
ScopedLocalRef<jclass> klass(soa.Env(),
soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
ScopedThreadStateChange tsc(self, kNative);
@@ -122,9 +125,11 @@ static void InterpreterJni(Thread* self, ArtMethod* method, const StringPiece& s
ScopedLocalRef<jclass> klass(soa.Env(),
soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
ScopedLocalRef<jobject> arg0(soa.Env(),
- soa.AddLocalReference<jobject>(reinterpret_cast<Object*>(args[0])));
+ soa.AddLocalReference<jobject>(
+ reinterpret_cast<Object*>(args[0])));
ScopedLocalRef<jobject> arg1(soa.Env(),
- soa.AddLocalReference<jobject>(reinterpret_cast<Object*>(args[1])));
+ soa.AddLocalReference<jobject>(
+ reinterpret_cast<Object*>(args[1])));
ScopedThreadStateChange tsc(self, kNative);
result->SetZ(fn(soa.Env(), klass.get(), arg0.get(), arg1.get()));
} else if (shorty == "ZILL") {
@@ -133,9 +138,11 @@ static void InterpreterJni(Thread* self, ArtMethod* method, const StringPiece& s
ScopedLocalRef<jclass> klass(soa.Env(),
soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
ScopedLocalRef<jobject> arg1(soa.Env(),
- soa.AddLocalReference<jobject>(reinterpret_cast<Object*>(args[1])));
+ soa.AddLocalReference<jobject>(
+ reinterpret_cast<Object*>(args[1])));
ScopedLocalRef<jobject> arg2(soa.Env(),
- soa.AddLocalReference<jobject>(reinterpret_cast<Object*>(args[2])));
+ soa.AddLocalReference<jobject>(
+ reinterpret_cast<Object*>(args[2])));
ScopedThreadStateChange tsc(self, kNative);
result->SetZ(fn(soa.Env(), klass.get(), args[0], arg1.get(), arg2.get()));
} else if (shorty == "VILII") {
@@ -144,7 +151,8 @@ static void InterpreterJni(Thread* self, ArtMethod* method, const StringPiece& s
ScopedLocalRef<jclass> klass(soa.Env(),
soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
ScopedLocalRef<jobject> arg1(soa.Env(),
- soa.AddLocalReference<jobject>(reinterpret_cast<Object*>(args[1])));
+ soa.AddLocalReference<jobject>(
+ reinterpret_cast<Object*>(args[1])));
ScopedThreadStateChange tsc(self, kNative);
fn(soa.Env(), klass.get(), args[0], arg1.get(), args[2], args[3]);
} else if (shorty == "VLILII") {
@@ -153,9 +161,11 @@ static void InterpreterJni(Thread* self, ArtMethod* method, const StringPiece& s
ScopedLocalRef<jclass> klass(soa.Env(),
soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
ScopedLocalRef<jobject> arg0(soa.Env(),
- soa.AddLocalReference<jobject>(reinterpret_cast<Object*>(args[0])));
+ soa.AddLocalReference<jobject>(
+ reinterpret_cast<Object*>(args[0])));
ScopedLocalRef<jobject> arg2(soa.Env(),
- soa.AddLocalReference<jobject>(reinterpret_cast<Object*>(args[2])));
+ soa.AddLocalReference<jobject>(
+ reinterpret_cast<Object*>(args[2])));
ScopedThreadStateChange tsc(self, kNative);
fn(soa.Env(), klass.get(), arg0.get(), args[1], arg2.get(), args[3], args[4]);
} else {
@@ -187,7 +197,8 @@ static void InterpreterJni(Thread* self, ArtMethod* method, const StringPiece& s
ScopedLocalRef<jobject> rcvr(soa.Env(),
soa.AddLocalReference<jobject>(receiver));
ScopedLocalRef<jobject> arg0(soa.Env(),
- soa.AddLocalReference<jobject>(reinterpret_cast<Object*>(args[0])));
+ soa.AddLocalReference<jobject>(
+ reinterpret_cast<Object*>(args[0])));
jobject jresult;
{
ScopedThreadStateChange tsc(self, kNative);
@@ -302,7 +313,7 @@ void EnterInterpreterFromInvoke(Thread* self, ArtMethod* method, Object* receive
const DexFile::CodeItem* code_item = method->GetCodeItem();
uint16_t num_regs;
uint16_t num_ins;
- if (code_item != NULL) {
+ if (code_item != nullptr) {
num_regs = code_item->registers_size_;
num_ins = code_item->ins_size_;
} else if (method->IsAbstract()) {
@@ -325,7 +336,7 @@ void EnterInterpreterFromInvoke(Thread* self, ArtMethod* method, Object* receive
size_t cur_reg = num_regs - num_ins;
if (!method->IsStatic()) {
- CHECK(receiver != NULL);
+ CHECK(receiver != nullptr);
shadow_frame->SetVRegReference(cur_reg, receiver);
++cur_reg;
}
@@ -365,7 +376,7 @@ void EnterInterpreterFromInvoke(Thread* self, ArtMethod* method, Object* receive
}
if (LIKELY(!method->IsNative())) {
JValue r = Execute(self, code_item, *shadow_frame, JValue());
- if (result != NULL) {
+ if (result != nullptr) {
*result = r;
}
} else {
@@ -386,8 +397,9 @@ void EnterInterpreterFromInvoke(Thread* self, ArtMethod* method, Object* receive
void EnterInterpreterFromDeoptimize(Thread* self, ShadowFrame* shadow_frame, JValue* ret_val)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
JValue value;
- value.SetJ(ret_val->GetJ()); // Set value to last known result in case the shadow frame chain is empty.
- while (shadow_frame != NULL) {
+ // Set value to last known result in case the shadow frame chain is empty.
+ value.SetJ(ret_val->GetJ());
+ while (shadow_frame != nullptr) {
self->SetTopOfShadowStack(shadow_frame);
const DexFile::CodeItem* code_item = shadow_frame->GetMethod()->GetCodeItem();
const uint32_t dex_pc = shadow_frame->GetDexPC();
diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc
index 3ae611b..4765ebc 100644
--- a/runtime/interpreter/interpreter_common.cc
+++ b/runtime/interpreter/interpreter_common.cc
@@ -489,7 +489,7 @@ bool DoCall(ArtMethod* called_method, Thread* self, ShadowFrame& shadow_frame,
const DexFile::CodeItem* code_item = called_method->GetCodeItem();
const uint16_t num_ins = (is_range) ? inst->VRegA_3rc(inst_data) : inst->VRegA_35c(inst_data);
uint16_t num_regs;
- if (LIKELY(code_item != NULL)) {
+ if (LIKELY(code_item != nullptr)) {
num_regs = code_item->registers_size_;
DCHECK_EQ(num_ins, code_item->ins_size_);
} else {
@@ -543,11 +543,11 @@ bool DoCall(ArtMethod* called_method, Thread* self, ShadowFrame& shadow_frame,
switch (shorty[shorty_pos + 1]) {
case 'L': {
Object* o = shadow_frame.GetVRegReference(src_reg);
- if (do_assignability_check && o != NULL) {
+ if (do_assignability_check && o != nullptr) {
Class* arg_type =
new_shadow_frame->GetMethod()->GetClassFromTypeIndex(
params->GetTypeItem(shorty_pos).type_idx_, true);
- if (arg_type == NULL) {
+ if (arg_type == nullptr) {
CHECK(self->IsExceptionPending());
return false;
}
@@ -651,7 +651,7 @@ bool DoFilledNewArray(const Instruction* inst, const ShadowFrame& shadow_frame,
uint16_t type_idx = is_range ? inst->VRegB_3rc() : inst->VRegB_35c();
Class* arrayClass = ResolveVerifyAndClinit(type_idx, shadow_frame.GetMethod(),
self, false, do_access_check);
- if (UNLIKELY(arrayClass == NULL)) {
+ if (UNLIKELY(arrayClass == nullptr)) {
DCHECK(self->IsExceptionPending());
return false;
}
@@ -671,7 +671,7 @@ bool DoFilledNewArray(const Instruction* inst, const ShadowFrame& shadow_frame,
Object* newArray = Array::Alloc<true>(self, arrayClass, length,
arrayClass->GetComponentSizeShift(),
Runtime::Current()->GetHeap()->GetCurrentAllocator());
- if (UNLIKELY(newArray == NULL)) {
+ if (UNLIKELY(newArray == nullptr)) {
DCHECK(self->IsExceptionPending());
return false;
}
diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h
index 0e0d56a..dbedc16 100644
--- a/runtime/interpreter/interpreter_common.h
+++ b/runtime/interpreter/interpreter_common.h
@@ -367,9 +367,9 @@ static inline void TraceExecution(const ShadowFrame& shadow_frame, const Instruc
uint32_t raw_value = shadow_frame.GetVReg(i);
Object* ref_value = shadow_frame.GetVRegReference(i);
oss << StringPrintf(" vreg%u=0x%08X", i, raw_value);
- if (ref_value != NULL) {
+ if (ref_value != nullptr) {
if (ref_value->GetClass()->IsStringClass() &&
- ref_value->AsString()->GetCharArray() != NULL) {
+ ref_value->AsString()->GetCharArray() != nullptr) {
oss << "/java.lang.String \"" << ref_value->AsString()->ToModifiedUtf8() << "\"";
} else {
oss << "/" << PrettyTypeOf(ref_value);
diff --git a/runtime/interpreter/interpreter_goto_table_impl.cc b/runtime/interpreter/interpreter_goto_table_impl.cc
index cead26c..dc0b687 100644
--- a/runtime/interpreter/interpreter_goto_table_impl.cc
+++ b/runtime/interpreter/interpreter_goto_table_impl.cc
@@ -55,7 +55,8 @@ namespace interpreter {
} while (false)
#define UPDATE_HANDLER_TABLE() \
- currentHandlersTable = handlersTable[Runtime::Current()->GetInstrumentation()->GetInterpreterHandlerTable()]
+ currentHandlersTable = handlersTable[ \
+ Runtime::Current()->GetInstrumentation()->GetInterpreterHandlerTable()]
#define BACKWARD_BRANCH_INSTRUMENTATION(offset) \
do { \
@@ -328,10 +329,10 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
self->AllowThreadSuspension();
const uint8_t vreg_index = inst->VRegA_11x(inst_data);
Object* obj_result = shadow_frame.GetVRegReference(vreg_index);
- if (do_assignability_check && obj_result != NULL) {
+ if (do_assignability_check && obj_result != nullptr) {
Class* return_type = shadow_frame.GetMethod()->GetReturnType();
obj_result = shadow_frame.GetVRegReference(vreg_index);
- if (return_type == NULL) {
+ if (return_type == nullptr) {
// Return the pending exception.
HANDLE_PENDING_EXCEPTION();
}
@@ -364,7 +365,7 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
int32_t val = inst->VRegB_11n(inst_data);
shadow_frame.SetVReg(dst, val);
if (val == 0) {
- shadow_frame.SetVRegReference(dst, NULL);
+ shadow_frame.SetVRegReference(dst, nullptr);
}
ADVANCE(1);
}
@@ -375,7 +376,7 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
int32_t val = inst->VRegB_21s();
shadow_frame.SetVReg(dst, val);
if (val == 0) {
- shadow_frame.SetVRegReference(dst, NULL);
+ shadow_frame.SetVRegReference(dst, nullptr);
}
ADVANCE(2);
}
@@ -386,7 +387,7 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
int32_t val = inst->VRegB_31i();
shadow_frame.SetVReg(dst, val);
if (val == 0) {
- shadow_frame.SetVRegReference(dst, NULL);
+ shadow_frame.SetVRegReference(dst, nullptr);
}
ADVANCE(3);
}
@@ -397,7 +398,7 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
int32_t val = static_cast<int32_t>(inst->VRegB_21h() << 16);
shadow_frame.SetVReg(dst, val);
if (val == 0) {
- shadow_frame.SetVRegReference(dst, NULL);
+ shadow_frame.SetVRegReference(dst, nullptr);
}
ADVANCE(2);
}
@@ -426,7 +427,7 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
HANDLE_INSTRUCTION_START(CONST_STRING) {
String* s = ResolveString(self, shadow_frame, inst->VRegB_21c());
- if (UNLIKELY(s == NULL)) {
+ if (UNLIKELY(s == nullptr)) {
HANDLE_PENDING_EXCEPTION();
} else {
shadow_frame.SetVRegReference(inst->VRegA_21c(inst_data), s);
@@ -437,7 +438,7 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
HANDLE_INSTRUCTION_START(CONST_STRING_JUMBO) {
String* s = ResolveString(self, shadow_frame, inst->VRegB_31c());
- if (UNLIKELY(s == NULL)) {
+ if (UNLIKELY(s == nullptr)) {
HANDLE_PENDING_EXCEPTION();
} else {
shadow_frame.SetVRegReference(inst->VRegA_31c(inst_data), s);
@@ -449,7 +450,7 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
HANDLE_INSTRUCTION_START(CONST_CLASS) {
Class* c = ResolveVerifyAndClinit(inst->VRegB_21c(), shadow_frame.GetMethod(),
self, false, do_access_check);
- if (UNLIKELY(c == NULL)) {
+ if (UNLIKELY(c == nullptr)) {
HANDLE_PENDING_EXCEPTION();
} else {
shadow_frame.SetVRegReference(inst->VRegA_21c(inst_data), c);
@@ -460,7 +461,7 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
HANDLE_INSTRUCTION_START(MONITOR_ENTER) {
Object* obj = shadow_frame.GetVRegReference(inst->VRegA_11x(inst_data));
- if (UNLIKELY(obj == NULL)) {
+ if (UNLIKELY(obj == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
} else {
@@ -472,7 +473,7 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
HANDLE_INSTRUCTION_START(MONITOR_EXIT) {
Object* obj = shadow_frame.GetVRegReference(inst->VRegA_11x(inst_data));
- if (UNLIKELY(obj == NULL)) {
+ if (UNLIKELY(obj == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
} else {
@@ -485,11 +486,11 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
HANDLE_INSTRUCTION_START(CHECK_CAST) {
Class* c = ResolveVerifyAndClinit(inst->VRegB_21c(), shadow_frame.GetMethod(),
self, false, do_access_check);
- if (UNLIKELY(c == NULL)) {
+ if (UNLIKELY(c == nullptr)) {
HANDLE_PENDING_EXCEPTION();
} else {
Object* obj = shadow_frame.GetVRegReference(inst->VRegA_21c(inst_data));
- if (UNLIKELY(obj != NULL && !obj->InstanceOf(c))) {
+ if (UNLIKELY(obj != nullptr && !obj->InstanceOf(c))) {
ThrowClassCastException(c, obj->GetClass());
HANDLE_PENDING_EXCEPTION();
} else {
@@ -502,11 +503,11 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
HANDLE_INSTRUCTION_START(INSTANCE_OF) {
Class* c = ResolveVerifyAndClinit(inst->VRegC_22c(), shadow_frame.GetMethod(),
self, false, do_access_check);
- if (UNLIKELY(c == NULL)) {
+ if (UNLIKELY(c == nullptr)) {
HANDLE_PENDING_EXCEPTION();
} else {
Object* obj = shadow_frame.GetVRegReference(inst->VRegB_22c(inst_data));
- shadow_frame.SetVReg(inst->VRegA_22c(inst_data), (obj != NULL && obj->InstanceOf(c)) ? 1 : 0);
+ shadow_frame.SetVReg(inst->VRegA_22c(inst_data), (obj != nullptr && obj->InstanceOf(c)) ? 1 : 0);
ADVANCE(2);
}
}
@@ -514,7 +515,7 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
HANDLE_INSTRUCTION_START(ARRAY_LENGTH) {
Object* array = shadow_frame.GetVRegReference(inst->VRegB_12x(inst_data));
- if (UNLIKELY(array == NULL)) {
+ if (UNLIKELY(array == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
} else {
@@ -529,7 +530,7 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
Object* obj = AllocObjectFromCode<do_access_check, true>(
inst->VRegB_21c(), shadow_frame.GetMethod(), self,
runtime->GetHeap()->GetCurrentAllocator());
- if (UNLIKELY(obj == NULL)) {
+ if (UNLIKELY(obj == nullptr)) {
HANDLE_PENDING_EXCEPTION();
} else {
obj->GetClass()->AssertInitializedOrInitializingInThread(self);
@@ -551,7 +552,7 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
Object* obj = AllocArrayFromCode<do_access_check, true>(
inst->VRegC_22c(), length, shadow_frame.GetMethod(), self,
Runtime::Current()->GetHeap()->GetCurrentAllocator());
- if (UNLIKELY(obj == NULL)) {
+ if (UNLIKELY(obj == nullptr)) {
HANDLE_PENDING_EXCEPTION();
} else {
shadow_frame.SetVRegReference(inst->VRegA_22c(inst_data), obj);
@@ -591,7 +592,7 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
HANDLE_INSTRUCTION_START(THROW) {
Object* exception = shadow_frame.GetVRegReference(inst->VRegA_11x(inst_data));
- if (UNLIKELY(exception == NULL)) {
+ if (UNLIKELY(exception == nullptr)) {
ThrowNullPointerException("throw with null exception");
} else if (do_assignability_check && !exception->GetClass()->IsThrowableClass()) {
// This should never happen.
@@ -778,7 +779,8 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(IF_NE) {
- if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) != shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
+ if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) !=
+ shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
int16_t offset = inst->VRegC_22t();
if (IsBackwardBranch(offset)) {
BACKWARD_BRANCH_INSTRUMENTATION(offset);
@@ -795,7 +797,8 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(IF_LT) {
- if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) < shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
+ if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) <
+ shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
int16_t offset = inst->VRegC_22t();
if (IsBackwardBranch(offset)) {
BACKWARD_BRANCH_INSTRUMENTATION(offset);
@@ -812,7 +815,8 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(IF_GE) {
- if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) >= shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
+ if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) >=
+ shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
int16_t offset = inst->VRegC_22t();
if (IsBackwardBranch(offset)) {
BACKWARD_BRANCH_INSTRUMENTATION(offset);
@@ -829,7 +833,8 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(IF_GT) {
- if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) > shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
+ if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) >
+ shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
int16_t offset = inst->VRegC_22t();
if (IsBackwardBranch(offset)) {
BACKWARD_BRANCH_INSTRUMENTATION(offset);
@@ -846,7 +851,8 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(IF_LE) {
- if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) <= shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
+ if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) <=
+ shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
int16_t offset = inst->VRegC_22t();
if (IsBackwardBranch(offset)) {
BACKWARD_BRANCH_INSTRUMENTATION(offset);
@@ -966,7 +972,7 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
HANDLE_INSTRUCTION_START(AGET_BOOLEAN) {
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == NULL)) {
+ if (UNLIKELY(a == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
} else {
@@ -984,7 +990,7 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
HANDLE_INSTRUCTION_START(AGET_BYTE) {
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == NULL)) {
+ if (UNLIKELY(a == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
} else {
@@ -1002,7 +1008,7 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
HANDLE_INSTRUCTION_START(AGET_CHAR) {
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == NULL)) {
+ if (UNLIKELY(a == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
} else {
@@ -1020,7 +1026,7 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
HANDLE_INSTRUCTION_START(AGET_SHORT) {
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == NULL)) {
+ if (UNLIKELY(a == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
} else {
@@ -1038,7 +1044,7 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
HANDLE_INSTRUCTION_START(AGET) {
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == NULL)) {
+ if (UNLIKELY(a == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
} else {
@@ -1056,7 +1062,7 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
HANDLE_INSTRUCTION_START(AGET_WIDE) {
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == NULL)) {
+ if (UNLIKELY(a == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
} else {
@@ -1074,7 +1080,7 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
HANDLE_INSTRUCTION_START(AGET_OBJECT) {
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == NULL)) {
+ if (UNLIKELY(a == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
} else {
@@ -1092,7 +1098,7 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
HANDLE_INSTRUCTION_START(APUT_BOOLEAN) {
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == NULL)) {
+ if (UNLIKELY(a == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
} else {
@@ -1111,7 +1117,7 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
HANDLE_INSTRUCTION_START(APUT_BYTE) {
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == NULL)) {
+ if (UNLIKELY(a == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
} else {
@@ -1130,7 +1136,7 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
HANDLE_INSTRUCTION_START(APUT_CHAR) {
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == NULL)) {
+ if (UNLIKELY(a == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
} else {
@@ -1149,7 +1155,7 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
HANDLE_INSTRUCTION_START(APUT_SHORT) {
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == NULL)) {
+ if (UNLIKELY(a == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
} else {
@@ -1168,7 +1174,7 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
HANDLE_INSTRUCTION_START(APUT) {
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == NULL)) {
+ if (UNLIKELY(a == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
} else {
@@ -1187,7 +1193,7 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
HANDLE_INSTRUCTION_START(APUT_WIDE) {
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == NULL)) {
+ if (UNLIKELY(a == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
} else {
@@ -1206,7 +1212,7 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
HANDLE_INSTRUCTION_START(APUT_OBJECT) {
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == NULL)) {
+ if (UNLIKELY(a == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
} else {
@@ -1224,43 +1230,50 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(IGET_BOOLEAN) {
- bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimBoolean, do_access_check>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimBoolean, do_access_check>(
+ self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(IGET_BYTE) {
- bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimByte, do_access_check>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimByte, do_access_check>(
+ self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(IGET_CHAR) {
- bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimChar, do_access_check>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimChar, do_access_check>(
+ self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(IGET_SHORT) {
- bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimShort, do_access_check>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimShort, do_access_check>(
+ self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(IGET) {
- bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimInt, do_access_check>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimInt, do_access_check>(
+ self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(IGET_WIDE) {
- bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimLong, do_access_check>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimLong, do_access_check>(
+ self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(IGET_OBJECT) {
- bool success = DoFieldGet<InstanceObjectRead, Primitive::kPrimNot, do_access_check>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldGet<InstanceObjectRead, Primitive::kPrimNot, do_access_check>(
+ self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
}
HANDLE_INSTRUCTION_END();
@@ -1308,314 +1321,366 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(SGET_BOOLEAN) {
- bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimBoolean, do_access_check>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimBoolean, do_access_check>(
+ self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(SGET_BYTE) {
- bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimByte, do_access_check>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimByte, do_access_check>(
+ self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(SGET_CHAR) {
- bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimChar, do_access_check>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimChar, do_access_check>(
+ self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(SGET_SHORT) {
- bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimShort, do_access_check>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimShort, do_access_check>(
+ self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(SGET) {
- bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimInt, do_access_check>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimInt, do_access_check>(
+ self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(SGET_WIDE) {
- bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimLong, do_access_check>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimLong, do_access_check>(
+ self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(SGET_OBJECT) {
- bool success = DoFieldGet<StaticObjectRead, Primitive::kPrimNot, do_access_check>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldGet<StaticObjectRead, Primitive::kPrimNot, do_access_check>(
+ self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(IPUT_BOOLEAN) {
- bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimBoolean, do_access_check, transaction_active>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimBoolean, do_access_check,
+ transaction_active>(self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(IPUT_BYTE) {
- bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimByte, do_access_check, transaction_active>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimByte, do_access_check,
+ transaction_active>(self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(IPUT_CHAR) {
- bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimChar, do_access_check, transaction_active>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimChar, do_access_check,
+ transaction_active>(self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(IPUT_SHORT) {
- bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimShort, do_access_check, transaction_active>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimShort, do_access_check,
+ transaction_active>(self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(IPUT) {
- bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimInt, do_access_check, transaction_active>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimInt, do_access_check,
+ transaction_active>(self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(IPUT_WIDE) {
- bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimLong, do_access_check, transaction_active>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimLong, do_access_check,
+ transaction_active>(self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(IPUT_OBJECT) {
- bool success = DoFieldPut<InstanceObjectWrite, Primitive::kPrimNot, do_access_check, transaction_active>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldPut<InstanceObjectWrite, Primitive::kPrimNot, do_access_check,
+ transaction_active>(self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(IPUT_QUICK) {
- bool success = DoIPutQuick<Primitive::kPrimInt, transaction_active>(shadow_frame, inst, inst_data);
+ bool success = DoIPutQuick<Primitive::kPrimInt, transaction_active>(
+ shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(IPUT_BOOLEAN_QUICK) {
- bool success = DoIPutQuick<Primitive::kPrimBoolean, transaction_active>(shadow_frame, inst, inst_data);
+ bool success = DoIPutQuick<Primitive::kPrimBoolean, transaction_active>(
+ shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(IPUT_BYTE_QUICK) {
- bool success = DoIPutQuick<Primitive::kPrimByte, transaction_active>(shadow_frame, inst, inst_data);
+ bool success = DoIPutQuick<Primitive::kPrimByte, transaction_active>(
+ shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(IPUT_CHAR_QUICK) {
- bool success = DoIPutQuick<Primitive::kPrimChar, transaction_active>(shadow_frame, inst, inst_data);
+ bool success = DoIPutQuick<Primitive::kPrimChar, transaction_active>(
+ shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(IPUT_SHORT_QUICK) {
- bool success = DoIPutQuick<Primitive::kPrimShort, transaction_active>(shadow_frame, inst, inst_data);
+ bool success = DoIPutQuick<Primitive::kPrimShort, transaction_active>(
+ shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(IPUT_WIDE_QUICK) {
- bool success = DoIPutQuick<Primitive::kPrimLong, transaction_active>(shadow_frame, inst, inst_data);
+ bool success = DoIPutQuick<Primitive::kPrimLong, transaction_active>(
+ shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(IPUT_OBJECT_QUICK) {
- bool success = DoIPutQuick<Primitive::kPrimNot, transaction_active>(shadow_frame, inst, inst_data);
+ bool success = DoIPutQuick<Primitive::kPrimNot, transaction_active>(
+ shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(SPUT_BOOLEAN) {
- bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimBoolean, do_access_check, transaction_active>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimBoolean, do_access_check,
+ transaction_active>(self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(SPUT_BYTE) {
- bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimByte, do_access_check, transaction_active>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimByte, do_access_check,
+ transaction_active>(self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(SPUT_CHAR) {
- bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimChar, do_access_check, transaction_active>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimChar, do_access_check,
+ transaction_active>(self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(SPUT_SHORT) {
- bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimShort, do_access_check, transaction_active>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimShort, do_access_check,
+ transaction_active>(self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(SPUT) {
- bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimInt, do_access_check, transaction_active>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimInt, do_access_check,
+ transaction_active>(self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(SPUT_WIDE) {
- bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimLong, do_access_check, transaction_active>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimLong, do_access_check,
+ transaction_active>(self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(SPUT_OBJECT) {
- bool success = DoFieldPut<StaticObjectWrite, Primitive::kPrimNot, do_access_check, transaction_active>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldPut<StaticObjectWrite, Primitive::kPrimNot, do_access_check,
+ transaction_active>(self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(INVOKE_VIRTUAL) {
- bool success = DoInvoke<kVirtual, false, do_access_check>(self, shadow_frame, inst, inst_data, &result_register);
+ bool success = DoInvoke<kVirtual, false, do_access_check>(
+ self, shadow_frame, inst, inst_data, &result_register);
UPDATE_HANDLER_TABLE();
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 3);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(INVOKE_VIRTUAL_RANGE) {
- bool success = DoInvoke<kVirtual, true, do_access_check>(self, shadow_frame, inst, inst_data, &result_register);
+ bool success = DoInvoke<kVirtual, true, do_access_check>(
+ self, shadow_frame, inst, inst_data, &result_register);
UPDATE_HANDLER_TABLE();
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 3);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(INVOKE_SUPER) {
- bool success = DoInvoke<kSuper, false, do_access_check>(self, shadow_frame, inst, inst_data, &result_register);
+ bool success = DoInvoke<kSuper, false, do_access_check>(
+ self, shadow_frame, inst, inst_data, &result_register);
UPDATE_HANDLER_TABLE();
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 3);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(INVOKE_SUPER_RANGE) {
- bool success = DoInvoke<kSuper, true, do_access_check>(self, shadow_frame, inst, inst_data, &result_register);
+ bool success = DoInvoke<kSuper, true, do_access_check>(
+ self, shadow_frame, inst, inst_data, &result_register);
UPDATE_HANDLER_TABLE();
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 3);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(INVOKE_DIRECT) {
- bool success = DoInvoke<kDirect, false, do_access_check>(self, shadow_frame, inst, inst_data, &result_register);
+ bool success = DoInvoke<kDirect, false, do_access_check>(
+ self, shadow_frame, inst, inst_data, &result_register);
UPDATE_HANDLER_TABLE();
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 3);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(INVOKE_DIRECT_RANGE) {
- bool success = DoInvoke<kDirect, true, do_access_check>(self, shadow_frame, inst, inst_data, &result_register);
+ bool success = DoInvoke<kDirect, true, do_access_check>(
+ self, shadow_frame, inst, inst_data, &result_register);
UPDATE_HANDLER_TABLE();
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 3);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(INVOKE_INTERFACE) {
- bool success = DoInvoke<kInterface, false, do_access_check>(self, shadow_frame, inst, inst_data, &result_register);
+ bool success = DoInvoke<kInterface, false, do_access_check>(
+ self, shadow_frame, inst, inst_data, &result_register);
UPDATE_HANDLER_TABLE();
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 3);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(INVOKE_INTERFACE_RANGE) {
- bool success = DoInvoke<kInterface, true, do_access_check>(self, shadow_frame, inst, inst_data, &result_register);
+ bool success = DoInvoke<kInterface, true, do_access_check>(
+ self, shadow_frame, inst, inst_data, &result_register);
UPDATE_HANDLER_TABLE();
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 3);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(INVOKE_STATIC) {
- bool success = DoInvoke<kStatic, false, do_access_check>(self, shadow_frame, inst, inst_data, &result_register);
+ bool success = DoInvoke<kStatic, false, do_access_check>(
+ self, shadow_frame, inst, inst_data, &result_register);
UPDATE_HANDLER_TABLE();
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 3);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(INVOKE_STATIC_RANGE) {
- bool success = DoInvoke<kStatic, true, do_access_check>(self, shadow_frame, inst, inst_data, &result_register);
+ bool success = DoInvoke<kStatic, true, do_access_check>(
+ self, shadow_frame, inst, inst_data, &result_register);
UPDATE_HANDLER_TABLE();
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 3);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(INVOKE_VIRTUAL_QUICK) {
- bool success = DoInvokeVirtualQuick<false>(self, shadow_frame, inst, inst_data, &result_register);
+ bool success = DoInvokeVirtualQuick<false>(
+ self, shadow_frame, inst, inst_data, &result_register);
UPDATE_HANDLER_TABLE();
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 3);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(INVOKE_VIRTUAL_RANGE_QUICK) {
- bool success = DoInvokeVirtualQuick<true>(self, shadow_frame, inst, inst_data, &result_register);
+ bool success = DoInvokeVirtualQuick<true>(
+ self, shadow_frame, inst, inst_data, &result_register);
UPDATE_HANDLER_TABLE();
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 3);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(NEG_INT)
- shadow_frame.SetVReg(inst->VRegA_12x(inst_data), -shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
+ shadow_frame.SetVReg(
+ inst->VRegA_12x(inst_data), -shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
ADVANCE(1);
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(NOT_INT)
- shadow_frame.SetVReg(inst->VRegA_12x(inst_data), ~shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
+ shadow_frame.SetVReg(
+ inst->VRegA_12x(inst_data), ~shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
ADVANCE(1);
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(NEG_LONG)
- shadow_frame.SetVRegLong(inst->VRegA_12x(inst_data), -shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
+ shadow_frame.SetVRegLong(
+ inst->VRegA_12x(inst_data), -shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
ADVANCE(1);
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(NOT_LONG)
- shadow_frame.SetVRegLong(inst->VRegA_12x(inst_data), ~shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
+ shadow_frame.SetVRegLong(
+ inst->VRegA_12x(inst_data), ~shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
ADVANCE(1);
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(NEG_FLOAT)
- shadow_frame.SetVRegFloat(inst->VRegA_12x(inst_data), -shadow_frame.GetVRegFloat(inst->VRegB_12x(inst_data)));
+ shadow_frame.SetVRegFloat(
+ inst->VRegA_12x(inst_data), -shadow_frame.GetVRegFloat(inst->VRegB_12x(inst_data)));
ADVANCE(1);
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(NEG_DOUBLE)
- shadow_frame.SetVRegDouble(inst->VRegA_12x(inst_data), -shadow_frame.GetVRegDouble(inst->VRegB_12x(inst_data)));
+ shadow_frame.SetVRegDouble(
+ inst->VRegA_12x(inst_data), -shadow_frame.GetVRegDouble(inst->VRegB_12x(inst_data)));
ADVANCE(1);
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(INT_TO_LONG)
- shadow_frame.SetVRegLong(inst->VRegA_12x(inst_data), shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
+ shadow_frame.SetVRegLong(
+ inst->VRegA_12x(inst_data), shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
ADVANCE(1);
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(INT_TO_FLOAT)
- shadow_frame.SetVRegFloat(inst->VRegA_12x(inst_data), shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
+ shadow_frame.SetVRegFloat(
+ inst->VRegA_12x(inst_data), shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
ADVANCE(1);
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(INT_TO_DOUBLE)
- shadow_frame.SetVRegDouble(inst->VRegA_12x(inst_data), shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
+ shadow_frame.SetVRegDouble(
+ inst->VRegA_12x(inst_data), shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
ADVANCE(1);
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(LONG_TO_INT)
- shadow_frame.SetVReg(inst->VRegA_12x(inst_data), shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
+ shadow_frame.SetVReg(
+ inst->VRegA_12x(inst_data), shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
ADVANCE(1);
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(LONG_TO_FLOAT)
- shadow_frame.SetVRegFloat(inst->VRegA_12x(inst_data), shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
+ shadow_frame.SetVRegFloat(
+ inst->VRegA_12x(inst_data), shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
ADVANCE(1);
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(LONG_TO_DOUBLE)
- shadow_frame.SetVRegDouble(inst->VRegA_12x(inst_data), shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
+ shadow_frame.SetVRegDouble(
+ inst->VRegA_12x(inst_data), shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
ADVANCE(1);
HANDLE_INSTRUCTION_END();
@@ -1636,7 +1701,8 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(FLOAT_TO_DOUBLE)
- shadow_frame.SetVRegDouble(inst->VRegA_12x(inst_data), shadow_frame.GetVRegFloat(inst->VRegB_12x(inst_data)));
+ shadow_frame.SetVRegDouble(
+ inst->VRegA_12x(inst_data), shadow_frame.GetVRegFloat(inst->VRegB_12x(inst_data)));
ADVANCE(1);
HANDLE_INSTRUCTION_END();
@@ -1657,7 +1723,8 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(DOUBLE_TO_FLOAT)
- shadow_frame.SetVRegFloat(inst->VRegA_12x(inst_data), shadow_frame.GetVRegDouble(inst->VRegB_12x(inst_data)));
+ shadow_frame.SetVRegFloat(
+ inst->VRegA_12x(inst_data), shadow_frame.GetVRegDouble(inst->VRegB_12x(inst_data)));
ADVANCE(1);
HANDLE_INSTRUCTION_END();
@@ -2213,15 +2280,17 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(DIV_INT_LIT16) {
- bool success = DoIntDivide(shadow_frame, inst->VRegA_22s(inst_data),
- shadow_frame.GetVReg(inst->VRegB_22s(inst_data)), inst->VRegC_22s());
+ bool success = DoIntDivide(
+ shadow_frame, inst->VRegA_22s(inst_data), shadow_frame.GetVReg(inst->VRegB_22s(inst_data)),
+ inst->VRegC_22s());
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(REM_INT_LIT16) {
- bool success = DoIntRemainder(shadow_frame, inst->VRegA_22s(inst_data),
- shadow_frame.GetVReg(inst->VRegB_22s(inst_data)), inst->VRegC_22s());
+ bool success = DoIntRemainder(
+ shadow_frame, inst->VRegA_22s(inst_data), shadow_frame.GetVReg(inst->VRegB_22s(inst_data)),
+ inst->VRegC_22s());
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
}
HANDLE_INSTRUCTION_END();
diff --git a/runtime/interpreter/interpreter_switch_impl.cc b/runtime/interpreter/interpreter_switch_impl.cc
index fe7ad77..82f0009 100644
--- a/runtime/interpreter/interpreter_switch_impl.cc
+++ b/runtime/interpreter/interpreter_switch_impl.cc
@@ -68,7 +68,7 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
uint32_t dex_pc = shadow_frame.GetDexPC();
bool notified_method_entry_event = false;
- const instrumentation::Instrumentation* const instrumentation = Runtime::Current()->GetInstrumentation();
+ const auto* const instrumentation = Runtime::Current()->GetInstrumentation();
if (LIKELY(dex_pc == 0)) { // We are entering the method as opposed to deoptimizing.
if (kIsDebugBuild) {
self->AssertNoPendingException();
@@ -231,11 +231,11 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
self->AllowThreadSuspension();
const size_t ref_idx = inst->VRegA_11x(inst_data);
Object* obj_result = shadow_frame.GetVRegReference(ref_idx);
- if (do_assignability_check && obj_result != NULL) {
+ if (do_assignability_check && obj_result != nullptr) {
Class* return_type = shadow_frame.GetMethod()->GetReturnType();
// Re-load since it might have moved.
obj_result = shadow_frame.GetVRegReference(ref_idx);
- if (return_type == NULL) {
+ if (return_type == nullptr) {
// Return the pending exception.
HANDLE_PENDING_EXCEPTION();
}
@@ -266,7 +266,7 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
int4_t val = inst->VRegB_11n(inst_data);
shadow_frame.SetVReg(dst, val);
if (val == 0) {
- shadow_frame.SetVRegReference(dst, NULL);
+ shadow_frame.SetVRegReference(dst, nullptr);
}
inst = inst->Next_1xx();
break;
@@ -277,7 +277,7 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
int16_t val = inst->VRegB_21s();
shadow_frame.SetVReg(dst, val);
if (val == 0) {
- shadow_frame.SetVRegReference(dst, NULL);
+ shadow_frame.SetVRegReference(dst, nullptr);
}
inst = inst->Next_2xx();
break;
@@ -288,7 +288,7 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
int32_t val = inst->VRegB_31i();
shadow_frame.SetVReg(dst, val);
if (val == 0) {
- shadow_frame.SetVRegReference(dst, NULL);
+ shadow_frame.SetVRegReference(dst, nullptr);
}
inst = inst->Next_3xx();
break;
@@ -299,7 +299,7 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
int32_t val = static_cast<int32_t>(inst->VRegB_21h() << 16);
shadow_frame.SetVReg(dst, val);
if (val == 0) {
- shadow_frame.SetVRegReference(dst, NULL);
+ shadow_frame.SetVRegReference(dst, nullptr);
}
inst = inst->Next_2xx();
break;
@@ -328,7 +328,7 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
case Instruction::CONST_STRING: {
PREAMBLE();
String* s = ResolveString(self, shadow_frame, inst->VRegB_21c());
- if (UNLIKELY(s == NULL)) {
+ if (UNLIKELY(s == nullptr)) {
HANDLE_PENDING_EXCEPTION();
} else {
shadow_frame.SetVRegReference(inst->VRegA_21c(inst_data), s);
@@ -339,7 +339,7 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
case Instruction::CONST_STRING_JUMBO: {
PREAMBLE();
String* s = ResolveString(self, shadow_frame, inst->VRegB_31c());
- if (UNLIKELY(s == NULL)) {
+ if (UNLIKELY(s == nullptr)) {
HANDLE_PENDING_EXCEPTION();
} else {
shadow_frame.SetVRegReference(inst->VRegA_31c(inst_data), s);
@@ -351,7 +351,7 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
PREAMBLE();
Class* c = ResolveVerifyAndClinit(inst->VRegB_21c(), shadow_frame.GetMethod(),
self, false, do_access_check);
- if (UNLIKELY(c == NULL)) {
+ if (UNLIKELY(c == nullptr)) {
HANDLE_PENDING_EXCEPTION();
} else {
shadow_frame.SetVRegReference(inst->VRegA_21c(inst_data), c);
@@ -362,7 +362,7 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
case Instruction::MONITOR_ENTER: {
PREAMBLE();
Object* obj = shadow_frame.GetVRegReference(inst->VRegA_11x(inst_data));
- if (UNLIKELY(obj == NULL)) {
+ if (UNLIKELY(obj == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
} else {
@@ -374,7 +374,7 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
case Instruction::MONITOR_EXIT: {
PREAMBLE();
Object* obj = shadow_frame.GetVRegReference(inst->VRegA_11x(inst_data));
- if (UNLIKELY(obj == NULL)) {
+ if (UNLIKELY(obj == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
} else {
@@ -387,11 +387,11 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
PREAMBLE();
Class* c = ResolveVerifyAndClinit(inst->VRegB_21c(), shadow_frame.GetMethod(),
self, false, do_access_check);
- if (UNLIKELY(c == NULL)) {
+ if (UNLIKELY(c == nullptr)) {
HANDLE_PENDING_EXCEPTION();
} else {
Object* obj = shadow_frame.GetVRegReference(inst->VRegA_21c(inst_data));
- if (UNLIKELY(obj != NULL && !obj->InstanceOf(c))) {
+ if (UNLIKELY(obj != nullptr && !obj->InstanceOf(c))) {
ThrowClassCastException(c, obj->GetClass());
HANDLE_PENDING_EXCEPTION();
} else {
@@ -404,11 +404,12 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
PREAMBLE();
Class* c = ResolveVerifyAndClinit(inst->VRegC_22c(), shadow_frame.GetMethod(),
self, false, do_access_check);
- if (UNLIKELY(c == NULL)) {
+ if (UNLIKELY(c == nullptr)) {
HANDLE_PENDING_EXCEPTION();
} else {
Object* obj = shadow_frame.GetVRegReference(inst->VRegB_22c(inst_data));
- shadow_frame.SetVReg(inst->VRegA_22c(inst_data), (obj != NULL && obj->InstanceOf(c)) ? 1 : 0);
+ shadow_frame.SetVReg(inst->VRegA_22c(inst_data),
+ (obj != nullptr && obj->InstanceOf(c)) ? 1 : 0);
inst = inst->Next_2xx();
}
break;
@@ -416,7 +417,7 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
case Instruction::ARRAY_LENGTH: {
PREAMBLE();
Object* array = shadow_frame.GetVRegReference(inst->VRegB_12x(inst_data));
- if (UNLIKELY(array == NULL)) {
+ if (UNLIKELY(array == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
} else {
@@ -431,7 +432,7 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
Object* obj = AllocObjectFromCode<do_access_check, true>(
inst->VRegB_21c(), shadow_frame.GetMethod(), self,
runtime->GetHeap()->GetCurrentAllocator());
- if (UNLIKELY(obj == NULL)) {
+ if (UNLIKELY(obj == nullptr)) {
HANDLE_PENDING_EXCEPTION();
} else {
obj->GetClass()->AssertInitializedOrInitializingInThread(self);
@@ -454,7 +455,7 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
Object* obj = AllocArrayFromCode<do_access_check, true>(
inst->VRegC_22c(), length, shadow_frame.GetMethod(), self,
Runtime::Current()->GetHeap()->GetCurrentAllocator());
- if (UNLIKELY(obj == NULL)) {
+ if (UNLIKELY(obj == nullptr)) {
HANDLE_PENDING_EXCEPTION();
} else {
shadow_frame.SetVRegReference(inst->VRegA_22c(inst_data), obj);
@@ -498,7 +499,7 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
case Instruction::THROW: {
PREAMBLE();
Object* exception = shadow_frame.GetVRegReference(inst->VRegA_11x(inst_data));
- if (UNLIKELY(exception == NULL)) {
+ if (UNLIKELY(exception == nullptr)) {
ThrowNullPointerException("throw with null exception");
} else if (do_assignability_check && !exception->GetClass()->IsThrowableClass()) {
// This should never happen.
@@ -651,7 +652,8 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
}
case Instruction::IF_EQ: {
PREAMBLE();
- if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) == shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
+ if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) ==
+ shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
int16_t offset = inst->VRegC_22t();
if (IsBackwardBranch(offset)) {
self->AllowThreadSuspension();
@@ -664,7 +666,8 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
}
case Instruction::IF_NE: {
PREAMBLE();
- if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) != shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
+ if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) !=
+ shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
int16_t offset = inst->VRegC_22t();
if (IsBackwardBranch(offset)) {
self->AllowThreadSuspension();
@@ -677,7 +680,8 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
}
case Instruction::IF_LT: {
PREAMBLE();
- if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) < shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
+ if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) <
+ shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
int16_t offset = inst->VRegC_22t();
if (IsBackwardBranch(offset)) {
self->AllowThreadSuspension();
@@ -690,7 +694,8 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
}
case Instruction::IF_GE: {
PREAMBLE();
- if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) >= shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
+ if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) >=
+ shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
int16_t offset = inst->VRegC_22t();
if (IsBackwardBranch(offset)) {
self->AllowThreadSuspension();
@@ -703,7 +708,8 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
}
case Instruction::IF_GT: {
PREAMBLE();
- if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) > shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
+ if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) >
+ shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
int16_t offset = inst->VRegC_22t();
if (IsBackwardBranch(offset)) {
self->AllowThreadSuspension();
@@ -716,7 +722,8 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
}
case Instruction::IF_LE: {
PREAMBLE();
- if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) <= shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
+ if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) <=
+ shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
int16_t offset = inst->VRegC_22t();
if (IsBackwardBranch(offset)) {
self->AllowThreadSuspension();
@@ -808,7 +815,7 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
case Instruction::AGET_BOOLEAN: {
PREAMBLE();
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == NULL)) {
+ if (UNLIKELY(a == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
break;
@@ -826,7 +833,7 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
case Instruction::AGET_BYTE: {
PREAMBLE();
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == NULL)) {
+ if (UNLIKELY(a == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
break;
@@ -844,7 +851,7 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
case Instruction::AGET_CHAR: {
PREAMBLE();
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == NULL)) {
+ if (UNLIKELY(a == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
break;
@@ -862,7 +869,7 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
case Instruction::AGET_SHORT: {
PREAMBLE();
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == NULL)) {
+ if (UNLIKELY(a == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
break;
@@ -880,7 +887,7 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
case Instruction::AGET: {
PREAMBLE();
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == NULL)) {
+ if (UNLIKELY(a == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
break;
@@ -898,7 +905,7 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
case Instruction::AGET_WIDE: {
PREAMBLE();
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == NULL)) {
+ if (UNLIKELY(a == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
break;
@@ -916,7 +923,7 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
case Instruction::AGET_OBJECT: {
PREAMBLE();
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == NULL)) {
+ if (UNLIKELY(a == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
break;
@@ -934,7 +941,7 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
case Instruction::APUT_BOOLEAN: {
PREAMBLE();
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == NULL)) {
+ if (UNLIKELY(a == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
break;
@@ -953,7 +960,7 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
case Instruction::APUT_BYTE: {
PREAMBLE();
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == NULL)) {
+ if (UNLIKELY(a == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
break;
@@ -972,7 +979,7 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
case Instruction::APUT_CHAR: {
PREAMBLE();
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == NULL)) {
+ if (UNLIKELY(a == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
break;
@@ -991,7 +998,7 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
case Instruction::APUT_SHORT: {
PREAMBLE();
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == NULL)) {
+ if (UNLIKELY(a == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
break;
@@ -1010,7 +1017,7 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
case Instruction::APUT: {
PREAMBLE();
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == NULL)) {
+ if (UNLIKELY(a == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
break;
@@ -1029,7 +1036,7 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
case Instruction::APUT_WIDE: {
PREAMBLE();
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == NULL)) {
+ if (UNLIKELY(a == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
break;
@@ -1048,7 +1055,7 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
case Instruction::APUT_OBJECT: {
PREAMBLE();
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == NULL)) {
+ if (UNLIKELY(a == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
break;
@@ -1066,43 +1073,50 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
}
case Instruction::IGET_BOOLEAN: {
PREAMBLE();
- bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimBoolean, do_access_check>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimBoolean, do_access_check>(
+ self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
}
case Instruction::IGET_BYTE: {
PREAMBLE();
- bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimByte, do_access_check>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimByte, do_access_check>(
+ self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
}
case Instruction::IGET_CHAR: {
PREAMBLE();
- bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimChar, do_access_check>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimChar, do_access_check>(
+ self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
}
case Instruction::IGET_SHORT: {
PREAMBLE();
- bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimShort, do_access_check>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimShort, do_access_check>(
+ self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
}
case Instruction::IGET: {
PREAMBLE();
- bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimInt, do_access_check>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimInt, do_access_check>(
+ self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
}
case Instruction::IGET_WIDE: {
PREAMBLE();
- bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimLong, do_access_check>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimLong, do_access_check>(
+ self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
}
case Instruction::IGET_OBJECT: {
PREAMBLE();
- bool success = DoFieldGet<InstanceObjectRead, Primitive::kPrimNot, do_access_check>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldGet<InstanceObjectRead, Primitive::kPrimNot, do_access_check>(
+ self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
}
@@ -1150,272 +1164,318 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
}
case Instruction::SGET_BOOLEAN: {
PREAMBLE();
- bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimBoolean, do_access_check>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimBoolean, do_access_check>(
+ self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
}
case Instruction::SGET_BYTE: {
PREAMBLE();
- bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimByte, do_access_check>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimByte, do_access_check>(
+ self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
}
case Instruction::SGET_CHAR: {
PREAMBLE();
- bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimChar, do_access_check>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimChar, do_access_check>(
+ self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
}
case Instruction::SGET_SHORT: {
PREAMBLE();
- bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimShort, do_access_check>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimShort, do_access_check>(
+ self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
}
case Instruction::SGET: {
PREAMBLE();
- bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimInt, do_access_check>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimInt, do_access_check>(
+ self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
}
case Instruction::SGET_WIDE: {
PREAMBLE();
- bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimLong, do_access_check>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimLong, do_access_check>(
+ self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
}
case Instruction::SGET_OBJECT: {
PREAMBLE();
- bool success = DoFieldGet<StaticObjectRead, Primitive::kPrimNot, do_access_check>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldGet<StaticObjectRead, Primitive::kPrimNot, do_access_check>(
+ self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
}
case Instruction::IPUT_BOOLEAN: {
PREAMBLE();
- bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimBoolean, do_access_check, transaction_active>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimBoolean, do_access_check,
+ transaction_active>(self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
}
case Instruction::IPUT_BYTE: {
PREAMBLE();
- bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimByte, do_access_check, transaction_active>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimByte, do_access_check,
+ transaction_active>(self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
}
case Instruction::IPUT_CHAR: {
PREAMBLE();
- bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimChar, do_access_check, transaction_active>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimChar, do_access_check,
+ transaction_active>(self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
}
case Instruction::IPUT_SHORT: {
PREAMBLE();
- bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimShort, do_access_check, transaction_active>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimShort, do_access_check,
+ transaction_active>(self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
}
case Instruction::IPUT: {
PREAMBLE();
- bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimInt, do_access_check, transaction_active>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimInt, do_access_check,
+ transaction_active>(self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
}
case Instruction::IPUT_WIDE: {
PREAMBLE();
- bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimLong, do_access_check, transaction_active>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimLong, do_access_check,
+ transaction_active>(self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
}
case Instruction::IPUT_OBJECT: {
PREAMBLE();
- bool success = DoFieldPut<InstanceObjectWrite, Primitive::kPrimNot, do_access_check, transaction_active>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldPut<InstanceObjectWrite, Primitive::kPrimNot, do_access_check,
+ transaction_active>(self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
}
case Instruction::IPUT_QUICK: {
PREAMBLE();
- bool success = DoIPutQuick<Primitive::kPrimInt, transaction_active>(shadow_frame, inst, inst_data);
+ bool success = DoIPutQuick<Primitive::kPrimInt, transaction_active>(
+ shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
}
case Instruction::IPUT_BOOLEAN_QUICK: {
PREAMBLE();
- bool success = DoIPutQuick<Primitive::kPrimBoolean, transaction_active>(shadow_frame, inst, inst_data);
+ bool success = DoIPutQuick<Primitive::kPrimBoolean, transaction_active>(
+ shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
}
case Instruction::IPUT_BYTE_QUICK: {
PREAMBLE();
- bool success = DoIPutQuick<Primitive::kPrimByte, transaction_active>(shadow_frame, inst, inst_data);
+ bool success = DoIPutQuick<Primitive::kPrimByte, transaction_active>(
+ shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
}
case Instruction::IPUT_CHAR_QUICK: {
PREAMBLE();
- bool success = DoIPutQuick<Primitive::kPrimChar, transaction_active>(shadow_frame, inst, inst_data);
+ bool success = DoIPutQuick<Primitive::kPrimChar, transaction_active>(
+ shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
}
case Instruction::IPUT_SHORT_QUICK: {
PREAMBLE();
- bool success = DoIPutQuick<Primitive::kPrimShort, transaction_active>(shadow_frame, inst, inst_data);
+ bool success = DoIPutQuick<Primitive::kPrimShort, transaction_active>(
+ shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
}
case Instruction::IPUT_WIDE_QUICK: {
PREAMBLE();
- bool success = DoIPutQuick<Primitive::kPrimLong, transaction_active>(shadow_frame, inst, inst_data);
+ bool success = DoIPutQuick<Primitive::kPrimLong, transaction_active>(
+ shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
}
case Instruction::IPUT_OBJECT_QUICK: {
PREAMBLE();
- bool success = DoIPutQuick<Primitive::kPrimNot, transaction_active>(shadow_frame, inst, inst_data);
+ bool success = DoIPutQuick<Primitive::kPrimNot, transaction_active>(
+ shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
}
case Instruction::SPUT_BOOLEAN: {
PREAMBLE();
- bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimBoolean, do_access_check, transaction_active>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimBoolean, do_access_check,
+ transaction_active>(self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
}
case Instruction::SPUT_BYTE: {
PREAMBLE();
- bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimByte, do_access_check, transaction_active>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimByte, do_access_check,
+ transaction_active>(self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
}
case Instruction::SPUT_CHAR: {
PREAMBLE();
- bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimChar, do_access_check, transaction_active>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimChar, do_access_check,
+ transaction_active>(self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
}
case Instruction::SPUT_SHORT: {
PREAMBLE();
- bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimShort, do_access_check, transaction_active>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimShort, do_access_check,
+ transaction_active>(self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
}
case Instruction::SPUT: {
PREAMBLE();
- bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimInt, do_access_check, transaction_active>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimInt, do_access_check,
+ transaction_active>(self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
}
case Instruction::SPUT_WIDE: {
PREAMBLE();
- bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimLong, do_access_check, transaction_active>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimLong, do_access_check,
+ transaction_active>(self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
}
case Instruction::SPUT_OBJECT: {
PREAMBLE();
- bool success = DoFieldPut<StaticObjectWrite, Primitive::kPrimNot, do_access_check, transaction_active>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldPut<StaticObjectWrite, Primitive::kPrimNot, do_access_check,
+ transaction_active>(self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
}
case Instruction::INVOKE_VIRTUAL: {
PREAMBLE();
- bool success = DoInvoke<kVirtual, false, do_access_check>(self, shadow_frame, inst, inst_data, &result_register);
+ bool success = DoInvoke<kVirtual, false, do_access_check>(
+ self, shadow_frame, inst, inst_data, &result_register);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
break;
}
case Instruction::INVOKE_VIRTUAL_RANGE: {
PREAMBLE();
- bool success = DoInvoke<kVirtual, true, do_access_check>(self, shadow_frame, inst, inst_data, &result_register);
+ bool success = DoInvoke<kVirtual, true, do_access_check>(
+ self, shadow_frame, inst, inst_data, &result_register);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
break;
}
case Instruction::INVOKE_SUPER: {
PREAMBLE();
- bool success = DoInvoke<kSuper, false, do_access_check>(self, shadow_frame, inst, inst_data, &result_register);
+ bool success = DoInvoke<kSuper, false, do_access_check>(
+ self, shadow_frame, inst, inst_data, &result_register);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
break;
}
case Instruction::INVOKE_SUPER_RANGE: {
PREAMBLE();
- bool success = DoInvoke<kSuper, true, do_access_check>(self, shadow_frame, inst, inst_data, &result_register);
+ bool success = DoInvoke<kSuper, true, do_access_check>(
+ self, shadow_frame, inst, inst_data, &result_register);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
break;
}
case Instruction::INVOKE_DIRECT: {
PREAMBLE();
- bool success = DoInvoke<kDirect, false, do_access_check>(self, shadow_frame, inst, inst_data, &result_register);
+ bool success = DoInvoke<kDirect, false, do_access_check>(
+ self, shadow_frame, inst, inst_data, &result_register);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
break;
}
case Instruction::INVOKE_DIRECT_RANGE: {
PREAMBLE();
- bool success = DoInvoke<kDirect, true, do_access_check>(self, shadow_frame, inst, inst_data, &result_register);
+ bool success = DoInvoke<kDirect, true, do_access_check>(
+ self, shadow_frame, inst, inst_data, &result_register);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
break;
}
case Instruction::INVOKE_INTERFACE: {
PREAMBLE();
- bool success = DoInvoke<kInterface, false, do_access_check>(self, shadow_frame, inst, inst_data, &result_register);
+ bool success = DoInvoke<kInterface, false, do_access_check>(
+ self, shadow_frame, inst, inst_data, &result_register);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
break;
}
case Instruction::INVOKE_INTERFACE_RANGE: {
PREAMBLE();
- bool success = DoInvoke<kInterface, true, do_access_check>(self, shadow_frame, inst, inst_data, &result_register);
+ bool success = DoInvoke<kInterface, true, do_access_check>(
+ self, shadow_frame, inst, inst_data, &result_register);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
break;
}
case Instruction::INVOKE_STATIC: {
PREAMBLE();
- bool success = DoInvoke<kStatic, false, do_access_check>(self, shadow_frame, inst, inst_data, &result_register);
+ bool success = DoInvoke<kStatic, false, do_access_check>(
+ self, shadow_frame, inst, inst_data, &result_register);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
break;
}
case Instruction::INVOKE_STATIC_RANGE: {
PREAMBLE();
- bool success = DoInvoke<kStatic, true, do_access_check>(self, shadow_frame, inst, inst_data, &result_register);
+ bool success = DoInvoke<kStatic, true, do_access_check>(
+ self, shadow_frame, inst, inst_data, &result_register);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
break;
}
case Instruction::INVOKE_VIRTUAL_QUICK: {
PREAMBLE();
- bool success = DoInvokeVirtualQuick<false>(self, shadow_frame, inst, inst_data, &result_register);
+ bool success = DoInvokeVirtualQuick<false>(
+ self, shadow_frame, inst, inst_data, &result_register);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
break;
}
case Instruction::INVOKE_VIRTUAL_RANGE_QUICK: {
PREAMBLE();
- bool success = DoInvokeVirtualQuick<true>(self, shadow_frame, inst, inst_data, &result_register);
+ bool success = DoInvokeVirtualQuick<true>(
+ self, shadow_frame, inst, inst_data, &result_register);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
break;
}
case Instruction::NEG_INT:
PREAMBLE();
- shadow_frame.SetVReg(inst->VRegA_12x(inst_data), -shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
+ shadow_frame.SetVReg(
+ inst->VRegA_12x(inst_data), -shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
inst = inst->Next_1xx();
break;
case Instruction::NOT_INT:
PREAMBLE();
- shadow_frame.SetVReg(inst->VRegA_12x(inst_data), ~shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
+ shadow_frame.SetVReg(
+ inst->VRegA_12x(inst_data), ~shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
inst = inst->Next_1xx();
break;
case Instruction::NEG_LONG:
PREAMBLE();
- shadow_frame.SetVRegLong(inst->VRegA_12x(inst_data), -shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
+ shadow_frame.SetVRegLong(
+ inst->VRegA_12x(inst_data), -shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
inst = inst->Next_1xx();
break;
case Instruction::NOT_LONG:
PREAMBLE();
- shadow_frame.SetVRegLong(inst->VRegA_12x(inst_data), ~shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
+ shadow_frame.SetVRegLong(
+ inst->VRegA_12x(inst_data), ~shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
inst = inst->Next_1xx();
break;
case Instruction::NEG_FLOAT:
PREAMBLE();
- shadow_frame.SetVRegFloat(inst->VRegA_12x(inst_data), -shadow_frame.GetVRegFloat(inst->VRegB_12x(inst_data)));
+ shadow_frame.SetVRegFloat(
+ inst->VRegA_12x(inst_data), -shadow_frame.GetVRegFloat(inst->VRegB_12x(inst_data)));
inst = inst->Next_1xx();
break;
case Instruction::NEG_DOUBLE:
PREAMBLE();
- shadow_frame.SetVRegDouble(inst->VRegA_12x(inst_data), -shadow_frame.GetVRegDouble(inst->VRegB_12x(inst_data)));
+ shadow_frame.SetVRegDouble(
+ inst->VRegA_12x(inst_data), -shadow_frame.GetVRegDouble(inst->VRegB_12x(inst_data)));
inst = inst->Next_1xx();
break;
case Instruction::INT_TO_LONG:
@@ -1500,20 +1560,20 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
break;
case Instruction::INT_TO_BYTE:
PREAMBLE();
- shadow_frame.SetVReg(inst->VRegA_12x(inst_data),
- static_cast<int8_t>(shadow_frame.GetVReg(inst->VRegB_12x(inst_data))));
+ shadow_frame.SetVReg(inst->VRegA_12x(inst_data), static_cast<int8_t>(
+ shadow_frame.GetVReg(inst->VRegB_12x(inst_data))));
inst = inst->Next_1xx();
break;
case Instruction::INT_TO_CHAR:
PREAMBLE();
- shadow_frame.SetVReg(inst->VRegA_12x(inst_data),
- static_cast<uint16_t>(shadow_frame.GetVReg(inst->VRegB_12x(inst_data))));
+ shadow_frame.SetVReg(inst->VRegA_12x(inst_data), static_cast<uint16_t>(
+ shadow_frame.GetVReg(inst->VRegB_12x(inst_data))));
inst = inst->Next_1xx();
break;
case Instruction::INT_TO_SHORT:
PREAMBLE();
- shadow_frame.SetVReg(inst->VRegA_12x(inst_data),
- static_cast<int16_t>(shadow_frame.GetVReg(inst->VRegB_12x(inst_data))));
+ shadow_frame.SetVReg(inst->VRegA_12x(inst_data), static_cast<int16_t>(
+ shadow_frame.GetVReg(inst->VRegB_12x(inst_data))));
inst = inst->Next_1xx();
break;
case Instruction::ADD_INT: {
@@ -2050,14 +2110,16 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
case Instruction::DIV_INT_LIT16: {
PREAMBLE();
bool success = DoIntDivide(shadow_frame, inst->VRegA_22s(inst_data),
- shadow_frame.GetVReg(inst->VRegB_22s(inst_data)), inst->VRegC_22s());
+ shadow_frame.GetVReg(inst->VRegB_22s(inst_data)),
+ inst->VRegC_22s());
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
}
case Instruction::REM_INT_LIT16: {
PREAMBLE();
bool success = DoIntRemainder(shadow_frame, inst->VRegA_22s(inst_data),
- shadow_frame.GetVReg(inst->VRegB_22s(inst_data)), inst->VRegC_22s());
+ shadow_frame.GetVReg(inst->VRegB_22s(inst_data)),
+ inst->VRegC_22s());
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
}
diff --git a/runtime/interpreter/unstarted_runtime.cc b/runtime/interpreter/unstarted_runtime.cc
index 61def35..f30c93a 100644
--- a/runtime/interpreter/unstarted_runtime.cc
+++ b/runtime/interpreter/unstarted_runtime.cc
@@ -915,7 +915,7 @@ static void UnstartedJNIArrayCreateObjectArray(Thread* self,
Runtime* runtime = Runtime::Current();
ClassLinker* class_linker = runtime->GetClassLinker();
mirror::Class* array_class = class_linker->FindArrayClass(self, &element_class);
- if (UNLIKELY(array_class == NULL)) {
+ if (UNLIKELY(array_class == nullptr)) {
CHECK(self->IsExceptionPending());
return;
}
diff --git a/runtime/jdwp/jdwp.h b/runtime/jdwp/jdwp.h
index 8dffee6..55441c9 100644
--- a/runtime/jdwp/jdwp.h
+++ b/runtime/jdwp/jdwp.h
@@ -127,7 +127,7 @@ struct JdwpState {
* Among other things, this binds to a port to listen for a connection from
* the debugger.
*
- * Returns a newly-allocated JdwpState struct on success, or NULL on failure.
+ * Returns a newly-allocated JdwpState struct on success, or null on failure.
*/
static JdwpState* Create(const JdwpOptions* options)
LOCKS_EXCLUDED(Locks::mutator_lock_);
diff --git a/runtime/jdwp/jdwp_expand_buf.cc b/runtime/jdwp/jdwp_expand_buf.cc
index cc85cdd..e492d7e 100644
--- a/runtime/jdwp/jdwp_expand_buf.cc
+++ b/runtime/jdwp/jdwp_expand_buf.cc
@@ -156,7 +156,7 @@ static void SetUtf8String(uint8_t* buf, const char* str, size_t strLen) {
}
/*
- * Add a UTF8 string as a 4-byte length followed by a non-NULL-terminated
+ * Add a UTF8 string as a 4-byte length followed by a non-null-terminated
* string.
*
* Because these strings are coming out of the VM, it's safe to assume that
diff --git a/runtime/jdwp/jdwp_handler.cc b/runtime/jdwp/jdwp_handler.cc
index 2457f14..8e9ab32 100644
--- a/runtime/jdwp/jdwp_handler.cc
+++ b/runtime/jdwp/jdwp_handler.cc
@@ -133,7 +133,7 @@ static JdwpError RequestInvoke(JdwpState*, Request* request, ExpandBuf* pReply,
if (is_constructor) {
// If we invoked a constructor (which actually returns void), return the receiver,
- // unless we threw, in which case we return NULL.
+ // unless we threw, in which case we return null.
resultTag = JT_OBJECT;
resultValue = (exceptObjId == 0) ? object_id : 0;
}
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index 8a20e39..da891fe 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -86,10 +86,10 @@ class JitCodeCache {
// Return true if the code cache contains a code ptr.
bool ContainsCodePtr(const void* ptr) const;
- // Reserve a region of code of size at least "size". Returns nullptr if there is no more room.
+ // Reserve a region of code of size at least "size". Returns null if there is no more room.
uint8_t* ReserveCode(Thread* self, size_t size) LOCKS_EXCLUDED(lock_);
- // Add a data array of size (end - begin) with the associated contents, returns nullptr if there
+ // Add a data array of size (end - begin) with the associated contents, returns null if there
// is no more room.
uint8_t* AddDataArray(Thread* self, const uint8_t* begin, const uint8_t* end)
LOCKS_EXCLUDED(lock_);
diff --git a/runtime/jni_internal.cc b/runtime/jni_internal.cc
index 554a28d..f5a3a6b 100644
--- a/runtime/jni_internal.cc
+++ b/runtime/jni_internal.cc
@@ -350,7 +350,7 @@ class JNI {
ScopedObjectAccess soa(env);
mirror::Object* obj_field = soa.Decode<mirror::Object*>(jlr_field);
if (obj_field->GetClass() != mirror::Field::StaticClass()) {
- // Not even a java.lang.reflect.Field, return nullptr.
+ // Not even a java.lang.reflect.Field, return null. TODO: is this check necessary?
return nullptr;
}
auto* field = static_cast<mirror::Field*>(obj_field);
diff --git a/runtime/jni_internal_test.cc b/runtime/jni_internal_test.cc
index 5516eab..77db404 100644
--- a/runtime/jni_internal_test.cc
+++ b/runtime/jni_internal_test.cc
@@ -882,7 +882,7 @@ TEST_F(JniInternalTest, FromReflectedMethod_ToReflectedMethod) {
}
static void BogusMethod() {
- // You can't pass nullptr function pointers to RegisterNatives.
+ // You can't pass null function pointers to RegisterNatives.
}
TEST_F(JniInternalTest, RegisterAndUnregisterNatives) {
@@ -1025,13 +1025,13 @@ TEST_F(JniInternalTest, RegisterAndUnregisterNatives) {
env_->set_region_fn(a, size - 1, size, nullptr); \
ExpectException(aioobe_); \
\
- /* It's okay for the buffer to be nullptr as long as the length is 0. */ \
+ /* It's okay for the buffer to be null as long as the length is 0. */ \
env_->get_region_fn(a, 2, 0, nullptr); \
/* Even if the offset is invalid... */ \
env_->get_region_fn(a, 123, 0, nullptr); \
ExpectException(aioobe_); \
\
- /* It's okay for the buffer to be nullptr as long as the length is 0. */ \
+ /* It's okay for the buffer to be null as long as the length is 0. */ \
env_->set_region_fn(a, 2, 0, nullptr); \
/* Even if the offset is invalid... */ \
env_->set_region_fn(a, 123, 0, nullptr); \
@@ -1200,7 +1200,7 @@ TEST_F(JniInternalTest, NewObjectArrayWithInitialValue) {
}
TEST_F(JniInternalTest, GetArrayLength) {
- // Already tested in NewObjectArray/NewPrimitiveArray except for NULL.
+ // Already tested in NewObjectArray/NewPrimitiveArray except for null.
CheckJniAbortCatcher jni_abort_catcher;
bool old_check_jni = vm_->SetCheckJniEnabled(false);
EXPECT_EQ(0, env_->GetArrayLength(nullptr));
@@ -1463,7 +1463,7 @@ TEST_F(JniInternalTest, GetStringRegion_GetStringUTFRegion) {
EXPECT_EQ('l', chars[2]);
EXPECT_EQ('x', chars[3]);
- // It's okay for the buffer to be nullptr as long as the length is 0.
+ // It's okay for the buffer to be null as long as the length is 0.
env_->GetStringRegion(s, 2, 0, nullptr);
// Even if the offset is invalid...
env_->GetStringRegion(s, 123, 0, nullptr);
@@ -1485,7 +1485,7 @@ TEST_F(JniInternalTest, GetStringRegion_GetStringUTFRegion) {
EXPECT_EQ('l', bytes[2]);
EXPECT_EQ('x', bytes[3]);
- // It's okay for the buffer to be nullptr as long as the length is 0.
+ // It's okay for the buffer to be null as long as the length is 0.
env_->GetStringUTFRegion(s, 2, 0, nullptr);
// Even if the offset is invalid...
env_->GetStringUTFRegion(s, 123, 0, nullptr);
@@ -1493,7 +1493,7 @@ TEST_F(JniInternalTest, GetStringRegion_GetStringUTFRegion) {
}
TEST_F(JniInternalTest, GetStringUTFChars_ReleaseStringUTFChars) {
- // Passing in a nullptr jstring is ignored normally, but caught by -Xcheck:jni.
+ // Passing in a null jstring is ignored normally, but caught by -Xcheck:jni.
bool old_check_jni = vm_->SetCheckJniEnabled(false);
{
CheckJniAbortCatcher check_jni_abort_catcher;
@@ -2102,7 +2102,7 @@ TEST_F(JniInternalTest, MonitorEnterExit) {
env_->ExceptionClear();
EXPECT_TRUE(env_->IsInstanceOf(thrown_exception, imse_class));
- // It's an error to call MonitorEnter or MonitorExit on nullptr.
+ // It's an error to call MonitorEnter or MonitorExit on null.
{
CheckJniAbortCatcher check_jni_abort_catcher;
env_->MonitorEnter(nullptr);
diff --git a/runtime/mapping_table.h b/runtime/mapping_table.h
index 79e6e94..dcd5f00 100644
--- a/runtime/mapping_table.h
+++ b/runtime/mapping_table.h
@@ -106,7 +106,7 @@ class MappingTable {
const MappingTable* const table_; // The original table.
uint32_t element_; // A value in the range 0 to end_.
const uint32_t end_; // Equal to table_->DexToPcSize().
- const uint8_t* encoded_table_ptr_; // Either nullptr or points to encoded data after this entry.
+ const uint8_t* encoded_table_ptr_; // Either null or points to encoded data after this entry.
uint32_t native_pc_offset_; // The current value of native pc offset.
uint32_t dex_pc_; // The current value of dex pc.
};
diff --git a/runtime/mem_map.cc b/runtime/mem_map.cc
index edd2888..959bb75 100644
--- a/runtime/mem_map.cc
+++ b/runtime/mem_map.cc
@@ -190,7 +190,7 @@ static bool CheckNonOverlapping(uintptr_t begin,
// the expected value, calling munmap if validation fails, giving the
// reason in error_msg.
//
-// If the expected_ptr is nullptr, nothing is checked beyond the fact
+// If the expected_ptr is null, nothing is checked beyond the fact
// that the actual_ptr is not MAP_FAILED. However, if expected_ptr is
// non-null, we check that pointer is the actual_ptr == expected_ptr,
// and if not, report in error_msg what the conflict mapping was if
@@ -398,8 +398,8 @@ MemMap* MemMap::MapAnonymous(const char* name, uint8_t* expected_ptr, size_t byt
page_aligned_byte_count, prot, false);
}
-MemMap* MemMap::MapFileAtAddress(uint8_t* expected_ptr, size_t byte_count, int prot, int flags, int fd,
- off_t start, bool reuse, const char* filename,
+MemMap* MemMap::MapFileAtAddress(uint8_t* expected_ptr, size_t byte_count, int prot, int flags,
+ int fd, off_t start, bool reuse, const char* filename,
std::string* error_msg) {
CHECK_NE(0, prot);
CHECK_NE(0, flags & (MAP_SHARED | MAP_PRIVATE));
@@ -429,7 +429,8 @@ MemMap* MemMap::MapFileAtAddress(uint8_t* expected_ptr, size_t byte_count, int p
size_t page_aligned_byte_count = RoundUp(byte_count + page_offset, kPageSize);
// The 'expected_ptr' is modified (if specified, ie non-null) to be page aligned to the file but
// not necessarily to virtual memory. mmap will page align 'expected' for us.
- uint8_t* page_aligned_expected = (expected_ptr == nullptr) ? nullptr : (expected_ptr - page_offset);
+ uint8_t* page_aligned_expected =
+ (expected_ptr == nullptr) ? nullptr : (expected_ptr - page_offset);
uint8_t* actual = reinterpret_cast<uint8_t*>(mmap(page_aligned_expected,
page_aligned_byte_count,
diff --git a/runtime/mem_map.h b/runtime/mem_map.h
index 11b2569..dc6d935 100644
--- a/runtime/mem_map.h
+++ b/runtime/mem_map.h
@@ -53,24 +53,25 @@ static constexpr bool kMadviseZeroes = false;
class MemMap {
public:
// Request an anonymous region of length 'byte_count' and a requested base address.
- // Use NULL as the requested base address if you don't care.
+ // Use null as the requested base address if you don't care.
// "reuse" allows re-mapping an address range from an existing mapping.
//
// The word "anonymous" in this context means "not backed by a file". The supplied
// 'ashmem_name' will be used -- on systems that support it -- to give the mapping
// a name.
//
- // On success, returns returns a MemMap instance. On failure, returns a NULL;
+ // On success, returns a MemMap instance. On failure, returns null.
static MemMap* MapAnonymous(const char* ashmem_name, uint8_t* addr, size_t byte_count, int prot,
bool low_4gb, bool reuse, std::string* error_msg);
// Map part of a file, taking care of non-page aligned offsets. The
// "start" offset is absolute, not relative.
//
- // On success, returns returns a MemMap instance. On failure, returns a NULL;
+ // On success, returns a MemMap instance. On failure, returns null.
static MemMap* MapFile(size_t byte_count, int prot, int flags, int fd, off_t start,
const char* filename, std::string* error_msg) {
- return MapFileAtAddress(NULL, byte_count, prot, flags, fd, start, false, filename, error_msg);
+ return MapFileAtAddress(
+ nullptr, byte_count, prot, flags, fd, start, false, filename, error_msg);
}
// Map part of a file, taking care of non-page aligned offsets. The
@@ -79,13 +80,12 @@ class MemMap {
// mapping. "reuse" allows us to create a view into an existing
// mapping where we do not take ownership of the memory.
//
- // On success, returns returns a MemMap instance. On failure, returns a
- // nullptr;
+ // On success, returns a MemMap instance. On failure, returns null.
static MemMap* MapFileAtAddress(uint8_t* addr, size_t byte_count, int prot, int flags, int fd,
off_t start, bool reuse, const char* filename,
std::string* error_msg);
- // Releases the memory mapping
+ // Releases the memory mapping.
~MemMap() LOCKS_EXCLUDED(Locks::mem_maps_lock_);
const std::string& GetName() const {
diff --git a/runtime/memory_region.cc b/runtime/memory_region.cc
index 06eba0f..a5c70c3 100644
--- a/runtime/memory_region.cc
+++ b/runtime/memory_region.cc
@@ -25,7 +25,7 @@
namespace art {
void MemoryRegion::CopyFrom(size_t offset, const MemoryRegion& from) const {
- CHECK(from.pointer() != NULL);
+ CHECK(from.pointer() != nullptr);
CHECK_GT(from.size(), 0U);
CHECK_GE(this->size(), from.size());
CHECK_LE(offset, this->size() - from.size());
diff --git a/runtime/mirror/art_method-inl.h b/runtime/mirror/art_method-inl.h
index 5fc96ad..0f306e8 100644
--- a/runtime/mirror/art_method-inl.h
+++ b/runtime/mirror/art_method-inl.h
@@ -48,7 +48,7 @@ inline Class* ArtMethod::GetJavaLangReflectArtMethod() {
inline Class* ArtMethod::GetDeclaringClass() {
Class* result = GetFieldObject<Class>(OFFSET_OF_OBJECT_MEMBER(ArtMethod, declaring_class_));
- DCHECK(result != NULL) << this;
+ DCHECK(result != nullptr) << this;
DCHECK(result->IsIdxLoaded() || result->IsErroneous()) << this;
return result;
}
diff --git a/runtime/mirror/art_method.cc b/runtime/mirror/art_method.cc
index 9483ba6..543cf9b 100644
--- a/runtime/mirror/art_method.cc
+++ b/runtime/mirror/art_method.cc
@@ -362,7 +362,7 @@ const void* ArtMethod::GetQuickOatEntryPoint(size_t pointer_size) {
Runtime* runtime = Runtime::Current();
ClassLinker* class_linker = runtime->GetClassLinker();
const void* code = runtime->GetInstrumentation()->GetQuickCodeFor(this, pointer_size);
- // On failure, instead of nullptr we get the quick-generic-jni-trampoline for native method
+ // On failure, instead of null we get the quick-generic-jni-trampoline for native method
// indicating the generic JNI, or the quick-to-interpreter-bridge (but not the trampoline)
// for non-native methods.
if (class_linker->IsQuickToInterpreterBridge(code) ||
@@ -503,7 +503,7 @@ QuickMethodFrameInfo ArtMethod::GetQuickFrameInfo() {
const void* entry_point = runtime->GetInstrumentation()->GetQuickCodeFor(this, sizeof(void*));
ClassLinker* class_linker = runtime->GetClassLinker();
- // On failure, instead of nullptr we get the quick-generic-jni-trampoline for native method
+ // On failure, instead of null we get the quick-generic-jni-trampoline for native method
// indicating the generic JNI, or the quick-to-interpreter-bridge (but not the trampoline)
// for non-native methods. And we really shouldn't see a failure for non-native methods here.
DCHECK(!class_linker->IsQuickToInterpreterBridge(entry_point));
diff --git a/runtime/mirror/art_method.h b/runtime/mirror/art_method.h
index b899b25..0da5925 100644
--- a/runtime/mirror/art_method.h
+++ b/runtime/mirror/art_method.h
@@ -341,10 +341,10 @@ class MANAGED ArtMethod FINAL : public Object {
return reinterpret_cast<const void*>(code);
}
- // Actual entry point pointer to compiled oat code or nullptr.
+ // Actual entry point pointer to compiled oat code or null.
const void* GetQuickOatEntryPoint(size_t pointer_size)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- // Actual pointer to compiled oat code or nullptr.
+ // Actual pointer to compiled oat code or null.
const void* GetQuickOatCodePointer(size_t pointer_size)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return EntryPointToCodePointer(GetQuickOatEntryPoint(pointer_size));
diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h
index aaa66f9..712286f 100644
--- a/runtime/mirror/class-inl.h
+++ b/runtime/mirror/class-inl.h
@@ -66,7 +66,7 @@ inline ObjectArray<ArtMethod>* Class::GetDirectMethods() {
inline void Class::SetDirectMethods(ObjectArray<ArtMethod>* new_direct_methods)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- DCHECK(NULL == GetFieldObject<ObjectArray<ArtMethod>>(
+ DCHECK(nullptr == GetFieldObject<ObjectArray<ArtMethod>>(
OFFSET_OF_OBJECT_MEMBER(Class, direct_methods_)));
DCHECK_NE(0, new_direct_methods->GetLength());
SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(Class, direct_methods_), new_direct_methods);
@@ -85,7 +85,7 @@ inline void Class::SetDirectMethod(uint32_t i, ArtMethod* f) // TODO: uint16_t
// Returns the number of static, private, and constructor methods.
inline uint32_t Class::NumDirectMethods() {
- return (GetDirectMethods() != NULL) ? GetDirectMethods()->GetLength() : 0;
+ return (GetDirectMethods() != nullptr) ? GetDirectMethods()->GetLength() : 0;
}
template<VerifyObjectFlags kVerifyFlags>
@@ -102,7 +102,7 @@ inline void Class::SetVirtualMethods(ObjectArray<ArtMethod>* new_virtual_methods
}
inline uint32_t Class::NumVirtualMethods() {
- return (GetVirtualMethods() != NULL) ? GetVirtualMethods()->GetLength() : 0;
+ return (GetVirtualMethods() != nullptr) ? GetVirtualMethods()->GetLength() : 0;
}
template<VerifyObjectFlags kVerifyFlags>
@@ -186,7 +186,7 @@ inline void Class::SetEmbeddedVTableEntry(uint32_t i, ArtMethod* method) {
}
inline bool Class::Implements(Class* klass) {
- DCHECK(klass != NULL);
+ DCHECK(klass != nullptr);
DCHECK(klass->IsInterface()) << PrettyClass(this);
// All interfaces implemented directly and by our superclass, and
// recursively all super-interfaces of those interfaces, are listed
@@ -233,8 +233,8 @@ inline bool Class::IsAssignableFromArray(Class* src) {
// If "this" is not also an array, it must be Object.
// src's super should be java_lang_Object, since it is an array.
Class* java_lang_Object = src->GetSuperClass();
- DCHECK(java_lang_Object != NULL) << PrettyClass(src);
- DCHECK(java_lang_Object->GetSuperClass() == NULL) << PrettyClass(src);
+ DCHECK(java_lang_Object != nullptr) << PrettyClass(src);
+ DCHECK(java_lang_Object->GetSuperClass() == nullptr) << PrettyClass(src);
return this == java_lang_Object;
}
return IsArrayAssignableFromArray(src);
@@ -335,13 +335,13 @@ inline bool Class::IsSubClass(Class* klass) {
return true;
}
current = current->GetSuperClass();
- } while (current != NULL);
+ } while (current != nullptr);
return false;
}
inline ArtMethod* Class::FindVirtualMethodForInterface(ArtMethod* method) {
Class* declaring_class = method->GetDeclaringClass();
- DCHECK(declaring_class != NULL) << PrettyClass(this);
+ DCHECK(declaring_class != nullptr) << PrettyClass(this);
DCHECK(declaring_class->IsInterface()) << PrettyMethod(method);
// TODO cache to improve lookup speed
int32_t iftable_count = GetIfTableCount();
@@ -351,7 +351,7 @@ inline ArtMethod* Class::FindVirtualMethodForInterface(ArtMethod* method) {
return iftable->GetMethodArray(i)->Get(method->GetMethodIndex());
}
}
- return NULL;
+ return nullptr;
}
inline ArtMethod* Class::FindVirtualMethodForVirtual(ArtMethod* method) {
@@ -382,7 +382,7 @@ inline IfTable* Class::GetIfTable() {
inline int32_t Class::GetIfTableCount() {
IfTable* iftable = GetIfTable();
- if (iftable == NULL) {
+ if (iftable == nullptr) {
return 0;
}
return iftable->Count();
@@ -484,7 +484,7 @@ inline void Class::SetClinitThreadId(pid_t new_clinit_thread_id) {
}
inline void Class::SetVerifyErrorClass(Class* klass) {
- CHECK(klass != NULL) << PrettyClass(this);
+ CHECK(klass != nullptr) << PrettyClass(this);
if (Runtime::Current()->IsActiveTransaction()) {
SetFieldObject<true>(OFFSET_OF_OBJECT_MEMBER(Class, verify_error_class_), klass);
} else {
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index 5005346..18496fd 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -400,7 +400,7 @@ class MANAGED Class FINAL : public Object {
// Depth of class from java.lang.Object
uint32_t Depth() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
uint32_t depth = 0;
- for (Class* klass = this; klass->GetSuperClass() != NULL; klass = klass->GetSuperClass()) {
+ for (Class* klass = this; klass->GetSuperClass() != nullptr; klass = klass->GetSuperClass()) {
depth++;
}
return depth;
@@ -409,7 +409,7 @@ class MANAGED Class FINAL : public Object {
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
bool IsArrayClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return GetComponentType<kVerifyFlags, kReadBarrierOption>() != NULL;
+ return GetComponentType<kVerifyFlags, kReadBarrierOption>() != nullptr;
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
@@ -437,8 +437,8 @@ class MANAGED Class FINAL : public Object {
}
void SetComponentType(Class* new_component_type) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- DCHECK(GetComponentType() == NULL);
- DCHECK(new_component_type != NULL);
+ DCHECK(GetComponentType() == nullptr);
+ DCHECK(new_component_type != nullptr);
// Component type is invariant: use non-transactional mode without check.
SetFieldObject<false, false>(ComponentTypeOffset(), new_component_type);
}
@@ -454,7 +454,7 @@ class MANAGED Class FINAL : public Object {
}
bool IsObjectClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return !IsPrimitive() && GetSuperClass() == NULL;
+ return !IsPrimitive() && GetSuperClass() == nullptr;
}
bool IsInstantiableNonArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -611,7 +611,7 @@ class MANAGED Class FINAL : public Object {
// that extends) another can be assigned to its parent, but not vice-versa. All Classes may assign
// to themselves. Classes for primitive types may not assign to each other.
ALWAYS_INLINE bool IsAssignableFrom(Class* src) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- DCHECK(src != NULL);
+ DCHECK(src != nullptr);
if (this == src) {
// Can always assign to things of the same type.
return true;
@@ -638,7 +638,7 @@ class MANAGED Class FINAL : public Object {
}
bool HasSuperClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return GetSuperClass() != NULL;
+ return GetSuperClass() != nullptr;
}
static MemberOffset SuperClassOffset() {
@@ -1103,14 +1103,14 @@ class MANAGED Class FINAL : public Object {
bool ProxyDescriptorEquals(const char* match) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- // defining class loader, or NULL for the "bootstrap" system loader
+ // Defining class loader, or null for the "bootstrap" system loader.
HeapReference<ClassLoader> class_loader_;
// For array classes, the component class object for instanceof/checkcast
- // (for String[][][], this will be String[][]). NULL for non-array classes.
+ // (for String[][][], this will be String[][]). null for non-array classes.
HeapReference<Class> component_type_;
- // DexCache of resolved constant pool entries (will be NULL for classes generated by the
+ // DexCache of resolved constant pool entries (will be null for classes generated by the
// runtime such as arrays and primitive classes).
HeapReference<DexCache> dex_cache_;
@@ -1136,7 +1136,7 @@ class MANAGED Class FINAL : public Object {
// Descriptor for the class such as "java.lang.Class" or "[C". Lazily initialized by ComputeName
HeapReference<String> name_;
- // The superclass, or NULL if this is java.lang.Object, an interface or primitive type.
+ // The superclass, or null if this is java.lang.Object, an interface or primitive type.
HeapReference<Class> super_class_;
// If class verify fails, we must return same error on subsequent tries.
diff --git a/runtime/mirror/dex_cache_test.cc b/runtime/mirror/dex_cache_test.cc
index 1d6846b..228fce5 100644
--- a/runtime/mirror/dex_cache_test.cc
+++ b/runtime/mirror/dex_cache_test.cc
@@ -34,10 +34,10 @@ class DexCacheTest : public CommonRuntimeTest {};
TEST_F(DexCacheTest, Open) {
ScopedObjectAccess soa(Thread::Current());
StackHandleScope<1> hs(soa.Self());
- ASSERT_TRUE(java_lang_dex_file_ != NULL);
+ ASSERT_TRUE(java_lang_dex_file_ != nullptr);
Handle<DexCache> dex_cache(
hs.NewHandle(class_linker_->AllocDexCache(soa.Self(), *java_lang_dex_file_)));
- ASSERT_TRUE(dex_cache.Get() != NULL);
+ ASSERT_TRUE(dex_cache.Get() != nullptr);
EXPECT_EQ(java_lang_dex_file_->NumStringIds(), dex_cache->NumStrings());
EXPECT_EQ(java_lang_dex_file_->NumTypeIds(), dex_cache->NumResolvedTypes());
diff --git a/runtime/mirror/iftable-inl.h b/runtime/mirror/iftable-inl.h
index d1309d2..b465d07 100644
--- a/runtime/mirror/iftable-inl.h
+++ b/runtime/mirror/iftable-inl.h
@@ -23,7 +23,7 @@ namespace art {
namespace mirror {
inline void IfTable::SetInterface(int32_t i, Class* interface) {
- DCHECK(interface != NULL);
+ DCHECK(interface != nullptr);
DCHECK(interface->IsInterface());
const size_t idx = i * kMax + kInterface;
DCHECK_EQ(Get(idx), static_cast<Object*>(nullptr));
diff --git a/runtime/mirror/iftable.h b/runtime/mirror/iftable.h
index 4d899d2..1c1c7b3 100644
--- a/runtime/mirror/iftable.h
+++ b/runtime/mirror/iftable.h
@@ -27,7 +27,7 @@ class MANAGED IfTable FINAL : public ObjectArray<Object> {
public:
ALWAYS_INLINE Class* GetInterface(int32_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
Class* interface = GetWithoutChecks((i * kMax) + kInterface)->AsClass();
- DCHECK(interface != NULL);
+ DCHECK(interface != nullptr);
return interface;
}
@@ -37,14 +37,14 @@ class MANAGED IfTable FINAL : public ObjectArray<Object> {
ObjectArray<ArtMethod>* GetMethodArray(int32_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ObjectArray<ArtMethod>* method_array =
down_cast<ObjectArray<ArtMethod>*>(Get((i * kMax) + kMethodArray));
- DCHECK(method_array != NULL);
+ DCHECK(method_array != nullptr);
return method_array;
}
size_t GetMethodArrayCount(int32_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ObjectArray<ArtMethod>* method_array =
down_cast<ObjectArray<ArtMethod>*>(Get((i * kMax) + kMethodArray));
- if (method_array == NULL) {
+ if (method_array == nullptr) {
return 0;
}
return method_array->GetLength();
@@ -52,8 +52,8 @@ class MANAGED IfTable FINAL : public ObjectArray<Object> {
void SetMethodArray(int32_t i, ObjectArray<ArtMethod>* new_ma)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- DCHECK(new_ma != NULL);
- DCHECK(Get((i * kMax) + kMethodArray) == NULL);
+ DCHECK(new_ma != nullptr);
+ DCHECK(Get((i * kMax) + kMethodArray) == nullptr);
Set<false>((i * kMax) + kMethodArray, new_ma);
}
diff --git a/runtime/mirror/object-inl.h b/runtime/mirror/object-inl.h
index af0e856..2581fad 100644
--- a/runtime/mirror/object-inl.h
+++ b/runtime/mirror/object-inl.h
@@ -48,7 +48,7 @@ inline Class* Object::GetClass() {
template<VerifyObjectFlags kVerifyFlags>
inline void Object::SetClass(Class* new_klass) {
- // new_klass may be NULL prior to class linker initialization.
+ // new_klass may be null prior to class linker initialization.
// We don't mark the card as this occurs as part of object allocation. Not all objects have
// backing cards, such as large objects.
// We use non transactional version since we can't undo this write. We also disable checking as
@@ -179,15 +179,15 @@ inline void Object::AssertReadBarrierPointer() const {
template<VerifyObjectFlags kVerifyFlags>
inline bool Object::VerifierInstanceOf(Class* klass) {
- DCHECK(klass != NULL);
- DCHECK(GetClass<kVerifyFlags>() != NULL);
+ DCHECK(klass != nullptr);
+ DCHECK(GetClass<kVerifyFlags>() != nullptr);
return klass->IsInterface() || InstanceOf(klass);
}
template<VerifyObjectFlags kVerifyFlags>
inline bool Object::InstanceOf(Class* klass) {
- DCHECK(klass != NULL);
- DCHECK(GetClass<kVerifyNone>() != NULL);
+ DCHECK(klass != nullptr);
+ DCHECK(GetClass<kVerifyNone>() != nullptr);
return klass->IsAssignableFrom(GetClass<kVerifyFlags>());
}
diff --git a/runtime/mirror/object.cc b/runtime/mirror/object.cc
index 04d0cd8..5dac985 100644
--- a/runtime/mirror/object.cc
+++ b/runtime/mirror/object.cc
@@ -203,7 +203,7 @@ void Object::CheckFieldAssignmentImpl(MemberOffset field_offset, Object* new_val
!runtime->GetHeap()->IsObjectValidationEnabled() || !c->IsResolved()) {
return;
}
- for (Class* cur = c; cur != NULL; cur = cur->GetSuperClass()) {
+ for (Class* cur = c; cur != nullptr; cur = cur->GetSuperClass()) {
ArtField* fields = cur->GetIFields();
for (size_t i = 0, count = cur->NumInstanceFields(); i < count; ++i) {
StackHandleScope<1> hs(Thread::Current());
diff --git a/runtime/mirror/object_array-inl.h b/runtime/mirror/object_array-inl.h
index 30bc1cd..d473816 100644
--- a/runtime/mirror/object_array-inl.h
+++ b/runtime/mirror/object_array-inl.h
@@ -57,14 +57,14 @@ template<class T>
inline T* ObjectArray<T>::Get(int32_t i) {
if (!CheckIsValidIndex(i)) {
DCHECK(Thread::Current()->IsExceptionPending());
- return NULL;
+ return nullptr;
}
return GetFieldObject<T>(OffsetOfElement(i));
}
template<class T> template<VerifyObjectFlags kVerifyFlags>
inline bool ObjectArray<T>::CheckAssignable(T* object) {
- if (object != NULL) {
+ if (object != nullptr) {
Class* element_class = GetClass<kVerifyFlags>()->GetComponentType();
if (UNLIKELY(!object->InstanceOf(element_class))) {
ThrowArrayStoreException(object);
diff --git a/runtime/mirror/object_test.cc b/runtime/mirror/object_test.cc
index 747a008..2262af5 100644
--- a/runtime/mirror/object_test.cc
+++ b/runtime/mirror/object_test.cc
@@ -61,11 +61,12 @@ class ObjectTest : public CommonRuntimeTest {
Handle<String> string(
hs.NewHandle(String::AllocFromModifiedUtf8(self, expected_utf16_length, utf8_in)));
ASSERT_EQ(expected_utf16_length, string->GetLength());
- ASSERT_TRUE(string->GetCharArray() != NULL);
- ASSERT_TRUE(string->GetCharArray()->GetData() != NULL);
+ ASSERT_TRUE(string->GetCharArray() != nullptr);
+ ASSERT_TRUE(string->GetCharArray()->GetData() != nullptr);
// strlen is necessary because the 1-character string "\x00\x00" is interpreted as ""
ASSERT_TRUE(string->Equals(utf8_in) || (expected_utf16_length == 1 && strlen(utf8_in) == 0));
- ASSERT_TRUE(string->Equals(StringPiece(utf8_in)) || (expected_utf16_length == 1 && strlen(utf8_in) == 0));
+ ASSERT_TRUE(string->Equals(StringPiece(utf8_in)) ||
+ (expected_utf16_length == 1 && strlen(utf8_in) == 0));
for (int32_t i = 0; i < expected_utf16_length; i++) {
EXPECT_EQ(utf16_expected[i], string->UncheckedCharAt(i));
}
@@ -110,11 +111,11 @@ TEST_F(ObjectTest, AllocObjectArray) {
Handle<ObjectArray<Object>> oa(
hs.NewHandle(class_linker_->AllocObjectArray<Object>(soa.Self(), 2)));
EXPECT_EQ(2, oa->GetLength());
- EXPECT_TRUE(oa->Get(0) == NULL);
- EXPECT_TRUE(oa->Get(1) == NULL);
+ EXPECT_TRUE(oa->Get(0) == nullptr);
+ EXPECT_TRUE(oa->Get(1) == nullptr);
oa->Set<false>(0, oa.Get());
EXPECT_TRUE(oa->Get(0) == oa.Get());
- EXPECT_TRUE(oa->Get(1) == NULL);
+ EXPECT_TRUE(oa->Get(1) == nullptr);
oa->Set<false>(1, oa.Get());
EXPECT_TRUE(oa->Get(0) == oa.Get());
EXPECT_TRUE(oa->Get(1) == oa.Get());
@@ -122,17 +123,17 @@ TEST_F(ObjectTest, AllocObjectArray) {
Class* aioobe = class_linker_->FindSystemClass(soa.Self(),
"Ljava/lang/ArrayIndexOutOfBoundsException;");
- EXPECT_TRUE(oa->Get(-1) == NULL);
+ EXPECT_TRUE(oa->Get(-1) == nullptr);
EXPECT_TRUE(soa.Self()->IsExceptionPending());
EXPECT_EQ(aioobe, soa.Self()->GetException()->GetClass());
soa.Self()->ClearException();
- EXPECT_TRUE(oa->Get(2) == NULL);
+ EXPECT_TRUE(oa->Get(2) == nullptr);
EXPECT_TRUE(soa.Self()->IsExceptionPending());
EXPECT_EQ(aioobe, soa.Self()->GetException()->GetClass());
soa.Self()->ClearException();
- ASSERT_TRUE(oa->GetClass() != NULL);
+ ASSERT_TRUE(oa->GetClass() != nullptr);
Handle<mirror::Class> klass(hs.NewHandle(oa->GetClass()));
ASSERT_EQ(2U, klass->NumDirectInterfaces());
EXPECT_EQ(class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/Cloneable;"),
@@ -308,13 +309,14 @@ TEST_F(ObjectTest, CheckAndAllocArrayFromCode) {
Class* java_util_Arrays = class_linker_->FindSystemClass(soa.Self(), "Ljava/util/Arrays;");
ArtMethod* sort = java_util_Arrays->FindDirectMethod("sort", "([I)V");
const DexFile::StringId* string_id = java_lang_dex_file_->FindStringId("[I");
- ASSERT_TRUE(string_id != NULL);
+ ASSERT_TRUE(string_id != nullptr);
const DexFile::TypeId* type_id = java_lang_dex_file_->FindTypeId(
java_lang_dex_file_->GetIndexForStringId(*string_id));
- ASSERT_TRUE(type_id != NULL);
+ ASSERT_TRUE(type_id != nullptr);
uint32_t type_idx = java_lang_dex_file_->GetIndexForTypeId(*type_id);
- Object* array = CheckAndAllocArrayFromCodeInstrumented(type_idx, 3, sort, Thread::Current(), false,
- Runtime::Current()->GetHeap()->GetCurrentAllocator());
+ Object* array = CheckAndAllocArrayFromCodeInstrumented(
+ type_idx, 3, sort, Thread::Current(), false,
+ Runtime::Current()->GetHeap()->GetCurrentAllocator());
EXPECT_TRUE(array->IsArrayInstance());
EXPECT_EQ(3, array->AsArray()->GetLength());
EXPECT_TRUE(array->GetClass()->IsArrayClass());
@@ -367,36 +369,36 @@ TEST_F(ObjectTest, StaticFieldFromCode) {
Class* klass = class_linker_->FindClass(soa.Self(), "LStaticsFromCode;", loader);
ArtMethod* clinit = klass->FindClassInitializer();
const DexFile::StringId* klass_string_id = dex_file->FindStringId("LStaticsFromCode;");
- ASSERT_TRUE(klass_string_id != NULL);
+ ASSERT_TRUE(klass_string_id != nullptr);
const DexFile::TypeId* klass_type_id = dex_file->FindTypeId(
dex_file->GetIndexForStringId(*klass_string_id));
- ASSERT_TRUE(klass_type_id != NULL);
+ ASSERT_TRUE(klass_type_id != nullptr);
const DexFile::StringId* type_string_id = dex_file->FindStringId("Ljava/lang/Object;");
- ASSERT_TRUE(type_string_id != NULL);
+ ASSERT_TRUE(type_string_id != nullptr);
const DexFile::TypeId* type_type_id = dex_file->FindTypeId(
dex_file->GetIndexForStringId(*type_string_id));
- ASSERT_TRUE(type_type_id != NULL);
+ ASSERT_TRUE(type_type_id != nullptr);
const DexFile::StringId* name_str_id = dex_file->FindStringId("s0");
- ASSERT_TRUE(name_str_id != NULL);
+ ASSERT_TRUE(name_str_id != nullptr);
const DexFile::FieldId* field_id = dex_file->FindFieldId(
*klass_type_id, *name_str_id, *type_type_id);
- ASSERT_TRUE(field_id != NULL);
+ ASSERT_TRUE(field_id != nullptr);
uint32_t field_idx = dex_file->GetIndexForFieldId(*field_id);
ArtField* field = FindFieldFromCode<StaticObjectRead, true>(field_idx, clinit, Thread::Current(),
sizeof(HeapReference<Object>));
Object* s0 = field->GetObj(klass);
- EXPECT_TRUE(s0 != NULL);
+ EXPECT_TRUE(s0 != nullptr);
Handle<CharArray> char_array(hs.NewHandle(CharArray::Alloc(soa.Self(), 0)));
field->SetObj<false>(field->GetDeclaringClass(), char_array.Get());
EXPECT_EQ(char_array.Get(), field->GetObj(klass));
- field->SetObj<false>(field->GetDeclaringClass(), NULL);
- EXPECT_EQ(NULL, field->GetObj(klass));
+ field->SetObj<false>(field->GetDeclaringClass(), nullptr);
+ EXPECT_EQ(nullptr, field->GetObj(klass));
// TODO: more exhaustive tests of all 6 cases of ArtField::*FromCode
}
@@ -416,13 +418,15 @@ TEST_F(ObjectTest, String) {
AssertString(1, "\xc2\x80", "\x00\x80", 0x80);
AssertString(1, "\xd9\xa6", "\x06\x66", 0x0666);
AssertString(1, "\xdf\xbf", "\x07\xff", 0x07ff);
- AssertString(3, "h\xd9\xa6i", "\x00\x68\x06\x66\x00\x69", (31 * ((31 * 0x68) + 0x0666)) + 0x69);
+ AssertString(3, "h\xd9\xa6i", "\x00\x68\x06\x66\x00\x69",
+ (31 * ((31 * 0x68) + 0x0666)) + 0x69);
// Test three-byte characters.
AssertString(1, "\xe0\xa0\x80", "\x08\x00", 0x0800);
AssertString(1, "\xe1\x88\xb4", "\x12\x34", 0x1234);
AssertString(1, "\xef\xbf\xbf", "\xff\xff", 0xffff);
- AssertString(3, "h\xe1\x88\xb4i", "\x00\x68\x12\x34\x00\x69", (31 * ((31 * 0x68) + 0x1234)) + 0x69);
+ AssertString(3, "h\xe1\x88\xb4i", "\x00\x68\x12\x34\x00\x69",
+ (31 * ((31 * 0x68) + 0x1234)) + 0x69);
// Test four-byte characters.
AssertString(2, "\xf0\x9f\x8f\xa0", "\xd8\x3c\xdf\xe0", (31 * 0xd83c) + 0xdfe0);
@@ -507,9 +511,9 @@ TEST_F(ObjectTest, DescriptorCompare) {
Handle<ClassLoader> class_loader_2(hs.NewHandle(soa.Decode<ClassLoader*>(jclass_loader_2)));
Class* klass1 = linker->FindClass(soa.Self(), "LProtoCompare;", class_loader_1);
- ASSERT_TRUE(klass1 != NULL);
+ ASSERT_TRUE(klass1 != nullptr);
Class* klass2 = linker->FindClass(soa.Self(), "LProtoCompare2;", class_loader_2);
- ASSERT_TRUE(klass2 != NULL);
+ ASSERT_TRUE(klass2 != nullptr);
ArtMethod* m1_1 = klass1->GetVirtualMethod(0);
EXPECT_STREQ(m1_1->GetName(), "m1");
@@ -550,13 +554,13 @@ TEST_F(ObjectTest, InstanceOf) {
Class* X = class_linker_->FindClass(soa.Self(), "LX;", class_loader);
Class* Y = class_linker_->FindClass(soa.Self(), "LY;", class_loader);
- ASSERT_TRUE(X != NULL);
- ASSERT_TRUE(Y != NULL);
+ ASSERT_TRUE(X != nullptr);
+ ASSERT_TRUE(Y != nullptr);
Handle<Object> x(hs.NewHandle(X->AllocObject(soa.Self())));
Handle<Object> y(hs.NewHandle(Y->AllocObject(soa.Self())));
- ASSERT_TRUE(x.Get() != NULL);
- ASSERT_TRUE(y.Get() != NULL);
+ ASSERT_TRUE(x.Get() != nullptr);
+ ASSERT_TRUE(y.Get() != nullptr);
EXPECT_TRUE(x->InstanceOf(X));
EXPECT_FALSE(x->InstanceOf(Y));
@@ -571,8 +575,10 @@ TEST_F(ObjectTest, InstanceOf) {
// All array classes implement Cloneable and Serializable.
Object* array = ObjectArray<Object>::Alloc(soa.Self(), Object_array_class, 1);
- Class* java_lang_Cloneable = class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/Cloneable;");
- Class* java_io_Serializable = class_linker_->FindSystemClass(soa.Self(), "Ljava/io/Serializable;");
+ Class* java_lang_Cloneable =
+ class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/Cloneable;");
+ Class* java_io_Serializable =
+ class_linker_->FindSystemClass(soa.Self(), "Ljava/io/Serializable;");
EXPECT_TRUE(array->InstanceOf(java_lang_Cloneable));
EXPECT_TRUE(array->InstanceOf(java_io_Serializable));
}
@@ -622,35 +628,35 @@ TEST_F(ObjectTest, IsAssignableFromArray) {
Handle<ClassLoader> class_loader(hs.NewHandle(soa.Decode<ClassLoader*>(jclass_loader)));
Class* X = class_linker_->FindClass(soa.Self(), "LX;", class_loader);
Class* Y = class_linker_->FindClass(soa.Self(), "LY;", class_loader);
- ASSERT_TRUE(X != NULL);
- ASSERT_TRUE(Y != NULL);
+ ASSERT_TRUE(X != nullptr);
+ ASSERT_TRUE(Y != nullptr);
Class* YA = class_linker_->FindClass(soa.Self(), "[LY;", class_loader);
Class* YAA = class_linker_->FindClass(soa.Self(), "[[LY;", class_loader);
- ASSERT_TRUE(YA != NULL);
- ASSERT_TRUE(YAA != NULL);
+ ASSERT_TRUE(YA != nullptr);
+ ASSERT_TRUE(YAA != nullptr);
Class* XAA = class_linker_->FindClass(soa.Self(), "[[LX;", class_loader);
- ASSERT_TRUE(XAA != NULL);
+ ASSERT_TRUE(XAA != nullptr);
Class* O = class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/Object;");
Class* OA = class_linker_->FindSystemClass(soa.Self(), "[Ljava/lang/Object;");
Class* OAA = class_linker_->FindSystemClass(soa.Self(), "[[Ljava/lang/Object;");
Class* OAAA = class_linker_->FindSystemClass(soa.Self(), "[[[Ljava/lang/Object;");
- ASSERT_TRUE(O != NULL);
- ASSERT_TRUE(OA != NULL);
- ASSERT_TRUE(OAA != NULL);
- ASSERT_TRUE(OAAA != NULL);
+ ASSERT_TRUE(O != nullptr);
+ ASSERT_TRUE(OA != nullptr);
+ ASSERT_TRUE(OAA != nullptr);
+ ASSERT_TRUE(OAAA != nullptr);
Class* S = class_linker_->FindSystemClass(soa.Self(), "Ljava/io/Serializable;");
Class* SA = class_linker_->FindSystemClass(soa.Self(), "[Ljava/io/Serializable;");
Class* SAA = class_linker_->FindSystemClass(soa.Self(), "[[Ljava/io/Serializable;");
- ASSERT_TRUE(S != NULL);
- ASSERT_TRUE(SA != NULL);
- ASSERT_TRUE(SAA != NULL);
+ ASSERT_TRUE(S != nullptr);
+ ASSERT_TRUE(SA != nullptr);
+ ASSERT_TRUE(SAA != nullptr);
Class* IA = class_linker_->FindSystemClass(soa.Self(), "[I");
- ASSERT_TRUE(IA != NULL);
+ ASSERT_TRUE(IA != nullptr);
EXPECT_TRUE(YAA->IsAssignableFrom(YAA)); // identity
EXPECT_TRUE(XAA->IsAssignableFrom(YAA)); // element superclass
@@ -673,60 +679,62 @@ TEST_F(ObjectTest, FindInstanceField) {
ScopedObjectAccess soa(Thread::Current());
StackHandleScope<1> hs(soa.Self());
Handle<String> s(hs.NewHandle(String::AllocFromModifiedUtf8(soa.Self(), "ABC")));
- ASSERT_TRUE(s.Get() != NULL);
+ ASSERT_TRUE(s.Get() != nullptr);
Class* c = s->GetClass();
- ASSERT_TRUE(c != NULL);
+ ASSERT_TRUE(c != nullptr);
// Wrong type.
- EXPECT_TRUE(c->FindDeclaredInstanceField("count", "J") == NULL);
- EXPECT_TRUE(c->FindInstanceField("count", "J") == NULL);
+ EXPECT_TRUE(c->FindDeclaredInstanceField("count", "J") == nullptr);
+ EXPECT_TRUE(c->FindInstanceField("count", "J") == nullptr);
// Wrong name.
- EXPECT_TRUE(c->FindDeclaredInstanceField("Count", "I") == NULL);
- EXPECT_TRUE(c->FindInstanceField("Count", "I") == NULL);
+ EXPECT_TRUE(c->FindDeclaredInstanceField("Count", "I") == nullptr);
+ EXPECT_TRUE(c->FindInstanceField("Count", "I") == nullptr);
// Right name and type.
ArtField* f1 = c->FindDeclaredInstanceField("count", "I");
ArtField* f2 = c->FindInstanceField("count", "I");
- EXPECT_TRUE(f1 != NULL);
- EXPECT_TRUE(f2 != NULL);
+ EXPECT_TRUE(f1 != nullptr);
+ EXPECT_TRUE(f2 != nullptr);
EXPECT_EQ(f1, f2);
// TODO: check that s.count == 3.
// Ensure that we handle superclass fields correctly...
c = class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/StringBuilder;");
- ASSERT_TRUE(c != NULL);
+ ASSERT_TRUE(c != nullptr);
// No StringBuilder.count...
- EXPECT_TRUE(c->FindDeclaredInstanceField("count", "I") == NULL);
+ EXPECT_TRUE(c->FindDeclaredInstanceField("count", "I") == nullptr);
// ...but there is an AbstractStringBuilder.count.
- EXPECT_TRUE(c->FindInstanceField("count", "I") != NULL);
+ EXPECT_TRUE(c->FindInstanceField("count", "I") != nullptr);
}
TEST_F(ObjectTest, FindStaticField) {
ScopedObjectAccess soa(Thread::Current());
StackHandleScope<4> hs(soa.Self());
Handle<String> s(hs.NewHandle(String::AllocFromModifiedUtf8(soa.Self(), "ABC")));
- ASSERT_TRUE(s.Get() != NULL);
+ ASSERT_TRUE(s.Get() != nullptr);
Handle<Class> c(hs.NewHandle(s->GetClass()));
- ASSERT_TRUE(c.Get() != NULL);
+ ASSERT_TRUE(c.Get() != nullptr);
// Wrong type.
- EXPECT_TRUE(c->FindDeclaredStaticField("CASE_INSENSITIVE_ORDER", "I") == NULL);
- EXPECT_TRUE(mirror::Class::FindStaticField(soa.Self(), c, "CASE_INSENSITIVE_ORDER", "I") == NULL);
+ EXPECT_TRUE(c->FindDeclaredStaticField("CASE_INSENSITIVE_ORDER", "I") == nullptr);
+ EXPECT_TRUE(mirror::Class::FindStaticField(
+ soa.Self(), c, "CASE_INSENSITIVE_ORDER", "I") == nullptr);
// Wrong name.
- EXPECT_TRUE(c->FindDeclaredStaticField("cASE_INSENSITIVE_ORDER", "Ljava/util/Comparator;") == NULL);
+ EXPECT_TRUE(c->FindDeclaredStaticField(
+ "cASE_INSENSITIVE_ORDER", "Ljava/util/Comparator;") == nullptr);
EXPECT_TRUE(
mirror::Class::FindStaticField(soa.Self(), c, "cASE_INSENSITIVE_ORDER",
- "Ljava/util/Comparator;") == NULL);
+ "Ljava/util/Comparator;") == nullptr);
// Right name and type.
ArtField* f1 = c->FindDeclaredStaticField("CASE_INSENSITIVE_ORDER", "Ljava/util/Comparator;");
ArtField* f2 = mirror::Class::FindStaticField(soa.Self(), c, "CASE_INSENSITIVE_ORDER",
"Ljava/util/Comparator;");
- EXPECT_TRUE(f1 != NULL);
- EXPECT_TRUE(f2 != NULL);
+ EXPECT_TRUE(f1 != nullptr);
+ EXPECT_TRUE(f2 != nullptr);
EXPECT_EQ(f1, f2);
// TODO: test static fields via superclasses.
diff --git a/runtime/mirror/stack_trace_element.cc b/runtime/mirror/stack_trace_element.cc
index ec2b495..96f6a53 100644
--- a/runtime/mirror/stack_trace_element.cc
+++ b/runtime/mirror/stack_trace_element.cc
@@ -30,7 +30,7 @@ GcRoot<Class> StackTraceElement::java_lang_StackTraceElement_;
void StackTraceElement::SetClass(Class* java_lang_StackTraceElement) {
CHECK(java_lang_StackTraceElement_.IsNull());
- CHECK(java_lang_StackTraceElement != NULL);
+ CHECK(java_lang_StackTraceElement != nullptr);
java_lang_StackTraceElement_ = GcRoot<Class>(java_lang_StackTraceElement);
}
@@ -44,7 +44,7 @@ StackTraceElement* StackTraceElement::Alloc(Thread* self, Handle<String> declari
int32_t line_number) {
StackTraceElement* trace =
down_cast<StackTraceElement*>(GetStackTraceElement()->AllocObject(self));
- if (LIKELY(trace != NULL)) {
+ if (LIKELY(trace != nullptr)) {
if (Runtime::Current()->IsActiveTransaction()) {
trace->Init<true>(declaring_class, method_name, file_name, line_number);
} else {
diff --git a/runtime/mirror/string-inl.h b/runtime/mirror/string-inl.h
index 4a95519..b367cff 100644
--- a/runtime/mirror/string-inl.h
+++ b/runtime/mirror/string-inl.h
@@ -50,7 +50,7 @@ inline int32_t String::GetLength() {
inline void String::SetArray(CharArray* new_array) {
// Array is invariant so use non-transactional mode. Also disable check as we may run inside
// a transaction.
- DCHECK(new_array != NULL);
+ DCHECK(new_array != nullptr);
SetFieldObject<false, false>(OFFSET_OF_OBJECT_MEMBER(String, array_), new_array);
}
diff --git a/runtime/mirror/string.cc b/runtime/mirror/string.cc
index bd6a63c..b7fd240 100644
--- a/runtime/mirror/string.cc
+++ b/runtime/mirror/string.cc
@@ -53,7 +53,7 @@ int32_t String::FastIndexOf(int32_t ch, int32_t start) {
void String::SetClass(Class* java_lang_String) {
CHECK(java_lang_String_.IsNull());
- CHECK(java_lang_String != NULL);
+ CHECK(java_lang_String != nullptr);
java_lang_String_ = GcRoot<Class>(java_lang_String);
}
@@ -137,7 +137,7 @@ bool String::Equals(String* that) {
if (this == that) {
// Quick reference equality test
return true;
- } else if (that == NULL) {
+ } else if (that == nullptr) {
// Null isn't an instanceof anything
return false;
} else if (this->GetLength() != that->GetLength()) {
diff --git a/runtime/mirror/throwable.cc b/runtime/mirror/throwable.cc
index b564649..ca94644 100644
--- a/runtime/mirror/throwable.cc
+++ b/runtime/mirror/throwable.cc
@@ -45,7 +45,7 @@ void Throwable::SetCause(Throwable* cause) {
CHECK(cause != nullptr);
CHECK(cause != this);
Throwable* current_cause = GetFieldObject<Throwable>(OFFSET_OF_OBJECT_MEMBER(Throwable, cause_));
- CHECK(current_cause == NULL || current_cause == this);
+ CHECK(current_cause == nullptr || current_cause == this);
if (Runtime::Current()->IsActiveTransaction()) {
SetFieldObject<true>(OFFSET_OF_OBJECT_MEMBER(Throwable, cause_), cause);
} else {
@@ -80,7 +80,7 @@ std::string Throwable::Dump() {
std::string result(PrettyTypeOf(this));
result += ": ";
String* msg = GetDetailMessage();
- if (msg != NULL) {
+ if (msg != nullptr) {
result += msg->ToModifiedUtf8();
}
result += "\n";
@@ -135,7 +135,7 @@ std::string Throwable::Dump() {
void Throwable::SetClass(Class* java_lang_Throwable) {
CHECK(java_lang_Throwable_.IsNull());
- CHECK(java_lang_Throwable != NULL);
+ CHECK(java_lang_Throwable != nullptr);
java_lang_Throwable_ = GcRoot<Class>(java_lang_Throwable);
}
diff --git a/runtime/monitor.cc b/runtime/monitor.cc
index 1a80ded..4b41225 100644
--- a/runtime/monitor.cc
+++ b/runtime/monitor.cc
@@ -68,11 +68,11 @@ static constexpr uint64_t kLongWaitMs = 100;
* at any given time.
*/
-bool (*Monitor::is_sensitive_thread_hook_)() = NULL;
+bool (*Monitor::is_sensitive_thread_hook_)() = nullptr;
uint32_t Monitor::lock_profiling_threshold_ = 0;
bool Monitor::IsSensitiveThread() {
- if (is_sensitive_thread_hook_ != NULL) {
+ if (is_sensitive_thread_hook_ != nullptr) {
return (*is_sensitive_thread_hook_)();
}
return false;
@@ -90,9 +90,9 @@ Monitor::Monitor(Thread* self, Thread* owner, mirror::Object* obj, int32_t hash_
owner_(owner),
lock_count_(0),
obj_(GcRoot<mirror::Object>(obj)),
- wait_set_(NULL),
+ wait_set_(nullptr),
hash_code_(hash_code),
- locking_method_(NULL),
+ locking_method_(nullptr),
locking_dex_pc_(0),
monitor_id_(MonitorPool::ComputeMonitorId(this, self)) {
#ifdef __LP64__
@@ -113,9 +113,9 @@ Monitor::Monitor(Thread* self, Thread* owner, mirror::Object* obj, int32_t hash_
owner_(owner),
lock_count_(0),
obj_(GcRoot<mirror::Object>(obj)),
- wait_set_(NULL),
+ wait_set_(nullptr),
hash_code_(hash_code),
- locking_method_(NULL),
+ locking_method_(nullptr),
locking_dex_pc_(0),
monitor_id_(id) {
#ifdef __LP64__
@@ -183,9 +183,9 @@ Monitor::~Monitor() {
void Monitor::AppendToWaitSet(Thread* thread) {
DCHECK(owner_ == Thread::Current());
- DCHECK(thread != NULL);
+ DCHECK(thread != nullptr);
DCHECK(thread->GetWaitNext() == nullptr) << thread->GetWaitNext();
- if (wait_set_ == NULL) {
+ if (wait_set_ == nullptr) {
wait_set_ = thread;
return;
}
@@ -200,8 +200,8 @@ void Monitor::AppendToWaitSet(Thread* thread) {
void Monitor::RemoveFromWaitSet(Thread *thread) {
DCHECK(owner_ == Thread::Current());
- DCHECK(thread != NULL);
- if (wait_set_ == NULL) {
+ DCHECK(thread != nullptr);
+ if (wait_set_ == nullptr) {
return;
}
if (wait_set_ == thread) {
@@ -211,7 +211,7 @@ void Monitor::RemoveFromWaitSet(Thread *thread) {
}
Thread* t = wait_set_;
- while (t->GetWaitNext() != NULL) {
+ while (t->GetWaitNext() != nullptr) {
if (t->GetWaitNext() == thread) {
t->SetWaitNext(thread->GetWaitNext());
thread->SetWaitNext(nullptr);
@@ -253,7 +253,8 @@ void Monitor::Lock(Thread* self) {
self->SetMonitorEnterObject(GetObject());
{
ScopedThreadStateChange tsc(self, kBlocked); // Change to blocked and give up mutator_lock_.
- MutexLock mu2(self, monitor_lock_); // Reacquire monitor_lock_ without mutator_lock_ for Wait.
+ // Reacquire monitor_lock_ without mutator_lock_ for Wait.
+ MutexLock mu2(self, monitor_lock_);
if (owner_ != nullptr) { // Did the owner_ give the lock up?
if (ATRACE_ENABLED()) {
std::string name;
@@ -311,8 +312,8 @@ static void ThrowIllegalMonitorStateExceptionF(const char* fmt, ...)
}
static std::string ThreadToString(Thread* thread) {
- if (thread == NULL) {
- return "NULL";
+ if (thread == nullptr) {
+ return "nullptr";
}
std::ostringstream oss;
// TODO: alternatively, we could just return the thread's name.
@@ -322,7 +323,7 @@ static std::string ThreadToString(Thread* thread) {
void Monitor::FailedUnlock(mirror::Object* o, Thread* expected_owner, Thread* found_owner,
Monitor* monitor) {
- Thread* current_owner = NULL;
+ Thread* current_owner = nullptr;
std::string current_owner_string;
std::string expected_owner_string;
std::string found_owner_string;
@@ -331,14 +332,14 @@ void Monitor::FailedUnlock(mirror::Object* o, Thread* expected_owner, Thread* fo
// Acquire thread list lock so threads won't disappear from under us.
MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
// Re-read owner now that we hold lock.
- current_owner = (monitor != NULL) ? monitor->GetOwner() : NULL;
+ current_owner = (monitor != nullptr) ? monitor->GetOwner() : nullptr;
// Get short descriptions of the threads involved.
current_owner_string = ThreadToString(current_owner);
expected_owner_string = ThreadToString(expected_owner);
found_owner_string = ThreadToString(found_owner);
}
- if (current_owner == NULL) {
- if (found_owner == NULL) {
+ if (current_owner == nullptr) {
+ if (found_owner == nullptr) {
ThrowIllegalMonitorStateExceptionF("unlock of unowned monitor on object of type '%s'"
" on thread '%s'",
PrettyTypeOf(o).c_str(),
@@ -352,7 +353,7 @@ void Monitor::FailedUnlock(mirror::Object* o, Thread* expected_owner, Thread* fo
expected_owner_string.c_str());
}
} else {
- if (found_owner == NULL) {
+ if (found_owner == nullptr) {
// Race: originally there was no owner, there is now
ThrowIllegalMonitorStateExceptionF("unlock of monitor owned by '%s' on object of type '%s'"
" (originally believed to be unowned) on thread '%s'",
@@ -380,14 +381,14 @@ void Monitor::FailedUnlock(mirror::Object* o, Thread* expected_owner, Thread* fo
}
bool Monitor::Unlock(Thread* self) {
- DCHECK(self != NULL);
+ DCHECK(self != nullptr);
MutexLock mu(self, monitor_lock_);
Thread* owner = owner_;
if (owner == self) {
// We own the monitor, so nobody else can be in here.
if (lock_count_ == 0) {
- owner_ = NULL;
- locking_method_ = NULL;
+ owner_ = nullptr;
+ locking_method_ = nullptr;
locking_dex_pc_ = 0;
// Wake a contender.
monitor_contenders_.Signal(self);
@@ -406,7 +407,7 @@ bool Monitor::Unlock(Thread* self) {
void Monitor::Wait(Thread* self, int64_t ms, int32_t ns,
bool interruptShouldThrow, ThreadState why) {
- DCHECK(self != NULL);
+ DCHECK(self != nullptr);
DCHECK(why == kTimedWaiting || why == kWaiting || why == kSleeping);
monitor_lock_.Lock(self);
@@ -446,9 +447,9 @@ void Monitor::Wait(Thread* self, int64_t ms, int32_t ns,
++num_waiters_;
int prev_lock_count = lock_count_;
lock_count_ = 0;
- owner_ = NULL;
+ owner_ = nullptr;
mirror::ArtMethod* saved_method = locking_method_;
- locking_method_ = NULL;
+ locking_method_ = nullptr;
uintptr_t saved_dex_pc = locking_dex_pc_;
locking_dex_pc_ = 0;
@@ -465,7 +466,7 @@ void Monitor::Wait(Thread* self, int64_t ms, int32_t ns,
MutexLock mu(self, *self->GetWaitMutex());
// Set wait_monitor_ to the monitor object we will be waiting on. When wait_monitor_ is
- // non-NULL a notifying or interrupting thread must signal the thread's wait_cond_ to wake it
+ // non-null a notifying or interrupting thread must signal the thread's wait_cond_ to wake it
// up.
DCHECK(self->GetWaitMonitor() == nullptr);
self->SetWaitMonitor(this);
@@ -538,13 +539,13 @@ void Monitor::Wait(Thread* self, int64_t ms, int32_t ns,
self->SetInterruptedLocked(false);
}
if (interruptShouldThrow) {
- self->ThrowNewException("Ljava/lang/InterruptedException;", NULL);
+ self->ThrowNewException("Ljava/lang/InterruptedException;", nullptr);
}
}
}
void Monitor::Notify(Thread* self) {
- DCHECK(self != NULL);
+ DCHECK(self != nullptr);
MutexLock mu(self, monitor_lock_);
// Make sure that we hold the lock.
if (owner_ != self) {
@@ -552,7 +553,7 @@ void Monitor::Notify(Thread* self) {
return;
}
// Signal the first waiting thread in the wait set.
- while (wait_set_ != NULL) {
+ while (wait_set_ != nullptr) {
Thread* thread = wait_set_;
wait_set_ = thread->GetWaitNext();
thread->SetWaitNext(nullptr);
@@ -567,7 +568,7 @@ void Monitor::Notify(Thread* self) {
}
void Monitor::NotifyAll(Thread* self) {
- DCHECK(self != NULL);
+ DCHECK(self != nullptr);
MutexLock mu(self, monitor_lock_);
// Make sure that we hold the lock.
if (owner_ != self) {
@@ -575,7 +576,7 @@ void Monitor::NotifyAll(Thread* self) {
return;
}
// Signal all threads in the wait set.
- while (wait_set_ != NULL) {
+ while (wait_set_ != nullptr) {
Thread* thread = wait_set_;
wait_set_ = thread->GetWaitNext();
thread->SetWaitNext(nullptr);
@@ -625,7 +626,7 @@ bool Monitor::Deflate(Thread* self, mirror::Object* obj) {
obj->SetLockWord(new_lw, false);
VLOG(monitor) << "Deflated" << obj << " to empty lock word";
}
- // The monitor is deflated, mark the object as nullptr so that we know to delete it during the
+ // The monitor is deflated, mark the object as null so that we know to delete it during the
// next GC.
monitor->obj_ = GcRoot<mirror::Object>(nullptr);
}
@@ -697,8 +698,8 @@ static mirror::Object* FakeUnlock(mirror::Object* obj)
}
mirror::Object* Monitor::MonitorEnter(Thread* self, mirror::Object* obj) {
- DCHECK(self != NULL);
- DCHECK(obj != NULL);
+ DCHECK(self != nullptr);
+ DCHECK(obj != nullptr);
obj = FakeLock(obj);
uint32_t thread_id = self->GetThreadId();
size_t contention_count = 0;
@@ -772,8 +773,8 @@ mirror::Object* Monitor::MonitorEnter(Thread* self, mirror::Object* obj) {
}
bool Monitor::MonitorExit(Thread* self, mirror::Object* obj) {
- DCHECK(self != NULL);
- DCHECK(obj != NULL);
+ DCHECK(self != nullptr);
+ DCHECK(obj != nullptr);
obj = FakeUnlock(obj);
StackHandleScope<1> hs(self);
Handle<mirror::Object> h_obj(hs.NewHandle(obj));
@@ -979,11 +980,11 @@ mirror::Object* Monitor::GetContendedMonitor(Thread* thread) {
// This is used to implement JDWP's ThreadReference.CurrentContendedMonitor, and has a bizarre
// definition of contended that includes a monitor a thread is trying to enter...
mirror::Object* result = thread->GetMonitorEnterObject();
- if (result == NULL) {
+ if (result == nullptr) {
// ...but also a monitor that the thread is waiting on.
MutexLock mu(Thread::Current(), *thread->GetWaitMutex());
Monitor* monitor = thread->GetWaitMonitor();
- if (monitor != NULL) {
+ if (monitor != nullptr) {
result = monitor->GetObject();
}
}
@@ -993,7 +994,7 @@ mirror::Object* Monitor::GetContendedMonitor(Thread* thread) {
void Monitor::VisitLocks(StackVisitor* stack_visitor, void (*callback)(mirror::Object*, void*),
void* callback_context, bool abort_on_failure) {
mirror::ArtMethod* m = stack_visitor->GetMethod();
- CHECK(m != NULL);
+ CHECK(m != nullptr);
// Native methods are an easy special case.
// TODO: use the JNI implementation's table of explicit MonitorEnter calls and dump those too.
@@ -1013,7 +1014,7 @@ void Monitor::VisitLocks(StackVisitor* stack_visitor, void (*callback)(mirror::O
// Is there any reason to believe there's any synchronization in this method?
const DexFile::CodeItem* code_item = m->GetCodeItem();
- CHECK(code_item != NULL) << PrettyMethod(m);
+ CHECK(code_item != nullptr) << PrettyMethod(m);
if (code_item->tries_size_ == 0) {
return; // No "tries" implies no synchronization, so no held locks to report.
}
@@ -1088,13 +1089,13 @@ bool Monitor::IsLocked() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
void Monitor::TranslateLocation(mirror::ArtMethod* method, uint32_t dex_pc,
const char** source_file, uint32_t* line_number) const {
// If method is null, location is unknown
- if (method == NULL) {
+ if (method == nullptr) {
*source_file = "";
*line_number = 0;
return;
}
*source_file = method->GetDeclaringClassSourceFile();
- if (*source_file == NULL) {
+ if (*source_file == nullptr) {
*source_file = "";
}
*line_number = method->GetLineNumFromDexPC(dex_pc);
@@ -1103,7 +1104,7 @@ void Monitor::TranslateLocation(mirror::ArtMethod* method, uint32_t dex_pc,
uint32_t Monitor::GetOwnerThreadId() {
MutexLock mu(Thread::Current(), monitor_lock_);
Thread* owner = owner_;
- if (owner != NULL) {
+ if (owner != nullptr) {
return owner->GetThreadId();
} else {
return ThreadList::kInvalidThreadId;
@@ -1185,7 +1186,7 @@ static mirror::Object* MonitorDeflateCallback(mirror::Object* object, void* arg)
if (Monitor::Deflate(args->self, object)) {
DCHECK_NE(object->GetLockWord(true).GetState(), LockWord::kFatLocked);
++args->deflate_count;
- // If we deflated, return nullptr so that the monitor gets removed from the array.
+ // If we deflated, return null so that the monitor gets removed from the array.
return nullptr;
}
return object; // Monitor was not deflated.
@@ -1198,7 +1199,7 @@ size_t MonitorList::DeflateMonitors() {
return args.deflate_count;
}
-MonitorInfo::MonitorInfo(mirror::Object* obj) : owner_(NULL), entry_count_(0) {
+MonitorInfo::MonitorInfo(mirror::Object* obj) : owner_(nullptr), entry_count_(0) {
DCHECK(obj != nullptr);
LockWord lock_word = obj->GetLockWord(true);
switch (lock_word.GetState()) {
@@ -1217,7 +1218,7 @@ MonitorInfo::MonitorInfo(mirror::Object* obj) : owner_(NULL), entry_count_(0) {
Monitor* mon = lock_word.FatLockMonitor();
owner_ = mon->owner_;
entry_count_ = 1 + mon->lock_count_;
- for (Thread* waiter = mon->wait_set_; waiter != NULL; waiter = waiter->GetWaitNext()) {
+ for (Thread* waiter = mon->wait_set_; waiter != nullptr; waiter = waiter->GetWaitNext()) {
waiters_.push_back(waiter);
}
break;
diff --git a/runtime/monitor_android.cc b/runtime/monitor_android.cc
index d89290b..48c9cce 100644
--- a/runtime/monitor_android.cc
+++ b/runtime/monitor_android.cc
@@ -88,7 +88,7 @@ void Monitor::LogContentionEvent(Thread* self, uint32_t wait_ms, uint32_t sample
cp = EventLogWriteInt(cp, line_number);
// Emit the lock owner source code file name, <= 37 bytes.
- if (owner_filename == NULL) {
+ if (owner_filename == nullptr) {
owner_filename = "";
} else if (strcmp(filename, owner_filename) == 0) {
// Common case, so save on log space.
diff --git a/runtime/monitor_pool.h b/runtime/monitor_pool.h
index 8ae5a54..4ab4e86 100644
--- a/runtime/monitor_pool.h
+++ b/runtime/monitor_pool.h
@@ -138,7 +138,8 @@ class MonitorPool {
for (size_t index = 0; index < num_chunks_; ++index) {
uintptr_t chunk_addr = *(monitor_chunks_.LoadRelaxed() + index);
if (IsInChunk(chunk_addr, mon)) {
- return OffsetToMonitorId(reinterpret_cast<uintptr_t>(mon) - chunk_addr + index * kChunkSize);
+ return OffsetToMonitorId(
+ reinterpret_cast<uintptr_t>(mon) - chunk_addr + index * kChunkSize);
}
}
LOG(FATAL) << "Did not find chunk that contains monitor.";
diff --git a/runtime/monitor_test.cc b/runtime/monitor_test.cc
index 2351463..30cb2d8 100644
--- a/runtime/monitor_test.cc
+++ b/runtime/monitor_test.cc
@@ -116,8 +116,8 @@ class CreateTask : public Task {
ScopedObjectAccess soa(self);
monitor_test_->thread_ = self; // Pass the Thread.
- monitor_test_->object_.Get()->MonitorEnter(self); // Lock the object. This should transition
- LockWord lock_after = monitor_test_->object_.Get()->GetLockWord(false); // it to thinLocked.
+ monitor_test_->object_.Get()->MonitorEnter(self); // Lock the object. This should transition
+ LockWord lock_after = monitor_test_->object_.Get()->GetLockWord(false); // it to thinLocked.
LockWord::LockState new_state = lock_after.GetState();
// Cannot use ASSERT only, as analysis thinks we'll keep holding the mutex.
diff --git a/runtime/native/dalvik_system_DexFile.cc b/runtime/native/dalvik_system_DexFile.cc
index 87ae64d..4f97d20 100644
--- a/runtime/native/dalvik_system_DexFile.cc
+++ b/runtime/native/dalvik_system_DexFile.cc
@@ -109,7 +109,7 @@ static jlongArray ConvertNativeToJavaArray(JNIEnv* env,
//
// NullableScopedUtfChars name(env, javaName);
// if (env->ExceptionCheck()) {
-// return NULL;
+// return null;
// }
// // ... use name.c_str()
//
@@ -117,7 +117,7 @@ static jlongArray ConvertNativeToJavaArray(JNIEnv* env,
class NullableScopedUtfChars {
public:
NullableScopedUtfChars(JNIEnv* env, jstring s) : mEnv(env), mString(s) {
- mUtfChars = (s != NULL) ? env->GetStringUTFChars(s, NULL) : NULL;
+ mUtfChars = (s != nullptr) ? env->GetStringUTFChars(s, nullptr) : nullptr;
}
~NullableScopedUtfChars() {
@@ -149,9 +149,10 @@ class NullableScopedUtfChars {
void operator=(const NullableScopedUtfChars&);
};
-static jobject DexFile_openDexFileNative(JNIEnv* env, jclass, jstring javaSourceName, jstring javaOutputName, jint) {
+static jobject DexFile_openDexFileNative(
+ JNIEnv* env, jclass, jstring javaSourceName, jstring javaOutputName, jint) {
ScopedUtfChars sourceName(env, javaSourceName);
- if (sourceName.c_str() == NULL) {
+ if (sourceName.c_str() == nullptr) {
return 0;
}
NullableScopedUtfChars outputName(env, javaOutputName);
@@ -224,9 +225,9 @@ static jclass DexFile_defineClassNative(JNIEnv* env, jclass, jstring javaName, j
}
ScopedUtfChars class_name(env, javaName);
- if (class_name.c_str() == NULL) {
+ if (class_name.c_str() == nullptr) {
VLOG(class_linker) << "Failed to find class_name";
- return NULL;
+ return nullptr;
}
const std::string descriptor(DotToDescriptor(class_name.c_str()));
const size_t hash(ComputeModifiedUtf8Hash(descriptor.c_str()));
@@ -367,7 +368,7 @@ static jint DexFile_getDexOptNeeded(JNIEnv* env, jclass, jstring javaFilename,
instruction_set.c_str(), defer);
}
-// public API, NULL pkgname
+// public API, null pkgname
static jboolean DexFile_isDexOptNeeded(JNIEnv* env, jclass, jstring javaFilename) {
const char* instruction_set = GetInstructionSetString(kRuntimeISA);
ScopedUtfChars filename(env, javaFilename);
@@ -378,11 +379,14 @@ static jboolean DexFile_isDexOptNeeded(JNIEnv* env, jclass, jstring javaFilename
static JNINativeMethod gMethods[] = {
NATIVE_METHOD(DexFile, closeDexFile, "(Ljava/lang/Object;)V"),
- NATIVE_METHOD(DexFile, defineClassNative, "(Ljava/lang/String;Ljava/lang/ClassLoader;Ljava/lang/Object;)Ljava/lang/Class;"),
+ NATIVE_METHOD(DexFile, defineClassNative,
+ "(Ljava/lang/String;Ljava/lang/ClassLoader;Ljava/lang/Object;)Ljava/lang/Class;"),
NATIVE_METHOD(DexFile, getClassNameList, "(Ljava/lang/Object;)[Ljava/lang/String;"),
NATIVE_METHOD(DexFile, isDexOptNeeded, "(Ljava/lang/String;)Z"),
- NATIVE_METHOD(DexFile, getDexOptNeeded, "(Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Z)I"),
- NATIVE_METHOD(DexFile, openDexFileNative, "(Ljava/lang/String;Ljava/lang/String;I)Ljava/lang/Object;"),
+ NATIVE_METHOD(DexFile, getDexOptNeeded,
+ "(Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Z)I"),
+ NATIVE_METHOD(DexFile, openDexFileNative,
+ "(Ljava/lang/String;Ljava/lang/String;I)Ljava/lang/Object;"),
};
void register_dalvik_system_DexFile(JNIEnv* env) {
diff --git a/runtime/native/dalvik_system_VMDebug.cc b/runtime/native/dalvik_system_VMDebug.cc
index 876e29a..46881b0 100644
--- a/runtime/native/dalvik_system_VMDebug.cc
+++ b/runtime/native/dalvik_system_VMDebug.cc
@@ -104,7 +104,7 @@ static void VMDebug_startMethodTracingFd(JNIEnv* env, jclass, jstring javaTraceF
}
ScopedUtfChars traceFilename(env, javaTraceFilename);
- if (traceFilename.c_str() == NULL) {
+ if (traceFilename.c_str() == nullptr) {
return;
}
Trace::Start(traceFilename.c_str(), fd, bufferSize, flags, Trace::TraceOutputMode::kFile,
@@ -116,7 +116,7 @@ static void VMDebug_startMethodTracingFilename(JNIEnv* env, jclass, jstring java
jint bufferSize, jint flags,
jboolean samplingEnabled, jint intervalUs) {
ScopedUtfChars traceFilename(env, javaTraceFilename);
- if (traceFilename.c_str() == NULL) {
+ if (traceFilename.c_str() == nullptr) {
return;
}
Trace::Start(traceFilename.c_str(), -1, bufferSize, flags, Trace::TraceOutputMode::kFile,
@@ -156,7 +156,7 @@ static jlong VMDebug_lastDebuggerActivity(JNIEnv*, jclass) {
static void ThrowUnsupportedOperationException(JNIEnv* env) {
ScopedObjectAccess soa(env);
- soa.Self()->ThrowNewException("Ljava/lang/UnsupportedOperationException;", NULL);
+ soa.Self()->ThrowNewException("Ljava/lang/UnsupportedOperationException;", nullptr);
}
static void VMDebug_startInstructionCounting(JNIEnv* env, jclass) {
@@ -200,15 +200,15 @@ static jlong VMDebug_threadCpuTimeNanos(JNIEnv*, jclass) {
* error occurs during file handling.
*/
static void VMDebug_dumpHprofData(JNIEnv* env, jclass, jstring javaFilename, jobject javaFd) {
- // Only one of these may be NULL.
- if (javaFilename == NULL && javaFd == NULL) {
+ // Only one of these may be null.
+ if (javaFilename == nullptr && javaFd == nullptr) {
ScopedObjectAccess soa(env);
ThrowNullPointerException("fileName == null && fd == null");
return;
}
std::string filename;
- if (javaFilename != NULL) {
+ if (javaFilename != nullptr) {
ScopedUtfChars chars(env, javaFilename);
if (env->ExceptionCheck()) {
return;
@@ -219,7 +219,7 @@ static void VMDebug_dumpHprofData(JNIEnv* env, jclass, jstring javaFilename, job
}
int fd = -1;
- if (javaFd != NULL) {
+ if (javaFd != nullptr) {
fd = jniGetFDFromFileDescriptor(env, javaFd);
if (fd < 0) {
ScopedObjectAccess soa(env);
diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc
index 196a231..53bb129 100644
--- a/runtime/native/dalvik_system_VMRuntime.cc
+++ b/runtime/native/dalvik_system_VMRuntime.cc
@@ -114,7 +114,7 @@ static jobject VMRuntime_newUnpaddedArray(JNIEnv* env, jobject, jclass javaEleme
}
static jlong VMRuntime_addressOf(JNIEnv* env, jobject, jobject javaArray) {
- if (javaArray == NULL) { // Most likely allocation failed
+ if (javaArray == nullptr) { // Most likely allocation failed
return 0;
}
ScopedFastNativeObjectAccess soa(env);
@@ -263,17 +263,17 @@ class PreloadDexCachesStringsVisitor : public SingleRootVisitor {
};
// Based on ClassLinker::ResolveString.
-static void PreloadDexCachesResolveString(Handle<mirror::DexCache> dex_cache, uint32_t string_idx,
- StringTable& strings)
+static void PreloadDexCachesResolveString(
+ Handle<mirror::DexCache> dex_cache, uint32_t string_idx, StringTable& strings)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
mirror::String* string = dex_cache->GetResolvedString(string_idx);
- if (string != NULL) {
+ if (string != nullptr) {
return;
}
const DexFile* dex_file = dex_cache->GetDexFile();
const char* utf8 = dex_file->StringDataByIdx(string_idx);
string = strings[utf8];
- if (string == NULL) {
+ if (string == nullptr) {
return;
}
// LOG(INFO) << "VMRuntime.preloadDexCaches resolved string=" << utf8;
@@ -281,10 +281,11 @@ static void PreloadDexCachesResolveString(Handle<mirror::DexCache> dex_cache, ui
}
// Based on ClassLinker::ResolveType.
-static void PreloadDexCachesResolveType(Thread* self, mirror::DexCache* dex_cache, uint32_t type_idx)
+static void PreloadDexCachesResolveType(
+ Thread* self, mirror::DexCache* dex_cache, uint32_t type_idx)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
mirror::Class* klass = dex_cache->GetResolvedType(type_idx);
- if (klass != NULL) {
+ if (klass != nullptr) {
return;
}
const DexFile* dex_file = dex_cache->GetDexFile();
@@ -293,9 +294,9 @@ static void PreloadDexCachesResolveType(Thread* self, mirror::DexCache* dex_cach
if (class_name[1] == '\0') {
klass = linker->FindPrimitiveClass(class_name[0]);
} else {
- klass = linker->LookupClass(self, class_name, ComputeModifiedUtf8Hash(class_name), NULL);
+ klass = linker->LookupClass(self, class_name, ComputeModifiedUtf8Hash(class_name), nullptr);
}
- if (klass == NULL) {
+ if (klass == nullptr) {
return;
}
// LOG(INFO) << "VMRuntime.preloadDexCaches resolved klass=" << class_name;
@@ -321,7 +322,7 @@ static void PreloadDexCachesResolveField(Handle<mirror::DexCache> dex_cache, uin
Thread* const self = Thread::Current();
StackHandleScope<1> hs(self);
Handle<mirror::Class> klass(hs.NewHandle(dex_cache->GetResolvedType(field_id.class_idx_)));
- if (klass.Get() == NULL) {
+ if (klass.Get() == nullptr) {
return;
}
if (is_static) {
@@ -329,7 +330,7 @@ static void PreloadDexCachesResolveField(Handle<mirror::DexCache> dex_cache, uin
} else {
field = klass->FindInstanceField(dex_cache.Get(), field_idx);
}
- if (field == NULL) {
+ if (field == nullptr) {
return;
}
// LOG(INFO) << "VMRuntime.preloadDexCaches resolved field " << PrettyField(field);
@@ -341,13 +342,13 @@ static void PreloadDexCachesResolveMethod(Handle<mirror::DexCache> dex_cache, ui
InvokeType invoke_type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
mirror::ArtMethod* method = dex_cache->GetResolvedMethod(method_idx);
- if (method != NULL) {
+ if (method != nullptr) {
return;
}
const DexFile* dex_file = dex_cache->GetDexFile();
const DexFile::MethodId& method_id = dex_file->GetMethodId(method_idx);
mirror::Class* klass = dex_cache->GetResolvedType(method_id.class_idx_);
- if (klass == NULL) {
+ if (klass == nullptr) {
return;
}
switch (invoke_type) {
@@ -366,7 +367,7 @@ static void PreloadDexCachesResolveMethod(Handle<mirror::DexCache> dex_cache, ui
LOG(FATAL) << "Unreachable - invocation type: " << invoke_type;
UNREACHABLE();
}
- if (method == NULL) {
+ if (method == nullptr) {
return;
}
// LOG(INFO) << "VMRuntime.preloadDexCaches resolved method " << PrettyMethod(method);
@@ -404,7 +405,7 @@ static void PreloadDexCachesStatsTotal(DexCacheStats* total) {
const std::vector<const DexFile*>& boot_class_path = linker->GetBootClassPath();
for (size_t i = 0; i< boot_class_path.size(); i++) {
const DexFile* dex_file = boot_class_path[i];
- CHECK(dex_file != NULL);
+ CHECK(dex_file != nullptr);
total->num_strings += dex_file->NumStringIds();
total->num_fields += dex_file->NumFieldIds();
total->num_methods += dex_file->NumMethodIds();
@@ -421,29 +422,29 @@ static void PreloadDexCachesStatsFilled(DexCacheStats* filled)
const std::vector<const DexFile*>& boot_class_path = linker->GetBootClassPath();
for (size_t i = 0; i< boot_class_path.size(); i++) {
const DexFile* dex_file = boot_class_path[i];
- CHECK(dex_file != NULL);
+ CHECK(dex_file != nullptr);
mirror::DexCache* dex_cache = linker->FindDexCache(*dex_file);
for (size_t j = 0; j < dex_cache->NumStrings(); j++) {
mirror::String* string = dex_cache->GetResolvedString(j);
- if (string != NULL) {
+ if (string != nullptr) {
filled->num_strings++;
}
}
for (size_t j = 0; j < dex_cache->NumResolvedTypes(); j++) {
mirror::Class* klass = dex_cache->GetResolvedType(j);
- if (klass != NULL) {
+ if (klass != nullptr) {
filled->num_types++;
}
}
for (size_t j = 0; j < dex_cache->NumResolvedFields(); j++) {
ArtField* field = linker->GetResolvedField(j, dex_cache);
- if (field != NULL) {
+ if (field != nullptr) {
filled->num_fields++;
}
}
for (size_t j = 0; j < dex_cache->NumResolvedMethods(); j++) {
mirror::ArtMethod* method = dex_cache->GetResolvedMethod(j);
- if (method != NULL) {
+ if (method != nullptr) {
filled->num_methods++;
}
}
@@ -482,7 +483,7 @@ static void VMRuntime_preloadDexCaches(JNIEnv* env, jobject) {
const std::vector<const DexFile*>& boot_class_path = linker->GetBootClassPath();
for (size_t i = 0; i< boot_class_path.size(); i++) {
const DexFile* dex_file = boot_class_path[i];
- CHECK(dex_file != NULL);
+ CHECK(dex_file != nullptr);
StackHandleScope<1> hs(soa.Self());
Handle<mirror::DexCache> dex_cache(hs.NewHandle(linker->FindDexCache(*dex_file)));
@@ -504,7 +505,7 @@ static void VMRuntime_preloadDexCaches(JNIEnv* env, jobject) {
class_def_index++) {
const DexFile::ClassDef& class_def = dex_file->GetClassDef(class_def_index);
const uint8_t* class_data = dex_file->GetClassData(class_def);
- if (class_data == NULL) {
+ if (class_data == nullptr) {
continue;
}
ClassDataItemIterator it(*dex_file, class_data);
diff --git a/runtime/native/java_lang_Class.cc b/runtime/native/java_lang_Class.cc
index 51a897d..b0d923b 100644
--- a/runtime/native/java_lang_Class.cc
+++ b/runtime/native/java_lang_Class.cc
@@ -42,7 +42,7 @@ ALWAYS_INLINE static inline mirror::Class* DecodeClass(
const ScopedFastNativeObjectAccess& soa, jobject java_class)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
mirror::Class* c = soa.Decode<mirror::Class*>(java_class);
- DCHECK(c != NULL);
+ DCHECK(c != nullptr);
DCHECK(c->IsClass());
// TODO: we could EnsureInitialized here, rather than on every reflective get/set or invoke .
// For now, we conservatively preserve the old dalvik behavior. A quick "IsInitialized" check
diff --git a/runtime/native/java_lang_DexCache.cc b/runtime/native/java_lang_DexCache.cc
index 1198c2e..b9f8d01 100644
--- a/runtime/native/java_lang_DexCache.cc
+++ b/runtime/native/java_lang_DexCache.cc
@@ -31,14 +31,14 @@ static jobject DexCache_getDexNative(JNIEnv* env, jobject javaDexCache) {
// Should only be called while holding the lock on the dex cache.
DCHECK_EQ(dex_cache->GetLockOwnerThreadId(), soa.Self()->GetThreadId());
const DexFile* dex_file = dex_cache->GetDexFile();
- if (dex_file == NULL) {
- return NULL;
+ if (dex_file == nullptr) {
+ return nullptr;
}
void* address = const_cast<void*>(reinterpret_cast<const void*>(dex_file->Begin()));
jobject byte_buffer = env->NewDirectByteBuffer(address, dex_file->Size());
- if (byte_buffer == NULL) {
+ if (byte_buffer == nullptr) {
DCHECK(soa.Self()->IsExceptionPending());
- return NULL;
+ return nullptr;
}
jvalue args[1];
diff --git a/runtime/native/java_lang_String.cc b/runtime/native/java_lang_String.cc
index 6afe83b..2d153d4 100644
--- a/runtime/native/java_lang_String.cc
+++ b/runtime/native/java_lang_String.cc
@@ -28,7 +28,7 @@ namespace art {
static jint String_compareTo(JNIEnv* env, jobject javaThis, jobject javaRhs) {
ScopedFastNativeObjectAccess soa(env);
- if (UNLIKELY(javaRhs == NULL)) {
+ if (UNLIKELY(javaRhs == nullptr)) {
ThrowNullPointerException("rhs == null");
return -1;
} else {
diff --git a/runtime/native/java_lang_Thread.cc b/runtime/native/java_lang_Thread.cc
index d3b52ba..be7022e 100644
--- a/runtime/native/java_lang_Thread.cc
+++ b/runtime/native/java_lang_Thread.cc
@@ -43,7 +43,7 @@ static jboolean Thread_isInterrupted(JNIEnv* env, jobject java_thread) {
ScopedFastNativeObjectAccess soa(env);
MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
Thread* thread = Thread::FromManagedThread(soa, java_thread);
- return (thread != NULL) ? thread->IsInterrupted() : JNI_FALSE;
+ return (thread != nullptr) ? thread->IsInterrupted() : JNI_FALSE;
}
static void Thread_nativeCreate(JNIEnv* env, jclass, jobject java_thread, jlong stack_size,
@@ -64,7 +64,7 @@ static jint Thread_nativeGetStatus(JNIEnv* env, jobject java_thread, jboolean ha
ThreadState internal_thread_state = (has_been_started ? kTerminated : kStarting);
MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
Thread* thread = Thread::FromManagedThread(soa, java_thread);
- if (thread != NULL) {
+ if (thread != nullptr) {
internal_thread_state = thread->GetState();
}
switch (internal_thread_state) {
@@ -99,7 +99,7 @@ static jint Thread_nativeGetStatus(JNIEnv* env, jobject java_thread, jboolean ha
static jboolean Thread_nativeHoldsLock(JNIEnv* env, jobject java_thread, jobject java_object) {
ScopedObjectAccess soa(env);
mirror::Object* object = soa.Decode<mirror::Object*>(java_object);
- if (object == NULL) {
+ if (object == nullptr) {
ThrowNullPointerException("object == null");
return JNI_FALSE;
}
@@ -112,7 +112,7 @@ static void Thread_nativeInterrupt(JNIEnv* env, jobject java_thread) {
ScopedFastNativeObjectAccess soa(env);
MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
Thread* thread = Thread::FromManagedThread(soa, java_thread);
- if (thread != NULL) {
+ if (thread != nullptr) {
thread->Interrupt(soa.Self());
}
}
@@ -133,7 +133,7 @@ static void Thread_nativeSetName(JNIEnv* env, jobject peer, jstring java_name) {
bool timed_out;
// Take suspend thread lock to avoid races with threads trying to suspend this one.
Thread* thread = thread_list->SuspendThreadByPeer(peer, true, false, &timed_out);
- if (thread != NULL) {
+ if (thread != nullptr) {
{
ScopedObjectAccess soa(env);
thread->SetThreadName(name.c_str());
@@ -154,7 +154,7 @@ static void Thread_nativeSetPriority(JNIEnv* env, jobject java_thread, jint new_
ScopedObjectAccess soa(env);
MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
Thread* thread = Thread::FromManagedThread(soa, java_thread);
- if (thread != NULL) {
+ if (thread != nullptr) {
thread->SetNativePriority(new_priority);
}
}
diff --git a/runtime/native/java_lang_reflect_Array.cc b/runtime/native/java_lang_reflect_Array.cc
index eddd7de..beb953b 100644
--- a/runtime/native/java_lang_reflect_Array.cc
+++ b/runtime/native/java_lang_reflect_Array.cc
@@ -27,13 +27,14 @@
namespace art {
-static jobject Array_createMultiArray(JNIEnv* env, jclass, jclass javaElementClass, jobject javaDimArray) {
+static jobject Array_createMultiArray(
+ JNIEnv* env, jclass, jclass javaElementClass, jobject javaDimArray) {
ScopedFastNativeObjectAccess soa(env);
- DCHECK(javaElementClass != NULL);
+ DCHECK(javaElementClass != nullptr);
StackHandleScope<2> hs(soa.Self());
Handle<mirror::Class> element_class(hs.NewHandle(soa.Decode<mirror::Class*>(javaElementClass)));
DCHECK(element_class->IsClass());
- DCHECK(javaDimArray != NULL);
+ DCHECK(javaDimArray != nullptr);
mirror::Object* dimensions_obj = soa.Decode<mirror::Object*>(javaDimArray);
DCHECK(dimensions_obj->IsArrayInstance());
DCHECK_EQ(dimensions_obj->GetClass()->GetComponentType()->GetPrimitiveType(),
@@ -47,18 +48,18 @@ static jobject Array_createMultiArray(JNIEnv* env, jclass, jclass javaElementCla
static jobject Array_createObjectArray(JNIEnv* env, jclass, jclass javaElementClass, jint length) {
ScopedFastNativeObjectAccess soa(env);
- DCHECK(javaElementClass != NULL);
+ DCHECK(javaElementClass != nullptr);
if (UNLIKELY(length < 0)) {
ThrowNegativeArraySizeException(length);
- return NULL;
+ return nullptr;
}
mirror::Class* element_class = soa.Decode<mirror::Class*>(javaElementClass);
Runtime* runtime = Runtime::Current();
ClassLinker* class_linker = runtime->GetClassLinker();
mirror::Class* array_class = class_linker->FindArrayClass(soa.Self(), &element_class);
- if (UNLIKELY(array_class == NULL)) {
+ if (UNLIKELY(array_class == nullptr)) {
CHECK(soa.Self()->IsExceptionPending());
- return NULL;
+ return nullptr;
}
DCHECK(array_class->IsObjectArrayClass());
mirror::Array* new_array = mirror::ObjectArray<mirror::Object*>::Alloc(
diff --git a/runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc b/runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc
index 987427e..b96ddc8 100644
--- a/runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc
+++ b/runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc
@@ -43,7 +43,7 @@ static jboolean DdmVmInternal_getRecentAllocationStatus(JNIEnv*, jclass) {
/*
* Get a stack trace as an array of StackTraceElement objects. Returns
- * NULL on failure, e.g. if the threadId couldn't be found.
+ * null on failure, e.g. if the threadId couldn't be found.
*/
static jobjectArray DdmVmInternal_getStackTraceById(JNIEnv* env, jclass, jint thin_lock_id) {
jobjectArray trace = nullptr;
@@ -145,7 +145,7 @@ static jbyteArray DdmVmInternal_getThreadStats(JNIEnv* env, jclass) {
}
jbyteArray result = env->NewByteArray(bytes.size());
- if (result != NULL) {
+ if (result != nullptr) {
env->SetByteArrayRegion(result, 0, bytes.size(), reinterpret_cast<const jbyte*>(&bytes[0]));
}
return result;
diff --git a/runtime/nth_caller_visitor.h b/runtime/nth_caller_visitor.h
index a851f21..632ccde 100644
--- a/runtime/nth_caller_visitor.h
+++ b/runtime/nth_caller_visitor.h
@@ -27,20 +27,20 @@ class Thread;
// Walks up the stack 'n' callers, when used with Thread::WalkStack.
struct NthCallerVisitor : public StackVisitor {
NthCallerVisitor(Thread* thread, size_t n_in, bool include_runtime_and_upcalls = false)
- : StackVisitor(thread, NULL), n(n_in),
- include_runtime_and_upcalls_(include_runtime_and_upcalls), count(0), caller(NULL) {}
+ : StackVisitor(thread, nullptr), n(n_in),
+ include_runtime_and_upcalls_(include_runtime_and_upcalls), count(0), caller(nullptr) {}
bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
mirror::ArtMethod* m = GetMethod();
bool do_count = false;
- if (m == NULL || m->IsRuntimeMethod()) {
+ if (m == nullptr || m->IsRuntimeMethod()) {
// Upcall.
do_count = include_runtime_and_upcalls_;
} else {
do_count = true;
}
if (do_count) {
- DCHECK(caller == NULL);
+ DCHECK(caller == nullptr);
if (count == n) {
caller = m;
return false;
diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc
index eddbd8a..b0cbd0e 100644
--- a/runtime/oat_file.cc
+++ b/runtime/oat_file.cc
@@ -497,7 +497,7 @@ const OatFile::OatDexFile* OatFile::GetOatDexFile(const char* dex_location,
MutexLock mu(Thread::Current(), secondary_lookup_lock_);
auto secondary_lb = secondary_oat_dex_files_.lower_bound(key);
if (secondary_lb != secondary_oat_dex_files_.end() && key == secondary_lb->first) {
- oat_dex_file = secondary_lb->second; // May be nullptr.
+ oat_dex_file = secondary_lb->second; // May be null.
} else {
// We haven't seen this dex_location before, we must check the canonical location.
std::string dex_canonical_location = DexFile::GetDexCanonicalLocation(dex_location);
@@ -506,8 +506,8 @@ const OatFile::OatDexFile* OatFile::GetOatDexFile(const char* dex_location,
auto canonical_it = oat_dex_files_.find(canonical_key);
if (canonical_it != oat_dex_files_.end()) {
oat_dex_file = canonical_it->second;
- } // else keep nullptr.
- } // else keep nullptr.
+ } // else keep null.
+ } // else keep null.
// Copy the key to the string_cache_ and store the result in secondary map.
string_cache_.emplace_back(key.data(), key.length());
diff --git a/runtime/oat_file.h b/runtime/oat_file.h
index 42c60dc..b32dd22 100644
--- a/runtime/oat_file.h
+++ b/runtime/oat_file.h
@@ -48,7 +48,7 @@ class OatFile FINAL {
static OatFile* OpenWithElfFile(ElfFile* elf_file, const std::string& location,
const char* abs_dex_location,
std::string* error_msg);
- // Open an oat file. Returns NULL on failure. Requested base can
+ // Open an oat file. Returns null on failure. Requested base can
// optionally be used to request where the file should be loaded.
// See the ResolveRelativeEncodedDexLocation for a description of how the
// abs_dex_location argument is used.
@@ -149,7 +149,7 @@ class OatFile FINAL {
template<class T>
T GetOatPointer(uint32_t offset) const {
if (offset == 0) {
- return NULL;
+ return nullptr;
}
return reinterpret_cast<T>(begin_ + offset);
}
@@ -177,7 +177,7 @@ class OatFile FINAL {
const OatMethod GetOatMethod(uint32_t method_index) const;
// Return a pointer to the OatMethodOffsets for the requested
- // method_index, or nullptr if none is present. Note that most
+ // method_index, or null if none is present. Note that most
// callers should use GetOatMethod.
const OatMethodOffsets* GetOatMethodOffsets(uint32_t method_index) const;
@@ -238,7 +238,7 @@ class OatFile FINAL {
// Returns the absolute dex location for the encoded relative dex location.
//
- // If not nullptr, abs_dex_location is used to resolve the absolute dex
+ // If not null, abs_dex_location is used to resolve the absolute dex
// location of relative dex locations encoded in the oat file.
// For example, given absolute location "/data/app/foo/base.apk", encoded
// dex locations "base.apk", "base.apk:classes2.dex", etc. would be resolved
@@ -300,10 +300,10 @@ class OatFile FINAL {
// Pointer to end of oat region for bounds checking.
const uint8_t* end_;
- // Pointer to the .bss section, if present, otherwise nullptr.
+ // Pointer to the .bss section, if present, otherwise null.
const uint8_t* bss_begin_;
- // Pointer to the end of the .bss section, if present, otherwise nullptr.
+ // Pointer to the end of the .bss section, if present, otherwise null.
const uint8_t* bss_end_;
// Was this oat_file loaded executable?
@@ -331,7 +331,7 @@ class OatFile FINAL {
// Map each location and canonical location (if different) retrieved from the
// oat file to its OatDexFile. This map doesn't change after it's constructed in Setup()
// and therefore doesn't need any locking and provides the cheapest dex file lookup
- // for GetOatDexFile() for a very frequent use case. Never contains a nullptr value.
+ // for GetOatDexFile() for a very frequent use case. Never contains a null value.
Table oat_dex_files_;
// Lock guarding all members needed for secondary lookup in GetOatDexFile().
diff --git a/runtime/oat_file_assistant.cc b/runtime/oat_file_assistant.cc
index e5c27b2..37e85ab 100644
--- a/runtime/oat_file_assistant.cc
+++ b/runtime/oat_file_assistant.cc
@@ -233,7 +233,7 @@ std::vector<std::unique_ptr<const DexFile>> OatFileAssistant::LoadDexFiles(
for (int i = 1; ; i++) {
std::string secondary_dex_location = DexFile::GetMultiDexClassesDexName(i, dex_location);
oat_dex_file = oat_file.GetOatDexFile(secondary_dex_location.c_str(), nullptr, false);
- if (oat_dex_file == NULL) {
+ if (oat_dex_file == nullptr) {
// There are no more secondary dex files to load.
break;
}
@@ -393,12 +393,12 @@ OatFileAssistant::OatStatus OatFileAssistant::GivenOatFileStatus(const OatFile&
bool OatFileAssistant::GivenOatFileIsOutOfDate(const OatFile& file) {
// Verify the dex checksum.
- // Note: GetOatDexFile will return NULL if the dex checksum doesn't match
+ // Note: GetOatDexFile will return null if the dex checksum doesn't match
// what we provide, which verifies the primary dex checksum for us.
const uint32_t* dex_checksum_pointer = GetRequiredDexChecksum();
const OatFile::OatDexFile* oat_dex_file = file.GetOatDexFile(
dex_location_, dex_checksum_pointer, false);
- if (oat_dex_file == NULL) {
+ if (oat_dex_file == nullptr) {
return true;
}
@@ -408,7 +408,7 @@ bool OatFileAssistant::GivenOatFileIsOutOfDate(const OatFile& file) {
= DexFile::GetMultiDexClassesDexName(i, dex_location_);
const OatFile::OatDexFile* secondary_oat_dex_file
= file.GetOatDexFile(secondary_dex_location.c_str(), nullptr, false);
- if (secondary_oat_dex_file == NULL) {
+ if (secondary_oat_dex_file == nullptr) {
// There are no more secondary dex files to check.
break;
}
diff --git a/runtime/oat_file_assistant.h b/runtime/oat_file_assistant.h
index 9e7c2ef..a25ee31 100644
--- a/runtime/oat_file_assistant.h
+++ b/runtime/oat_file_assistant.h
@@ -85,7 +85,7 @@ class OatFileAssistant {
// Constructs an OatFileAssistant object to assist the oat file
// corresponding to the given dex location with the target instruction set.
//
- // The dex_location must not be NULL and should remain available and
+ // The dex_location must not be null and should remain available and
// unchanged for the duration of the lifetime of the OatFileAssistant object.
// Typically the dex_location is the absolute path to the original,
// un-optimized dex file.
@@ -152,11 +152,11 @@ class OatFileAssistant {
// Returns true on success.
//
// If there is a failure, the value of error_msg will be set to a string
- // describing why there was failure. error_msg must not be nullptr.
+ // describing why there was failure. error_msg must not be null.
bool MakeUpToDate(std::string* error_msg);
// Returns an oat file that can be used for loading dex files.
- // Returns nullptr if no suitable oat file was found.
+ // Returns null if no suitable oat file was found.
//
// After this call, no other methods of the OatFileAssistant should be
// called, because access to the loaded oat file has been taken away from
@@ -244,7 +244,7 @@ class OatFileAssistant {
// This will fail if dex2oat is not enabled in the current runtime.
//
// If there is a failure, the value of error_msg will be set to a string
- // describing why there was failure. error_msg must not be nullptr.
+ // describing why there was failure. error_msg must not be null.
bool RelocateOatFile(const std::string* input_file, std::string* error_msg);
// Generate the oat file from the dex file.
@@ -254,7 +254,7 @@ class OatFileAssistant {
// This will fail if dex2oat is not enabled in the current runtime.
//
// If there is a failure, the value of error_msg will be set to a string
- // describing why there was failure. error_msg must not be nullptr.
+ // describing why there was failure. error_msg must not be null.
bool GenerateOatFile(std::string* error_msg);
// Executes dex2oat using the current runtime configuration overridden with
@@ -263,7 +263,7 @@ class OatFileAssistant {
// Returns true on success.
//
// If there is a failure, the value of error_msg will be set to a string
- // describing why there was failure. error_msg must not be nullptr.
+ // describing why there was failure. error_msg must not be null.
//
// TODO: The OatFileAssistant probably isn't the right place to have this
// function.
@@ -310,12 +310,12 @@ class OatFileAssistant {
// Gets the dex checksum required for an up-to-date oat file.
// Returns dex_checksum if a required checksum was located. Returns
- // nullptr if the required checksum was not found.
+ // null if the required checksum was not found.
// The caller shouldn't clean up or free the returned pointer.
const uint32_t* GetRequiredDexChecksum();
// Returns the loaded odex file.
- // Loads the file if needed. Returns nullptr if the file failed to load.
+ // Loads the file if needed. Returns null if the file failed to load.
// The caller shouldn't clean up or free the returned pointer.
const OatFile* GetOdexFile();
@@ -324,7 +324,7 @@ class OatFileAssistant {
void ClearOdexFileCache();
// Returns the loaded oat file.
- // Loads the file if needed. Returns nullptr if the file failed to load.
+ // Loads the file if needed. Returns null if the file failed to load.
// The caller shouldn't clean up or free the returned pointer.
const OatFile* GetOatFile();
@@ -333,19 +333,19 @@ class OatFileAssistant {
void ClearOatFileCache();
// Returns the loaded image info.
- // Loads the image info if needed. Returns nullptr if the image info failed
+ // Loads the image info if needed. Returns null if the image info failed
// to load.
// The caller shouldn't clean up or free the returned pointer.
const ImageInfo* GetImageInfo();
// Returns the loaded profile.
- // Loads the profile if needed. Returns nullptr if the profile failed
+ // Loads the profile if needed. Returns null if the profile failed
// to load.
// The caller shouldn't clean up or free the returned pointer.
ProfileFile* GetProfile();
// Returns the loaded old profile.
- // Loads the old profile if needed. Returns nullptr if the old profile
+ // Loads the old profile if needed. Returns null if the old profile
// failed to load.
// The caller shouldn't clean up or free the returned pointer.
ProfileFile* GetOldProfile();
@@ -357,7 +357,7 @@ class OatFileAssistant {
ScopedFlock flock_;
// In a properly constructed OatFileAssistant object, dex_location_ should
- // never be nullptr.
+ // never be null.
const char* dex_location_ = nullptr;
// In a properly constructed OatFileAssistant object, isa_ should be either
@@ -365,7 +365,7 @@ class OatFileAssistant {
const InstructionSet isa_ = kNone;
// The package name, used solely to find the profile file.
- // This may be nullptr in a properly constructed object. In this case,
+ // This may be null in a properly constructed object. In this case,
// profile_load_attempted_ and old_profile_load_attempted_ will be true, and
// profile_load_succeeded_ and old_profile_load_succeeded_ will be false.
const char* package_name_ = nullptr;
diff --git a/runtime/oat_file_assistant_test.cc b/runtime/oat_file_assistant_test.cc
index 0c942d2..3f6b2d2 100644
--- a/runtime/oat_file_assistant_test.cc
+++ b/runtime/oat_file_assistant_test.cc
@@ -118,7 +118,7 @@ class OatFileAssistantTest : public CommonRuntimeTest {
std::string GetImageDirectory() {
if (IsHost()) {
const char* host_dir = getenv("ANDROID_HOST_OUT");
- CHECK(host_dir != NULL);
+ CHECK(host_dir != nullptr);
return std::string(host_dir) + "/framework";
} else {
return std::string("/data/art-test");
diff --git a/runtime/object_callbacks.h b/runtime/object_callbacks.h
index cf81cc5..8e99dbb 100644
--- a/runtime/object_callbacks.h
+++ b/runtime/object_callbacks.h
@@ -41,9 +41,10 @@ typedef void (ObjectCallback)(mirror::Object* obj, void* arg);
typedef mirror::Object* (MarkObjectCallback)(mirror::Object* obj, void* arg) WARN_UNUSED;
typedef void (MarkHeapReferenceCallback)(mirror::HeapReference<mirror::Object>* ref, void* arg);
-typedef void (DelayReferenceReferentCallback)(mirror::Class* klass, mirror::Reference* ref, void* arg);
+typedef void (DelayReferenceReferentCallback)(mirror::Class* klass, mirror::Reference* ref,
+ void* arg);
-// A callback for testing if an object is marked, returns nullptr if not marked, otherwise the new
+// A callback for testing if an object is marked, returns null if not marked, otherwise the new
// address the object (if the object didn't move, returns the object input parameter).
typedef mirror::Object* (IsMarkedCallback)(mirror::Object* object, void* arg) WARN_UNUSED;
diff --git a/runtime/os_linux.cc b/runtime/os_linux.cc
index e4403d7..2282789 100644
--- a/runtime/os_linux.cc
+++ b/runtime/os_linux.cc
@@ -40,10 +40,10 @@ File* OS::CreateEmptyFile(const char* name) {
}
File* OS::OpenFileWithFlags(const char* name, int flags) {
- CHECK(name != NULL);
+ CHECK(name != nullptr);
std::unique_ptr<File> file(new File);
if (!file->Open(name, flags, 0666)) {
- return NULL;
+ return nullptr;
}
return file.release();
}
diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc
index 620a4bd..0bc834f 100644
--- a/runtime/parsed_options.cc
+++ b/runtime/parsed_options.cc
@@ -324,7 +324,7 @@ bool ParsedOptions::ProcessSpecialOptions(const RuntimeOptions& options,
} else if (option == "vfprintf") {
const void* hook = options[i].second;
if (hook == nullptr) {
- Usage("vfprintf argument was NULL");
+ Usage("vfprintf argument was nullptr");
return false;
}
int (*hook_vfprintf)(FILE *, const char*, va_list) =
@@ -337,7 +337,7 @@ bool ParsedOptions::ProcessSpecialOptions(const RuntimeOptions& options,
} else if (option == "exit") {
const void* hook = options[i].second;
if (hook == nullptr) {
- Usage("exit argument was NULL");
+ Usage("exit argument was nullptr");
return false;
}
void(*hook_exit)(jint) = reinterpret_cast<void(*)(jint)>(const_cast<void*>(hook));
@@ -348,7 +348,7 @@ bool ParsedOptions::ProcessSpecialOptions(const RuntimeOptions& options,
} else if (option == "abort") {
const void* hook = options[i].second;
if (hook == nullptr) {
- Usage("abort was NULL\n");
+ Usage("abort was nullptr\n");
return false;
}
void(*hook_abort)() = reinterpret_cast<void(*)()>(const_cast<void*>(hook));
diff --git a/runtime/parsed_options_test.cc b/runtime/parsed_options_test.cc
index 658b656..a8575de 100644
--- a/runtime/parsed_options_test.cc
+++ b/runtime/parsed_options_test.cc
@@ -33,7 +33,6 @@ TEST_F(ParsedOptionsTest, ParsedOptions) {
void* test_vfprintf = reinterpret_cast<void*>(0xa);
void* test_abort = reinterpret_cast<void*>(0xb);
void* test_exit = reinterpret_cast<void*>(0xc);
- void* null = reinterpret_cast<void*>(NULL);
std::string lib_core(CommonRuntimeTest::GetLibCoreDexFileName());
@@ -42,27 +41,27 @@ TEST_F(ParsedOptionsTest, ParsedOptions) {
boot_class_path += lib_core;
RuntimeOptions options;
- options.push_back(std::make_pair(boot_class_path.c_str(), null));
- options.push_back(std::make_pair("-classpath", null));
- options.push_back(std::make_pair(lib_core.c_str(), null));
- options.push_back(std::make_pair("-cp", null));
- options.push_back(std::make_pair(lib_core.c_str(), null));
- options.push_back(std::make_pair("-Ximage:boot_image", null));
- options.push_back(std::make_pair("-Xcheck:jni", null));
- options.push_back(std::make_pair("-Xms2048", null));
- options.push_back(std::make_pair("-Xmx4k", null));
- options.push_back(std::make_pair("-Xss1m", null));
- options.push_back(std::make_pair("-XX:HeapTargetUtilization=0.75", null));
- options.push_back(std::make_pair("-Dfoo=bar", null));
- options.push_back(std::make_pair("-Dbaz=qux", null));
- options.push_back(std::make_pair("-verbose:gc,class,jni", null));
+ options.push_back(std::make_pair(boot_class_path.c_str(), nullptr));
+ options.push_back(std::make_pair("-classpath", nullptr));
+ options.push_back(std::make_pair(lib_core.c_str(), nullptr));
+ options.push_back(std::make_pair("-cp", nullptr));
+ options.push_back(std::make_pair(lib_core.c_str(), nullptr));
+ options.push_back(std::make_pair("-Ximage:boot_image", nullptr));
+ options.push_back(std::make_pair("-Xcheck:jni", nullptr));
+ options.push_back(std::make_pair("-Xms2048", nullptr));
+ options.push_back(std::make_pair("-Xmx4k", nullptr));
+ options.push_back(std::make_pair("-Xss1m", nullptr));
+ options.push_back(std::make_pair("-XX:HeapTargetUtilization=0.75", nullptr));
+ options.push_back(std::make_pair("-Dfoo=bar", nullptr));
+ options.push_back(std::make_pair("-Dbaz=qux", nullptr));
+ options.push_back(std::make_pair("-verbose:gc,class,jni", nullptr));
options.push_back(std::make_pair("vfprintf", test_vfprintf));
options.push_back(std::make_pair("abort", test_abort));
options.push_back(std::make_pair("exit", test_exit));
RuntimeArgumentMap map;
std::unique_ptr<ParsedOptions> parsed(ParsedOptions::Create(options, false, &map));
- ASSERT_TRUE(parsed.get() != NULL);
+ ASSERT_TRUE(parsed.get() != nullptr);
ASSERT_NE(0u, map.Size());
using Opt = RuntimeArgumentMap;
@@ -104,7 +103,7 @@ TEST_F(ParsedOptionsTest, ParsedOptionsGc) {
RuntimeArgumentMap map;
std::unique_ptr<ParsedOptions> parsed(ParsedOptions::Create(options, false, &map));
- ASSERT_TRUE(parsed.get() != NULL);
+ ASSERT_TRUE(parsed.get() != nullptr);
ASSERT_NE(0u, map.Size());
using Opt = RuntimeArgumentMap;
diff --git a/runtime/primitive.h b/runtime/primitive.h
index 3818487..0ac5f40 100644
--- a/runtime/primitive.h
+++ b/runtime/primitive.h
@@ -132,7 +132,7 @@ class Primitive {
return "V";
default:
LOG(FATAL) << "Primitive char conversion on invalid type " << static_cast<int>(type);
- return NULL;
+ return nullptr;
}
}
diff --git a/runtime/profiler.cc b/runtime/profiler.cc
index db372c3..90a47b3 100644
--- a/runtime/profiler.cc
+++ b/runtime/profiler.cc
@@ -58,7 +58,7 @@ class BoundedStackVisitor : public StackVisitor {
BoundedStackVisitor(std::vector<std::pair<mirror::ArtMethod*, uint32_t>>* stack,
Thread* thread, uint32_t max_depth)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- : StackVisitor(thread, NULL), stack_(stack), max_depth_(max_depth), depth_(0) {
+ : StackVisitor(thread, nullptr), stack_(stack), max_depth_(max_depth), depth_(0) {
}
bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -399,7 +399,7 @@ BackgroundMethodSamplingProfiler::BackgroundMethodSamplingProfiler(
bool BackgroundMethodSamplingProfiler::ProcessMethod(mirror::ArtMethod* method) {
if (method == nullptr) {
profile_table_.NullMethod();
- // Don't record a nullptr method.
+ // Don't record a null method.
return false;
}
@@ -820,7 +820,7 @@ bool ProfileFile::LoadFile(const std::string& fileName) {
// Bad summary info. It should be total/null/boot.
return false;
}
- // This is the number of hits in all profiled methods (without nullptr or boot methods)
+ // This is the number of hits in all profiled methods (without null or boot methods)
uint32_t total_count = strtoul(summary_info[0].c_str(), nullptr, 10);
// Now read each line until the end of file. Each line consists of 3 fields separated by '/'.
diff --git a/runtime/reference_table.cc b/runtime/reference_table.cc
index beba64f..a31d8ac 100644
--- a/runtime/reference_table.cc
+++ b/runtime/reference_table.cc
@@ -40,7 +40,7 @@ ReferenceTable::~ReferenceTable() {
}
void ReferenceTable::Add(mirror::Object* obj) {
- DCHECK(obj != NULL);
+ DCHECK(obj != nullptr);
VerifyObject(obj);
if (entries_.size() >= max_size_) {
LOG(FATAL) << "ReferenceTable '" << name_ << "' "
@@ -79,8 +79,8 @@ static size_t GetElementCount(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::
static void DumpSummaryLine(std::ostream& os, mirror::Object* obj, size_t element_count,
int identical, int equiv)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- if (obj == NULL) {
- os << " NULL reference (count=" << equiv << ")\n";
+ if (obj == nullptr) {
+ os << " null reference (count=" << equiv << ")\n";
return;
}
if (Runtime::Current()->IsClearedJniWeakGlobal(obj)) {
diff --git a/runtime/reference_table_test.cc b/runtime/reference_table_test.cc
index db98e1f..4ffebf2 100644
--- a/runtime/reference_table_test.cc
+++ b/runtime/reference_table_test.cc
@@ -40,8 +40,8 @@ TEST_F(ReferenceTableTest, Basics) {
EXPECT_EQ(0U, rt.Size());
}
- // Check removal of all NULLs in a empty table is a no-op.
- rt.Remove(NULL);
+ // Check removal of all nulls in an empty table is a no-op.
+ rt.Remove(nullptr);
EXPECT_EQ(0U, rt.Size());
// Check removal of all o1 in a empty table is a no-op.
diff --git a/runtime/reflection_test.cc b/runtime/reflection_test.cc
index 7aefdaa..a62bc5e 100644
--- a/runtime/reflection_test.cc
+++ b/runtime/reflection_test.cc
@@ -37,35 +37,35 @@ class ReflectionTest : public CommonCompilerTest {
// Turn on -verbose:jni for the JNI tests.
// gLogVerbosity.jni = true;
- vm_->AttachCurrentThread(&env_, NULL);
+ vm_->AttachCurrentThread(&env_, nullptr);
ScopedLocalRef<jclass> aioobe(env_,
env_->FindClass("java/lang/ArrayIndexOutOfBoundsException"));
- CHECK(aioobe.get() != NULL);
+ CHECK(aioobe.get() != nullptr);
aioobe_ = reinterpret_cast<jclass>(env_->NewGlobalRef(aioobe.get()));
ScopedLocalRef<jclass> ase(env_, env_->FindClass("java/lang/ArrayStoreException"));
- CHECK(ase.get() != NULL);
+ CHECK(ase.get() != nullptr);
ase_ = reinterpret_cast<jclass>(env_->NewGlobalRef(ase.get()));
ScopedLocalRef<jclass> sioobe(env_,
env_->FindClass("java/lang/StringIndexOutOfBoundsException"));
- CHECK(sioobe.get() != NULL);
+ CHECK(sioobe.get() != nullptr);
sioobe_ = reinterpret_cast<jclass>(env_->NewGlobalRef(sioobe.get()));
}
void CleanUpJniEnv() {
- if (aioobe_ != NULL) {
+ if (aioobe_ != nullptr) {
env_->DeleteGlobalRef(aioobe_);
- aioobe_ = NULL;
+ aioobe_ = nullptr;
}
- if (ase_ != NULL) {
+ if (ase_ != nullptr) {
env_->DeleteGlobalRef(ase_);
- ase_ = NULL;
+ ase_ = nullptr;
}
- if (sioobe_ != NULL) {
+ if (sioobe_ != nullptr) {
env_->DeleteGlobalRef(sioobe_);
- sioobe_ = NULL;
+ sioobe_ = nullptr;
}
}
@@ -105,7 +105,7 @@ class ReflectionTest : public CommonCompilerTest {
mirror::Class* c = class_linker_->FindClass(self, DotToDescriptor(class_name).c_str(),
class_loader);
- CHECK(c != NULL);
+ CHECK(c != nullptr);
*method = is_static ? c->FindDirectMethod(method_name, method_signature)
: c->FindVirtualMethod(method_name, method_signature);
@@ -501,10 +501,10 @@ TEST_F(ReflectionTest, StaticMainMethod) {
CompileDirectMethod(class_loader, "Main", "main", "([Ljava/lang/String;)V");
mirror::Class* klass = class_linker_->FindClass(soa.Self(), "LMain;", class_loader);
- ASSERT_TRUE(klass != NULL);
+ ASSERT_TRUE(klass != nullptr);
mirror::ArtMethod* method = klass->FindDirectMethod("main", "([Ljava/lang/String;)V");
- ASSERT_TRUE(method != NULL);
+ ASSERT_TRUE(method != nullptr);
// Start runtime.
bool started = runtime_->Start();
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 2fc8d20..48bca62 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -289,7 +289,7 @@ struct AbortState {
}
gAborting++;
os << "Runtime aborting...\n";
- if (Runtime::Current() == NULL) {
+ if (Runtime::Current() == nullptr) {
os << "(Runtime does not yet exist!)\n";
return;
}
@@ -350,7 +350,7 @@ void Runtime::Abort() {
MutexLock mu(Thread::Current(), *Locks::abort_lock_);
// Get any pending output out of the way.
- fflush(NULL);
+ fflush(nullptr);
// Many people have difficulty distinguish aborts from crashes,
// so be explicit.
@@ -358,7 +358,7 @@ void Runtime::Abort() {
LOG(INTERNAL_FATAL) << Dumpable<AbortState>(state);
// Call the abort hook if we have one.
- if (Runtime::Current() != NULL && Runtime::Current()->abort_ != NULL) {
+ if (Runtime::Current() != nullptr && Runtime::Current()->abort_ != nullptr) {
LOG(INTERNAL_FATAL) << "Calling abort hook...";
Runtime::Current()->abort_();
// notreached
@@ -386,7 +386,7 @@ void Runtime::PreZygoteFork() {
}
void Runtime::CallExitHook(jint status) {
- if (exit_ != NULL) {
+ if (exit_ != nullptr) {
ScopedThreadStateChange tsc(Thread::Current(), kNative);
exit_(status);
LOG(WARNING) << "Exit hook returned instead of exiting!";
@@ -401,16 +401,16 @@ void Runtime::SweepSystemWeaks(IsMarkedCallback* visitor, void* arg) {
bool Runtime::Create(const RuntimeOptions& options, bool ignore_unrecognized) {
// TODO: acquire a static mutex on Runtime to avoid racing.
- if (Runtime::instance_ != NULL) {
+ if (Runtime::instance_ != nullptr) {
return false;
}
- InitLogging(NULL); // Calls Locks::Init() as a side effect.
+ InitLogging(nullptr); // Calls Locks::Init() as a side effect.
instance_ = new Runtime;
if (!instance_->Init(options, ignore_unrecognized)) {
// TODO: Currently deleting the instance will abort the runtime on destruction. Now This will
// leak memory, instead. Fix the destructor. b/19100793.
// delete instance_;
- instance_ = NULL;
+ instance_ = nullptr;
return false;
}
return true;
@@ -431,7 +431,7 @@ static jobject CreateSystemClassLoader(Runtime* runtime) {
mirror::ArtMethod* getSystemClassLoader =
class_loader_class->FindDirectMethod("getSystemClassLoader", "()Ljava/lang/ClassLoader;");
- CHECK(getSystemClassLoader != NULL);
+ CHECK(getSystemClassLoader != nullptr);
JValue result = InvokeWithJValues(soa, nullptr, soa.EncodeMethod(getSystemClassLoader), nullptr);
JNIEnv* env = soa.Self()->GetJniEnv();
@@ -447,7 +447,7 @@ static jobject CreateSystemClassLoader(Runtime* runtime) {
ArtField* contextClassLoader =
thread_class->FindDeclaredInstanceField("contextClassLoader", "Ljava/lang/ClassLoader;");
- CHECK(contextClassLoader != NULL);
+ CHECK(contextClassLoader != nullptr);
// We can't run in a transaction yet.
contextClassLoader->SetObject<false>(soa.Self()->GetPeer(),
@@ -590,7 +590,7 @@ bool Runtime::InitZygote() {
// Mark rootfs as being a slave so that changes from default
// namespace only flow into our children.
- if (mount("rootfs", "/", NULL, (MS_SLAVE | MS_REC), NULL) == -1) {
+ if (mount("rootfs", "/", nullptr, (MS_SLAVE | MS_REC), nullptr) == -1) {
PLOG(WARNING) << "Failed to mount() rootfs as MS_SLAVE";
return false;
}
@@ -599,7 +599,7 @@ bool Runtime::InitZygote() {
// bind mount storage into their respective private namespaces, which
// are isolated from each other.
const char* target_base = getenv("EMULATED_STORAGE_TARGET");
- if (target_base != NULL) {
+ if (target_base != nullptr) {
if (mount("tmpfs", target_base, "tmpfs", MS_NOSUID | MS_NODEV,
"uid=0,gid=1028,mode=0751") == -1) {
LOG(WARNING) << "Failed to mount tmpfs to " << target_base;
@@ -677,7 +677,7 @@ void Runtime::StartDaemonThreads() {
static bool OpenDexFilesFromImage(const std::string& image_location,
std::vector<std::unique_ptr<const DexFile>>* dex_files,
size_t* failures) {
- DCHECK(dex_files != nullptr) << "OpenDexFilesFromImage: out-param is NULL";
+ DCHECK(dex_files != nullptr) << "OpenDexFilesFromImage: out-param is nullptr";
std::string system_filename;
bool has_system = false;
std::string cache_filename_unused;
@@ -737,7 +737,7 @@ static size_t OpenDexFiles(const std::vector<std::string>& dex_filenames,
const std::vector<std::string>& dex_locations,
const std::string& image_location,
std::vector<std::unique_ptr<const DexFile>>* dex_files) {
- DCHECK(dex_files != nullptr) << "OpenDexFiles: out-param is NULL";
+ DCHECK(dex_files != nullptr) << "OpenDexFiles: out-param is nullptr";
size_t failure_count = 0;
if (!image_location.empty() && OpenDexFilesFromImage(image_location, dex_files, &failure_count)) {
return failure_count;
@@ -870,7 +870,7 @@ bool Runtime::Init(const RuntimeOptions& raw_options, bool ignore_unrecognized)
// If we are already the compiler at this point, we must be dex2oat. Don't create the jit in
// this case.
// If runtime_options doesn't have UseJIT set to true then CreateFromRuntimeArguments returns
- // nullptr and we don't create the jit.
+ // null and we don't create the jit.
use_jit = false;
}
@@ -1129,26 +1129,26 @@ void Runtime::InitThreadGroups(Thread* self) {
env->NewGlobalRef(env->GetStaticObjectField(
WellKnownClasses::java_lang_ThreadGroup,
WellKnownClasses::java_lang_ThreadGroup_mainThreadGroup));
- CHECK(main_thread_group_ != NULL || IsAotCompiler());
+ CHECK(main_thread_group_ != nullptr || IsAotCompiler());
system_thread_group_ =
env->NewGlobalRef(env->GetStaticObjectField(
WellKnownClasses::java_lang_ThreadGroup,
WellKnownClasses::java_lang_ThreadGroup_systemThreadGroup));
- CHECK(system_thread_group_ != NULL || IsAotCompiler());
+ CHECK(system_thread_group_ != nullptr || IsAotCompiler());
}
jobject Runtime::GetMainThreadGroup() const {
- CHECK(main_thread_group_ != NULL || IsAotCompiler());
+ CHECK(main_thread_group_ != nullptr || IsAotCompiler());
return main_thread_group_;
}
jobject Runtime::GetSystemThreadGroup() const {
- CHECK(system_thread_group_ != NULL || IsAotCompiler());
+ CHECK(system_thread_group_ != nullptr || IsAotCompiler());
return system_thread_group_;
}
jobject Runtime::GetSystemClassLoader() const {
- CHECK(system_class_loader_ != NULL || IsAotCompiler());
+ CHECK(system_class_loader_ != nullptr || IsAotCompiler());
return system_class_loader_;
}
@@ -1274,12 +1274,12 @@ void Runtime::BlockSignals() {
bool Runtime::AttachCurrentThread(const char* thread_name, bool as_daemon, jobject thread_group,
bool create_peer) {
- return Thread::Attach(thread_name, as_daemon, thread_group, create_peer) != NULL;
+ return Thread::Attach(thread_name, as_daemon, thread_group, create_peer) != nullptr;
}
void Runtime::DetachCurrentThread() {
Thread* self = Thread::Current();
- if (self == NULL) {
+ if (self == nullptr) {
LOG(FATAL) << "attempting to detach thread that is not attached";
}
if (self->HasManagedStack()) {
diff --git a/runtime/runtime.h b/runtime/runtime.h
index d95640d..c35f4ca 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -248,7 +248,7 @@ class Runtime {
}
InternTable* GetInternTable() const {
- DCHECK(intern_table_ != NULL);
+ DCHECK(intern_table_ != nullptr);
return intern_table_;
}
@@ -328,7 +328,7 @@ class Runtime {
void VisitNonConcurrentRoots(RootVisitor* visitor)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- // Sweep system weaks, the system weak is deleted if the visitor return nullptr. Otherwise, the
+ // Sweep system weaks, the system weak is deleted if the visitor return null. Otherwise, the
// system weak is updated to be the visitor's returned value.
void SweepSystemWeaks(IsMarkedCallback* visitor, void* arg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -574,7 +574,7 @@ class Runtime {
void StartDaemonThreads();
void StartSignalCatcher();
- // A pointer to the active runtime or NULL.
+ // A pointer to the active runtime or null.
static Runtime* instance_;
// NOTE: these must match the gc::ProcessState values as they come directly from the framework.
diff --git a/runtime/runtime_linux.cc b/runtime/runtime_linux.cc
index 35d944f..d65e18e 100644
--- a/runtime/runtime_linux.cc
+++ b/runtime/runtime_linux.cc
@@ -321,7 +321,7 @@ void HandleUnexpectedSignal(int signal_number, siginfo_t* info, void* raw_contex
OsInfo os_info;
const char* cmd_line = GetCmdLine();
- if (cmd_line == NULL) {
+ if (cmd_line == nullptr) {
cmd_line = "<unset>"; // Because no-one called InitLogging.
}
pid_t tid = GetTid();
@@ -353,9 +353,10 @@ void HandleUnexpectedSignal(int signal_number, siginfo_t* info, void* raw_contex
heap->DumpObject(LOG(INTERNAL_FATAL), reinterpret_cast<mirror::Object*>(info->si_addr));
}
}
- if (getenv("debug_db_uid") != NULL || getenv("art_wait_for_gdb_on_crash") != NULL) {
+ if (getenv("debug_db_uid") != nullptr || getenv("art_wait_for_gdb_on_crash") != nullptr) {
LOG(INTERNAL_FATAL) << "********************************************************\n"
- << "* Process " << getpid() << " thread " << tid << " \"" << thread_name << "\""
+ << "* Process " << getpid() << " thread " << tid << " \"" << thread_name
+ << "\""
<< " has been suspended while crashing.\n"
<< "* Attach gdb:\n"
<< "* gdb -p " << tid << "\n"
@@ -370,7 +371,7 @@ void HandleUnexpectedSignal(int signal_number, siginfo_t* info, void* raw_contex
memset(&action, 0, sizeof(action));
sigemptyset(&action.sa_mask);
action.sa_handler = SIG_DFL;
- sigaction(signal_number, &action, NULL);
+ sigaction(signal_number, &action, nullptr);
// ...and re-raise so we die with the appropriate status.
kill(getpid(), signal_number);
#else
@@ -390,19 +391,19 @@ void Runtime::InitPlatformSignalHandlers() {
action.sa_flags |= SA_ONSTACK;
int rc = 0;
- rc += sigaction(SIGABRT, &action, NULL);
- rc += sigaction(SIGBUS, &action, NULL);
- rc += sigaction(SIGFPE, &action, NULL);
- rc += sigaction(SIGILL, &action, NULL);
- rc += sigaction(SIGPIPE, &action, NULL);
- rc += sigaction(SIGSEGV, &action, NULL);
+ rc += sigaction(SIGABRT, &action, nullptr);
+ rc += sigaction(SIGBUS, &action, nullptr);
+ rc += sigaction(SIGFPE, &action, nullptr);
+ rc += sigaction(SIGILL, &action, nullptr);
+ rc += sigaction(SIGPIPE, &action, nullptr);
+ rc += sigaction(SIGSEGV, &action, nullptr);
#if defined(SIGSTKFLT)
- rc += sigaction(SIGSTKFLT, &action, NULL);
+ rc += sigaction(SIGSTKFLT, &action, nullptr);
#endif
- rc += sigaction(SIGTRAP, &action, NULL);
+ rc += sigaction(SIGTRAP, &action, nullptr);
// Special dump-all timeout.
if (GetTimeoutSignal() != -1) {
- rc += sigaction(GetTimeoutSignal(), &action, NULL);
+ rc += sigaction(GetTimeoutSignal(), &action, nullptr);
}
CHECK_EQ(rc, 0);
}
diff --git a/runtime/scoped_thread_state_change.h b/runtime/scoped_thread_state_change.h
index 11b7df6..b93fcb4 100644
--- a/runtime/scoped_thread_state_change.h
+++ b/runtime/scoped_thread_state_change.h
@@ -36,11 +36,11 @@ class ScopedThreadStateChange {
ScopedThreadStateChange(Thread* self, ThreadState new_thread_state)
LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) ALWAYS_INLINE
: self_(self), thread_state_(new_thread_state), expected_has_no_thread_(false) {
- if (UNLIKELY(self_ == NULL)) {
- // Value chosen arbitrarily and won't be used in the destructor since thread_ == NULL.
+ if (UNLIKELY(self_ == nullptr)) {
+ // Value chosen arbitrarily and won't be used in the destructor since thread_ == null.
old_thread_state_ = kTerminated;
Runtime* runtime = Runtime::Current();
- CHECK(runtime == NULL || !runtime->IsStarted() || runtime->IsShuttingDown(self_));
+ CHECK(runtime == nullptr || !runtime->IsStarted() || runtime->IsShuttingDown(self_));
} else {
DCHECK_EQ(self, Thread::Current());
// Read state without locks, ok as state is effectively thread local and we're not interested
@@ -60,10 +60,10 @@ class ScopedThreadStateChange {
}
~ScopedThreadStateChange() LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) ALWAYS_INLINE {
- if (UNLIKELY(self_ == NULL)) {
+ if (UNLIKELY(self_ == nullptr)) {
if (!expected_has_no_thread_) {
Runtime* runtime = Runtime::Current();
- bool shutting_down = (runtime == NULL) || runtime->IsShuttingDown(nullptr);
+ bool shutting_down = (runtime == nullptr) || runtime->IsShuttingDown(nullptr);
CHECK(shutting_down);
}
} else {
@@ -87,7 +87,7 @@ class ScopedThreadStateChange {
protected:
// Constructor used by ScopedJniThreadState for an unattached thread that has access to the VM*.
ScopedThreadStateChange()
- : self_(NULL), thread_state_(kTerminated), old_thread_state_(kTerminated),
+ : self_(nullptr), thread_state_(kTerminated), old_thread_state_(kTerminated),
expected_has_no_thread_(true) {}
Thread* const self_;
@@ -124,7 +124,7 @@ class ScopedObjectAccessAlreadyRunnable {
* Add a local reference for an object to the indirect reference table associated with the
* current stack frame. When the native function returns, the reference will be discarded.
*
- * We need to allow the same reference to be added multiple times, and cope with NULL.
+ * We need to allow the same reference to be added multiple times, and cope with null.
*
* This will be called on otherwise unreferenced objects. We cannot do GC allocations here, and
* it's best if we don't grab a mutex.
@@ -133,8 +133,8 @@ class ScopedObjectAccessAlreadyRunnable {
T AddLocalReference(mirror::Object* obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
Locks::mutator_lock_->AssertSharedHeld(Self());
DCHECK(IsRunnable()); // Don't work with raw objects in non-runnable states.
- if (obj == NULL) {
- return NULL;
+ if (obj == nullptr) {
+ return nullptr;
}
DCHECK_NE((reinterpret_cast<uintptr_t>(obj) & 0xffff0000), 0xebad0000);
return Env()->AddLocalReference<T>(obj);
diff --git a/runtime/signal_catcher.cc b/runtime/signal_catcher.cc
index 26bf655..863d59b 100644
--- a/runtime/signal_catcher.cc
+++ b/runtime/signal_catcher.cc
@@ -53,7 +53,7 @@ static void DumpCmdLine(std::ostream& os) {
os << "Cmd line: " << current_cmd_line << "\n";
const char* stashed_cmd_line = GetCmdLine();
- if (stashed_cmd_line != NULL && current_cmd_line != stashed_cmd_line
+ if (stashed_cmd_line != nullptr && current_cmd_line != stashed_cmd_line
&& strcmp(stashed_cmd_line, "<unset>") != 0) {
os << "Original command line: " << stashed_cmd_line << "\n";
}
@@ -67,15 +67,15 @@ SignalCatcher::SignalCatcher(const std::string& stack_trace_file)
: stack_trace_file_(stack_trace_file),
lock_("SignalCatcher lock"),
cond_("SignalCatcher::cond_", lock_),
- thread_(NULL) {
+ thread_(nullptr) {
SetHaltFlag(false);
// Create a raw pthread; its start routine will attach to the runtime.
- CHECK_PTHREAD_CALL(pthread_create, (&pthread_, NULL, &Run, this), "signal catcher thread");
+ CHECK_PTHREAD_CALL(pthread_create, (&pthread_, nullptr, &Run, this), "signal catcher thread");
Thread* self = Thread::Current();
MutexLock mu(self, lock_);
- while (thread_ == NULL) {
+ while (thread_ == nullptr) {
cond_.Wait(self);
}
}
@@ -85,7 +85,7 @@ SignalCatcher::~SignalCatcher() {
// to arrive, send it one.
SetHaltFlag(true);
CHECK_PTHREAD_CALL(pthread_kill, (pthread_, SIGQUIT), "signal catcher shutdown");
- CHECK_PTHREAD_CALL(pthread_join, (pthread_, NULL), "signal catcher shutdown");
+ CHECK_PTHREAD_CALL(pthread_join, (pthread_, nullptr), "signal catcher shutdown");
}
void SignalCatcher::SetHaltFlag(bool new_value) {
@@ -176,7 +176,7 @@ int SignalCatcher::WaitForSignal(Thread* self, SignalSet& signals) {
void* SignalCatcher::Run(void* arg) {
SignalCatcher* signal_catcher = reinterpret_cast<SignalCatcher*>(arg);
- CHECK(signal_catcher != NULL);
+ CHECK(signal_catcher != nullptr);
Runtime* runtime = Runtime::Current();
CHECK(runtime->AttachCurrentThread("Signal Catcher", true, runtime->GetSystemThreadGroup(),
@@ -199,7 +199,7 @@ void* SignalCatcher::Run(void* arg) {
int signal_number = signal_catcher->WaitForSignal(self, signals);
if (signal_catcher->ShouldHalt()) {
runtime->DetachCurrentThread();
- return NULL;
+ return nullptr;
}
switch (signal_number) {
diff --git a/runtime/signal_set.h b/runtime/signal_set.h
index 3b89e6e..c272514 100644
--- a/runtime/signal_set.h
+++ b/runtime/signal_set.h
@@ -38,7 +38,7 @@ class SignalSet {
}
void Block() {
- if (sigprocmask(SIG_BLOCK, &set_, NULL) == -1) {
+ if (sigprocmask(SIG_BLOCK, &set_, nullptr) == -1) {
PLOG(FATAL) << "sigprocmask failed";
}
}
diff --git a/runtime/stack.cc b/runtime/stack.cc
index 4ae49dd..aa3e320 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -36,12 +36,12 @@ namespace art {
mirror::Object* ShadowFrame::GetThisObject() const {
mirror::ArtMethod* m = GetMethod();
if (m->IsStatic()) {
- return NULL;
+ return nullptr;
} else if (m->IsNative()) {
return GetVRegReference(0);
} else {
const DexFile::CodeItem* code_item = m->GetCodeItem();
- CHECK(code_item != NULL) << PrettyMethod(m);
+ CHECK(code_item != nullptr) << PrettyMethod(m);
uint16_t reg = code_item->registers_size_ - code_item->ins_size_;
return GetVRegReference(reg);
}
@@ -50,7 +50,7 @@ mirror::Object* ShadowFrame::GetThisObject() const {
mirror::Object* ShadowFrame::GetThisObject(uint16_t num_ins) const {
mirror::ArtMethod* m = GetMethod();
if (m->IsStatic()) {
- return NULL;
+ return nullptr;
} else {
return GetVRegReference(NumberOfVRegs() - num_ins);
}
@@ -58,9 +58,9 @@ mirror::Object* ShadowFrame::GetThisObject(uint16_t num_ins) const {
size_t ManagedStack::NumJniShadowFrameReferences() const {
size_t count = 0;
- for (const ManagedStack* current_fragment = this; current_fragment != NULL;
+ for (const ManagedStack* current_fragment = this; current_fragment != nullptr;
current_fragment = current_fragment->GetLink()) {
- for (ShadowFrame* current_frame = current_fragment->top_shadow_frame_; current_frame != NULL;
+ for (ShadowFrame* current_frame = current_fragment->top_shadow_frame_; current_frame != nullptr;
current_frame = current_frame->GetLink()) {
if (current_frame->GetMethod()->IsNative()) {
// The JNI ShadowFrame only contains references. (For indirect reference.)
@@ -72,9 +72,9 @@ size_t ManagedStack::NumJniShadowFrameReferences() const {
}
bool ManagedStack::ShadowFramesContain(StackReference<mirror::Object>* shadow_frame_entry) const {
- for (const ManagedStack* current_fragment = this; current_fragment != NULL;
+ for (const ManagedStack* current_fragment = this; current_fragment != nullptr;
current_fragment = current_fragment->GetLink()) {
- for (ShadowFrame* current_frame = current_fragment->top_shadow_frame_; current_frame != NULL;
+ for (ShadowFrame* current_frame = current_fragment->top_shadow_frame_; current_frame != nullptr;
current_frame = current_frame->GetLink()) {
if (current_frame->Contains(shadow_frame_entry)) {
return true;
@@ -85,23 +85,23 @@ bool ManagedStack::ShadowFramesContain(StackReference<mirror::Object>* shadow_fr
}
StackVisitor::StackVisitor(Thread* thread, Context* context)
- : thread_(thread), cur_shadow_frame_(NULL),
- cur_quick_frame_(NULL), cur_quick_frame_pc_(0), num_frames_(0), cur_depth_(0),
+ : thread_(thread), cur_shadow_frame_(nullptr),
+ cur_quick_frame_(nullptr), cur_quick_frame_pc_(0), num_frames_(0), cur_depth_(0),
context_(context) {
DCHECK(thread == Thread::Current() || thread->IsSuspended()) << *thread;
}
StackVisitor::StackVisitor(Thread* thread, Context* context, size_t num_frames)
- : thread_(thread), cur_shadow_frame_(NULL),
- cur_quick_frame_(NULL), cur_quick_frame_pc_(0), num_frames_(num_frames), cur_depth_(0),
+ : thread_(thread), cur_shadow_frame_(nullptr),
+ cur_quick_frame_(nullptr), cur_quick_frame_pc_(0), num_frames_(num_frames), cur_depth_(0),
context_(context) {
DCHECK(thread == Thread::Current() || thread->IsSuspended()) << *thread;
}
uint32_t StackVisitor::GetDexPc(bool abort_on_failure) const {
- if (cur_shadow_frame_ != NULL) {
+ if (cur_shadow_frame_ != nullptr) {
return cur_shadow_frame_->GetDexPC();
- } else if (cur_quick_frame_ != NULL) {
+ } else if (cur_quick_frame_ != nullptr) {
return GetMethod()->ToDexPc(cur_quick_frame_pc_, abort_on_failure);
} else {
return 0;
@@ -183,7 +183,7 @@ bool StackVisitor::GetVRegFromQuickCode(mirror::ArtMethod* m, uint16_t vreg, VRe
return GetRegisterIfAccessible(reg, kind, val);
} else {
const DexFile::CodeItem* code_item = m->GetCodeItem();
- DCHECK(code_item != nullptr) << PrettyMethod(m); // Can't be NULL or how would we compile
+ DCHECK(code_item != nullptr) << PrettyMethod(m); // Can't be null or how would we compile
// its instructions?
*val = *GetVRegAddrFromQuickCode(cur_quick_frame_, code_item, frame_info.CoreSpillMask(),
frame_info.FpSpillMask(), frame_info.FrameSizeInBytes(), vreg);
@@ -199,7 +199,7 @@ bool StackVisitor::GetVRegFromOptimizedCode(mirror::ArtMethod* m, uint16_t vreg,
CodeInfo code_info = m->GetOptimizedCodeInfo();
StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset);
const DexFile::CodeItem* code_item = m->GetCodeItem();
- DCHECK(code_item != nullptr) << PrettyMethod(m); // Can't be NULL or how would we compile
+ DCHECK(code_item != nullptr) << PrettyMethod(m); // Can't be null or how would we compile
// its instructions?
DCHECK_LT(vreg, code_item->registers_size_);
uint16_t number_of_dex_registers = code_item->registers_size_;
@@ -297,7 +297,7 @@ bool StackVisitor::GetVRegPairFromQuickCode(mirror::ArtMethod* m, uint16_t vreg,
return GetRegisterPairIfAccessible(reg_lo, reg_hi, kind_lo, val);
} else {
const DexFile::CodeItem* code_item = m->GetCodeItem();
- DCHECK(code_item != nullptr) << PrettyMethod(m); // Can't be NULL or how would we compile
+ DCHECK(code_item != nullptr) << PrettyMethod(m); // Can't be null or how would we compile
// its instructions?
uint32_t* addr = GetVRegAddrFromQuickCode(
cur_quick_frame_, code_item, frame_info.CoreSpillMask(),
@@ -372,7 +372,7 @@ bool StackVisitor::SetVRegFromQuickCode(mirror::ArtMethod* m, uint16_t vreg, uin
return SetRegisterIfAccessible(reg, new_value, kind);
} else {
const DexFile::CodeItem* code_item = m->GetCodeItem();
- DCHECK(code_item != nullptr) << PrettyMethod(m); // Can't be NULL or how would we compile
+ DCHECK(code_item != nullptr) << PrettyMethod(m); // Can't be null or how would we compile
// its instructions?
uint32_t* addr = GetVRegAddrFromQuickCode(
cur_quick_frame_, code_item, frame_info.CoreSpillMask(),
@@ -390,7 +390,7 @@ bool StackVisitor::SetVRegFromOptimizedCode(mirror::ArtMethod* m, uint16_t vreg,
CodeInfo code_info = m->GetOptimizedCodeInfo();
StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset);
const DexFile::CodeItem* code_item = m->GetCodeItem();
- DCHECK(code_item != nullptr) << PrettyMethod(m); // Can't be NULL or how would we compile
+ DCHECK(code_item != nullptr) << PrettyMethod(m); // Can't be null or how would we compile
// its instructions?
uint16_t number_of_dex_registers = code_item->registers_size_;
DCHECK_LT(vreg, number_of_dex_registers);
@@ -488,8 +488,8 @@ bool StackVisitor::SetVRegPair(mirror::ArtMethod* m, uint16_t vreg, uint64_t new
}
}
-bool StackVisitor::SetVRegPairFromQuickCode(mirror::ArtMethod* m, uint16_t vreg, uint64_t new_value,
- VRegKind kind_lo, VRegKind kind_hi) {
+bool StackVisitor::SetVRegPairFromQuickCode(
+ mirror::ArtMethod* m, uint16_t vreg, uint64_t new_value, VRegKind kind_lo, VRegKind kind_hi) {
const void* code_pointer = m->GetQuickOatCodePointer(sizeof(void*));
DCHECK(code_pointer != nullptr);
const VmapTable vmap_table(m->GetVmapTable(code_pointer, sizeof(void*)));
@@ -505,7 +505,7 @@ bool StackVisitor::SetVRegPairFromQuickCode(mirror::ArtMethod* m, uint16_t vreg,
return SetRegisterPairIfAccessible(reg_lo, reg_hi, new_value, is_float);
} else {
const DexFile::CodeItem* code_item = m->GetCodeItem();
- DCHECK(code_item != nullptr) << PrettyMethod(m); // Can't be NULL or how would we compile
+ DCHECK(code_item != nullptr) << PrettyMethod(m); // Can't be null or how would we compile
// its instructions?
uint32_t* addr = GetVRegAddrFromQuickCode(
cur_quick_frame_, code_item, frame_info.CoreSpillMask(),
@@ -515,8 +515,8 @@ bool StackVisitor::SetVRegPairFromQuickCode(mirror::ArtMethod* m, uint16_t vreg,
}
}
-bool StackVisitor::SetVRegPairFromOptimizedCode(mirror::ArtMethod* m, uint16_t vreg, uint64_t new_value,
- VRegKind kind_lo, VRegKind kind_hi) {
+bool StackVisitor::SetVRegPairFromOptimizedCode(
+ mirror::ArtMethod* m, uint16_t vreg, uint64_t new_value, VRegKind kind_lo, VRegKind kind_hi) {
uint32_t low_32bits = Low32Bits(new_value);
uint32_t high_32bits = High32Bits(new_value);
bool success = SetVRegFromOptimizedCode(m, vreg, low_32bits, kind_lo);
@@ -585,14 +585,14 @@ void StackVisitor::SetFPR(uint32_t reg, uintptr_t value) {
uintptr_t StackVisitor::GetReturnPc() const {
uint8_t* sp = reinterpret_cast<uint8_t*>(GetCurrentQuickFrame());
- DCHECK(sp != NULL);
+ DCHECK(sp != nullptr);
uint8_t* pc_addr = sp + GetMethod()->GetReturnPcOffset().SizeValue();
return *reinterpret_cast<uintptr_t*>(pc_addr);
}
void StackVisitor::SetReturnPc(uintptr_t new_ret_pc) {
uint8_t* sp = reinterpret_cast<uint8_t*>(GetCurrentQuickFrame());
- CHECK(sp != NULL);
+ CHECK(sp != nullptr);
uint8_t* pc_addr = sp + GetMethod()->GetReturnPcOffset().SizeValue();
*reinterpret_cast<uintptr_t*>(pc_addr) = new_ret_pc;
}
@@ -600,7 +600,7 @@ void StackVisitor::SetReturnPc(uintptr_t new_ret_pc) {
size_t StackVisitor::ComputeNumFrames(Thread* thread) {
struct NumFramesVisitor : public StackVisitor {
explicit NumFramesVisitor(Thread* thread_in)
- : StackVisitor(thread_in, NULL), frames(0) {}
+ : StackVisitor(thread_in, nullptr), frames(0) {}
bool VisitFrame() OVERRIDE {
frames++;
@@ -652,7 +652,7 @@ bool StackVisitor::GetNextMethodAndDexPc(mirror::ArtMethod** next_method, uint32
void StackVisitor::DescribeStack(Thread* thread) {
struct DescribeStackVisitor : public StackVisitor {
explicit DescribeStackVisitor(Thread* thread_in)
- : StackVisitor(thread_in, NULL) {}
+ : StackVisitor(thread_in, nullptr) {}
bool VisitFrame() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
LOG(INFO) << "Frame Id=" << GetFrameId() << " " << DescribeLocation();
@@ -666,7 +666,7 @@ void StackVisitor::DescribeStack(Thread* thread) {
std::string StackVisitor::DescribeLocation() const {
std::string result("Visiting method '");
mirror::ArtMethod* m = GetMethod();
- if (m == NULL) {
+ if (m == nullptr) {
return "upcall";
}
result += PrettyMethod(m);
@@ -713,24 +713,24 @@ void StackVisitor::WalkStack(bool include_transitions) {
bool exit_stubs_installed = Runtime::Current()->GetInstrumentation()->AreExitStubsInstalled();
uint32_t instrumentation_stack_depth = 0;
- for (const ManagedStack* current_fragment = thread_->GetManagedStack(); current_fragment != NULL;
- current_fragment = current_fragment->GetLink()) {
+ for (const ManagedStack* current_fragment = thread_->GetManagedStack();
+ current_fragment != nullptr; current_fragment = current_fragment->GetLink()) {
cur_shadow_frame_ = current_fragment->GetTopShadowFrame();
cur_quick_frame_ = current_fragment->GetTopQuickFrame();
cur_quick_frame_pc_ = 0;
- if (cur_quick_frame_ != NULL) { // Handle quick stack frames.
+ if (cur_quick_frame_ != nullptr) { // Handle quick stack frames.
// Can't be both a shadow and a quick fragment.
- DCHECK(current_fragment->GetTopShadowFrame() == NULL);
+ DCHECK(current_fragment->GetTopShadowFrame() == nullptr);
mirror::ArtMethod* method = cur_quick_frame_->AsMirrorPtr();
- while (method != NULL) {
+ while (method != nullptr) {
SanityCheckFrame();
bool should_continue = VisitFrame();
if (UNLIKELY(!should_continue)) {
return;
}
- if (context_ != NULL) {
+ if (context_ != nullptr) {
context_->FillCalleeSaves(*this);
}
size_t frame_size = method->GetFrameSizeInBytes();
@@ -748,7 +748,8 @@ void StackVisitor::WalkStack(bool include_transitions) {
if (GetMethod() == Runtime::Current()->GetCalleeSaveMethod(Runtime::kSaveAll)) {
// Skip runtime save all callee frames which are used to deliver exceptions.
} else if (instrumentation_frame.interpreter_entry_) {
- mirror::ArtMethod* callee = Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs);
+ mirror::ArtMethod* callee =
+ Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs);
CHECK_EQ(GetMethod(), callee) << "Expected: " << PrettyMethod(callee) << " Found: "
<< PrettyMethod(GetMethod());
} else if (instrumentation_frame.method_ != GetMethod()) {
@@ -771,7 +772,7 @@ void StackVisitor::WalkStack(bool include_transitions) {
cur_depth_++;
method = cur_quick_frame_->AsMirrorPtr();
}
- } else if (cur_shadow_frame_ != NULL) {
+ } else if (cur_shadow_frame_ != nullptr) {
do {
SanityCheckFrame();
bool should_continue = VisitFrame();
@@ -780,7 +781,7 @@ void StackVisitor::WalkStack(bool include_transitions) {
}
cur_depth_++;
cur_shadow_frame_ = cur_shadow_frame_->GetLink();
- } while (cur_shadow_frame_ != NULL);
+ } while (cur_shadow_frame_ != nullptr);
}
if (include_transitions) {
bool should_continue = VisitFrame();
diff --git a/runtime/stack.h b/runtime/stack.h
index fbb0aa4..ed9e458 100644
--- a/runtime/stack.h
+++ b/runtime/stack.h
@@ -295,11 +295,12 @@ class ShadowFrame {
}
StackReference<mirror::Object>* References() {
- return const_cast<StackReference<mirror::Object>*>(const_cast<const ShadowFrame*>(this)->References());
+ return const_cast<StackReference<mirror::Object>*>(
+ const_cast<const ShadowFrame*>(this)->References());
}
const uint32_t number_of_vregs_;
- // Link to previous shadow frame or NULL.
+ // Link to previous shadow frame or null.
ShadowFrame* link_;
mirror::ArtMethod* method_;
uint32_t dex_pc_;
@@ -571,7 +572,8 @@ class StackVisitor {
* Special temporaries may have custom locations and the logic above deals with that.
* However, non-special temporaries are placed relative to the outs.
*/
- int temps_start = sizeof(StackReference<mirror::ArtMethod>) + code_item->outs_size_ * sizeof(uint32_t);
+ int temps_start = sizeof(StackReference<mirror::ArtMethod>) +
+ code_item->outs_size_ * sizeof(uint32_t);
int relative_offset = (reg - (temp_threshold + max_num_special_temps)) * sizeof(uint32_t);
return temps_start + relative_offset;
} else if (reg < num_regs) {
diff --git a/runtime/thread-inl.h b/runtime/thread-inl.h
index 16add79..f7ef894 100644
--- a/runtime/thread-inl.h
+++ b/runtime/thread-inl.h
@@ -35,10 +35,10 @@ static inline Thread* ThreadForEnv(JNIEnv* env) {
}
inline Thread* Thread::Current() {
- // We rely on Thread::Current returning NULL for a detached thread, so it's not obvious
+ // We rely on Thread::Current returning null for a detached thread, so it's not obvious
// that we can replace this with a direct %fs access on x86.
if (!is_started_) {
- return NULL;
+ return nullptr;
} else {
void* thread = pthread_getspecific(Thread::pthread_key_self_);
return reinterpret_cast<Thread*>(thread);
@@ -92,7 +92,7 @@ inline void Thread::AssertThreadSuspensionIsAllowable(bool check_locks) const {
// We expect no locks except the mutator_lock_ or thread list suspend thread lock.
if (i != kMutatorLock) {
BaseMutex* held_mutex = GetHeldMutex(static_cast<LockLevel>(i));
- if (held_mutex != NULL) {
+ if (held_mutex != nullptr) {
LOG(ERROR) << "holding \"" << held_mutex->GetName()
<< "\" at point where thread suspension is expected";
bad_mutexes_held = true;
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 5ca51fb..fa65bce 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -588,7 +588,8 @@ void Thread::Dump(std::ostream& os) const {
mirror::String* Thread::GetThreadName(const ScopedObjectAccessAlreadyRunnable& soa) const {
ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_Thread_name);
- return (tlsPtr_.opeer != nullptr) ? reinterpret_cast<mirror::String*>(f->GetObject(tlsPtr_.opeer)) : nullptr;
+ return (tlsPtr_.opeer != nullptr) ?
+ reinterpret_cast<mirror::String*>(f->GetObject(tlsPtr_.opeer)) : nullptr;
}
void Thread::GetThreadName(std::string& name) const {
@@ -713,9 +714,8 @@ bool Thread::RequestCheckpoint(Closure* function) {
union StateAndFlags new_state_and_flags;
new_state_and_flags.as_int = old_state_and_flags.as_int;
new_state_and_flags.as_struct.flags |= kCheckpointRequest;
- bool success =
- tls32_.state_and_flags.as_atomic_int.CompareExchangeStrongSequentiallyConsistent(old_state_and_flags.as_int,
- new_state_and_flags.as_int);
+ bool success = tls32_.state_and_flags.as_atomic_int.CompareExchangeStrongSequentiallyConsistent(
+ old_state_and_flags.as_int, new_state_and_flags.as_int);
if (UNLIKELY(!success)) {
// The thread changed state before the checkpoint was installed.
CHECK_EQ(tlsPtr_.checkpoint_functions[available_checkpoint], function);
@@ -1005,8 +1005,8 @@ static bool ShouldShowNativeStack(const Thread* thread)
// Threads with no managed stack frames should be shown.
const ManagedStack* managed_stack = thread->GetManagedStack();
- if (managed_stack == NULL || (managed_stack->GetTopQuickFrame() == NULL &&
- managed_stack->GetTopShadowFrame() == NULL)) {
+ if (managed_stack == nullptr || (managed_stack->GetTopQuickFrame() == nullptr &&
+ managed_stack->GetTopShadowFrame() == nullptr)) {
return true;
}
@@ -1097,7 +1097,7 @@ void Thread::Startup() {
{
// MutexLock to keep annotalysis happy.
//
- // Note we use nullptr for the thread because Thread::Current can
+ // Note we use null for the thread because Thread::Current can
// return garbage since (is_started_ == true) and
// Thread::pthread_key_self_ is not yet initialized.
// This was seen on glibc.
@@ -1162,7 +1162,7 @@ Thread::Thread(bool daemon) : tls32_(daemon), wait_monitor_(nullptr), interrupte
bool Thread::IsStillStarting() const {
// You might think you can check whether the state is kStarting, but for much of thread startup,
// the thread is in kNative; it might also be in kVmWait.
- // You might think you can check whether the peer is nullptr, but the peer is actually created and
+ // You might think you can check whether the peer is null, but the peer is actually created and
// assigned fairly early on, and needs to be.
// It turns out that the last thing to change is the thread name; that's a good proxy for "has
// this thread _ever_ entered kRunnable".
@@ -1424,7 +1424,7 @@ mirror::Object* Thread::DecodeJObject(jobject obj) const {
DCHECK_EQ(kind, kWeakGlobal);
result = tlsPtr_.jni_env->vm->DecodeWeakGlobal(const_cast<Thread*>(this), ref);
if (Runtime::Current()->IsClearedJniWeakGlobal(result)) {
- // This is a special case where it's okay to return nullptr.
+ // This is a special case where it's okay to return null.
expect_null = true;
result = nullptr;
}
@@ -2197,7 +2197,7 @@ class ReferenceMapVisitor : public StackVisitor {
const uint8_t* native_gc_map = m->GetNativeGcMap(sizeof(void*));
CHECK(native_gc_map != nullptr) << PrettyMethod(m);
const DexFile::CodeItem* code_item = m->GetCodeItem();
- // Can't be nullptr or how would we compile its instructions?
+ // Can't be null or how would we compile its instructions?
DCHECK(code_item != nullptr) << PrettyMethod(m);
NativePcOffsetToReferenceMap map(native_gc_map);
size_t num_regs = std::min(map.RegWidth() * 8,
diff --git a/runtime/thread.h b/runtime/thread.h
index 719668b..dd9e734 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -186,7 +186,7 @@ class Thread {
LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- // Dumps the SIGQUIT per-thread header. 'thread' can be NULL for a non-attached thread, in which
+ // Dumps the SIGQUIT per-thread header. 'thread' can be null for a non-attached thread, in which
// case we use 'tid' to identify the thread, and we'll include as much information as we can.
static void DumpState(std::ostream& os, const Thread* thread, pid_t tid)
LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
@@ -246,7 +246,7 @@ class Thread {
// Once called thread suspension will cause an assertion failure.
const char* StartAssertNoThreadSuspension(const char* cause) {
if (kIsDebugBuild) {
- CHECK(cause != NULL);
+ CHECK(cause != nullptr);
const char* previous_cause = tlsPtr_.last_no_thread_suspension_cause;
tls32_.no_thread_suspension++;
tlsPtr_.last_no_thread_suspension_cause = cause;
@@ -298,7 +298,7 @@ class Thread {
return tls32_.tid;
}
- // Returns the java.lang.Thread's name, or NULL if this Thread* doesn't have a peer.
+ // Returns the java.lang.Thread's name, or null if this Thread* doesn't have a peer.
mirror::String* GetThreadName(const ScopedObjectAccessAlreadyRunnable& ts) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -341,7 +341,7 @@ class Thread {
void SetException(mirror::Throwable* new_exception)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- CHECK(new_exception != NULL);
+ CHECK(new_exception != nullptr);
// TODO: DCHECK(!IsExceptionPending());
tlsPtr_.exception = new_exception;
}
@@ -393,11 +393,11 @@ class Thread {
(tlsPtr_.managed_stack.GetTopShadowFrame() != nullptr);
}
- // If 'msg' is NULL, no detail message is set.
+ // If 'msg' is null, no detail message is set.
void ThrowNewException(const char* exception_class_descriptor, const char* msg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- // If 'msg' is NULL, no detail message is set. An exception must be pending, and will be
+ // If 'msg' is null, no detail message is set. An exception must be pending, and will be
// used as the new exception's cause.
void ThrowNewWrappedException(const char* exception_class_descriptor, const char* msg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -489,8 +489,8 @@ class Thread {
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Convert an internal stack trace representation (returned by CreateInternalStackTrace) to a
- // StackTraceElement[]. If output_array is NULL, a new array is created, otherwise as many
- // frames as will fit are written into the given array. If stack_depth is non-NULL, it's updated
+ // StackTraceElement[]. If output_array is null, a new array is created, otherwise as many
+ // frames as will fit are written into the given array. If stack_depth is non-null, it's updated
// with the number of valid frames in the returned array.
static jobjectArray InternalStackTraceToStackTraceElementArray(
const ScopedObjectAccessAlreadyRunnable& soa, jobject internal,
@@ -1097,7 +1097,7 @@ class Thread {
// The biased card table, see CardTable for details.
uint8_t* card_table;
- // The pending exception or NULL.
+ // The pending exception or null.
mirror::Throwable* exception;
// The end of this thread's stack. This is the lowest safely-addressable address on the stack.
@@ -1133,13 +1133,13 @@ class Thread {
// Pointer to previous stack trace captured by sampling profiler.
std::vector<mirror::ArtMethod*>* stack_trace_sample;
- // The next thread in the wait set this thread is part of or NULL if not waiting.
+ // The next thread in the wait set this thread is part of or null if not waiting.
Thread* wait_next;
// If we're blocked in MonitorEnter, this is the object we're trying to lock.
mirror::Object* monitor_enter_object;
- // Top of linked list of handle scopes or nullptr for none.
+ // Top of linked list of handle scopes or null for none.
HandleScope* top_handle_scope;
// Needed to get the right ClassLoader in JNI_OnLoad, but also
@@ -1174,7 +1174,7 @@ class Thread {
// If no_thread_suspension_ is > 0, what is causing that assertion.
const char* last_no_thread_suspension_cause;
- // Pending checkpoint function or NULL if non-pending. Installation guarding by
+ // Pending checkpoint function or null if non-pending. Installation guarding by
// Locks::thread_suspend_count_lock_.
Closure* checkpoint_functions[kMaxCheckpoints];
@@ -1215,7 +1215,7 @@ class Thread {
// Condition variable waited upon during a wait.
ConditionVariable* wait_cond_ GUARDED_BY(wait_mutex_);
- // Pointer to the monitor lock we're currently waiting on or NULL if not waiting.
+ // Pointer to the monitor lock we're currently waiting on or null if not waiting.
Monitor* wait_monitor_ GUARDED_BY(wait_mutex_);
// Thread "interrupted" status; stays raised until queried or thrown.
diff --git a/runtime/thread_linux.cc b/runtime/thread_linux.cc
index 0284364..0526f49 100644
--- a/runtime/thread_linux.cc
+++ b/runtime/thread_linux.cc
@@ -50,26 +50,26 @@ void Thread::SetUpAlternateSignalStack() {
ss.ss_sp = new uint8_t[kHostAltSigStackSize];
ss.ss_size = kHostAltSigStackSize;
ss.ss_flags = 0;
- CHECK(ss.ss_sp != NULL);
- SigAltStack(&ss, NULL);
+ CHECK(ss.ss_sp != nullptr);
+ SigAltStack(&ss, nullptr);
// Double-check that it worked.
- ss.ss_sp = NULL;
- SigAltStack(NULL, &ss);
+ ss.ss_sp = nullptr;
+ SigAltStack(nullptr, &ss);
VLOG(threads) << "Alternate signal stack is " << PrettySize(ss.ss_size) << " at " << ss.ss_sp;
}
void Thread::TearDownAlternateSignalStack() {
// Get the pointer so we can free the memory.
stack_t ss;
- SigAltStack(NULL, &ss);
+ SigAltStack(nullptr, &ss);
uint8_t* allocated_signal_stack = reinterpret_cast<uint8_t*>(ss.ss_sp);
// Tell the kernel to stop using it.
- ss.ss_sp = NULL;
+ ss.ss_sp = nullptr;
ss.ss_flags = SS_DISABLE;
ss.ss_size = kHostAltSigStackSize; // Avoid ENOMEM failure with Mac OS' buggy libc.
- SigAltStack(&ss, NULL);
+ SigAltStack(&ss, nullptr);
// Free it.
delete[] allocated_signal_stack;
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index 560bcc1..cc54bbd 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -116,9 +116,9 @@ void ThreadList::DumpForSigQuit(std::ostream& os) {
}
static void DumpUnattachedThread(std::ostream& os, pid_t tid) NO_THREAD_SAFETY_ANALYSIS {
- // TODO: No thread safety analysis as DumpState with a NULL thread won't access fields, should
+ // TODO: No thread safety analysis as DumpState with a null thread won't access fields, should
// refactor DumpState to avoid skipping analysis.
- Thread::DumpState(os, NULL, tid);
+ Thread::DumpState(os, nullptr, tid);
DumpKernelStack(os, tid, " kernel: ", false);
// TODO: Reenable this when the native code in system_server can handle it.
// Currently "adb shell kill -3 `pid system_server`" will cause it to exit.
@@ -136,7 +136,7 @@ void ThreadList::DumpUnattachedThreads(std::ostream& os) {
Thread* self = Thread::Current();
dirent* e;
- while ((e = readdir(d)) != NULL) {
+ while ((e = readdir(d)) != nullptr) {
char* end;
pid_t tid = strtol(e->d_name, &end, 10);
if (!*end) {
@@ -602,7 +602,7 @@ static void ThreadSuspendByPeerWarning(Thread* self, LogSeverity severity, const
scoped_name_string(env, (jstring)env->GetObjectField(peer,
WellKnownClasses::java_lang_Thread_name));
ScopedUtfChars scoped_name_chars(env, scoped_name_string.get());
- if (scoped_name_chars.c_str() == NULL) {
+ if (scoped_name_chars.c_str() == nullptr) {
LOG(severity) << message << ": " << peer;
env->ExceptionClear();
} else {
@@ -813,7 +813,7 @@ Thread* ThreadList::FindThreadByThreadId(uint32_t thin_lock_id) {
return thread;
}
}
- return NULL;
+ return nullptr;
}
void ThreadList::SuspendAllForDebugger() {
@@ -865,7 +865,7 @@ void ThreadList::SuspendSelfForDebugger() {
// The debugger thread must not suspend itself due to debugger activity!
Thread* debug_thread = Dbg::GetDebugThread();
- CHECK(debug_thread != NULL);
+ CHECK(debug_thread != nullptr);
CHECK(self != debug_thread);
CHECK_NE(self->GetState(), kRunnable);
Locks::mutator_lock_->AssertNotHeld(self);
@@ -1142,7 +1142,7 @@ void ThreadList::Unregister(Thread* self) {
// Clear the TLS data, so that the underlying native thread is recognizably detached.
// (It may wish to reattach later.)
- CHECK_PTHREAD_CALL(pthread_setspecific, (Thread::pthread_key_self_, NULL), "detach self");
+ CHECK_PTHREAD_CALL(pthread_setspecific, (Thread::pthread_key_self_, nullptr), "detach self");
// Signal that a thread just detached.
MutexLock mu(nullptr, *Locks::thread_list_lock_);
diff --git a/runtime/thread_list.h b/runtime/thread_list.h
index fa747b8..0f094cc 100644
--- a/runtime/thread_list.h
+++ b/runtime/thread_list.h
@@ -68,7 +68,7 @@ class ThreadList {
// Suspend a thread using a peer, typically used by the debugger. Returns the thread on success,
- // else NULL. The peer is used to identify the thread to avoid races with the thread terminating.
+ // else null. The peer is used to identify the thread to avoid races with the thread terminating.
// If the thread should be suspended then value of request_suspension should be true otherwise
// the routine will wait for a previous suspend request. If the suspension times out then *timeout
// is set to true.
@@ -79,7 +79,7 @@ class ThreadList {
Locks::thread_suspend_count_lock_);
// Suspend a thread using its thread id, typically used by lock/monitor inflation. Returns the
- // thread on success else NULL. The thread id is used to identify the thread to avoid races with
+ // thread on success else null. The thread id is used to identify the thread to avoid races with
// the thread terminating. Note that as thread ids are recycled this may not suspend the expected
// thread, that may be terminating. If the suspension times out then *timeout is set to true.
Thread* SuspendThreadByThreadId(uint32_t thread_id, bool debug_suspension, bool* timed_out)
@@ -164,7 +164,7 @@ class ThreadList {
LOCKS_EXCLUDED(Locks::thread_list_lock_,
Locks::thread_suspend_count_lock_);
- void AssertThreadsAreSuspended(Thread* self, Thread* ignore1, Thread* ignore2 = NULL)
+ void AssertThreadsAreSuspended(Thread* self, Thread* ignore1, Thread* ignore2 = nullptr)
LOCKS_EXCLUDED(Locks::thread_list_lock_,
Locks::thread_suspend_count_lock_);
diff --git a/runtime/thread_pool.cc b/runtime/thread_pool.cc
index 2a82285..ce76eae 100644
--- a/runtime/thread_pool.cc
+++ b/runtime/thread_pool.cc
@@ -158,7 +158,7 @@ Task* ThreadPool::GetTask(Thread* self) {
--waiting_count_;
}
- // We are shutting down, return nullptr to tell the worker thread to stop looping.
+ // We are shutting down, return null to tell the worker thread to stop looping.
return nullptr;
}
diff --git a/runtime/thread_pool.h b/runtime/thread_pool.h
index 79b57af..0557708 100644
--- a/runtime/thread_pool.h
+++ b/runtime/thread_pool.h
@@ -112,7 +112,7 @@ class ThreadPool {
// get a task to run, blocks if there are no tasks left
virtual Task* GetTask(Thread* self);
- // Try to get a task, returning NULL if there is none available.
+ // Try to get a task, returning null if there is none available.
Task* TryGetTask(Thread* self);
Task* TryGetTaskLocked() EXCLUSIVE_LOCKS_REQUIRED(task_queue_lock_);
@@ -166,7 +166,7 @@ class WorkStealingWorker : public ThreadPoolWorker {
virtual ~WorkStealingWorker();
bool IsRunningTask() const {
- return task_ != NULL;
+ return task_ != nullptr;
}
protected:
diff --git a/runtime/trace.h b/runtime/trace.h
index b8329ff..1ecd4d8 100644
--- a/runtime/trace.h
+++ b/runtime/trace.h
@@ -172,7 +172,7 @@ class Trace FINAL : public instrumentation::InstrumentationListener {
void WriteToBuf(const uint8_t* src, size_t src_size)
EXCLUSIVE_LOCKS_REQUIRED(streaming_lock_);
- // Singleton instance of the Trace or NULL when no method tracing is active.
+ // Singleton instance of the Trace or null when no method tracing is active.
static Trace* volatile the_trace_ GUARDED_BY(Locks::trace_lock_);
// The default profiler clock source.
@@ -184,7 +184,7 @@ class Trace FINAL : public instrumentation::InstrumentationListener {
// Used to remember an unused stack trace to avoid re-allocation during sampling.
static std::unique_ptr<std::vector<mirror::ArtMethod*>> temp_stack_trace_;
- // File to write trace data out to, NULL if direct to ddms.
+ // File to write trace data out to, null if direct to ddms.
std::unique_ptr<File> trace_file_;
// Buffer to store trace data.
diff --git a/runtime/utils.cc b/runtime/utils.cc
index a303aa4..ec7131d 100644
--- a/runtime/utils.cc
+++ b/runtime/utils.cc
@@ -60,7 +60,7 @@ static constexpr bool kUseAddr2line = !kIsTargetBuild;
pid_t GetTid() {
#if defined(__APPLE__)
uint64_t owner;
- CHECK_PTHREAD_CALL(pthread_threadid_np, (NULL, &owner), __FUNCTION__); // Requires Mac OS 10.6
+ CHECK_PTHREAD_CALL(pthread_threadid_np, (nullptr, &owner), __FUNCTION__); // Requires Mac OS 10.6
return owner;
#elif defined(__BIONIC__)
return gettid();
@@ -205,7 +205,7 @@ bool PrintFileToLog(const std::string& file_name, LogSeverity level) {
}
std::string GetIsoDate() {
- time_t now = time(NULL);
+ time_t now = time(nullptr);
tm tmbuf;
tm* ptm = localtime_r(&now, &tmbuf);
return StringPrintf("%04d-%02d-%02d %02d:%02d:%02d",
@@ -220,7 +220,7 @@ uint64_t MilliTime() {
return static_cast<uint64_t>(now.tv_sec) * UINT64_C(1000) + now.tv_nsec / UINT64_C(1000000);
#else // __APPLE__
timeval now;
- gettimeofday(&now, NULL);
+ gettimeofday(&now, nullptr);
return static_cast<uint64_t>(now.tv_sec) * UINT64_C(1000) + now.tv_usec / UINT64_C(1000);
#endif
}
@@ -232,7 +232,7 @@ uint64_t MicroTime() {
return static_cast<uint64_t>(now.tv_sec) * UINT64_C(1000000) + now.tv_nsec / UINT64_C(1000);
#else // __APPLE__
timeval now;
- gettimeofday(&now, NULL);
+ gettimeofday(&now, nullptr);
return static_cast<uint64_t>(now.tv_sec) * UINT64_C(1000000) + now.tv_usec;
#endif
}
@@ -244,7 +244,7 @@ uint64_t NanoTime() {
return static_cast<uint64_t>(now.tv_sec) * UINT64_C(1000000000) + now.tv_nsec;
#else // __APPLE__
timeval now;
- gettimeofday(&now, NULL);
+ gettimeofday(&now, nullptr);
return static_cast<uint64_t>(now.tv_sec) * UINT64_C(1000000000) + now.tv_usec * UINT64_C(1000);
#endif
}
@@ -264,7 +264,7 @@ void NanoSleep(uint64_t ns) {
timespec tm;
tm.tv_sec = 0;
tm.tv_nsec = ns;
- nanosleep(&tm, NULL);
+ nanosleep(&tm, nullptr);
}
void InitTimeSpec(bool absolute, int clock, int64_t ms, int32_t ns, timespec* ts) {
@@ -276,7 +276,7 @@ void InitTimeSpec(bool absolute, int clock, int64_t ms, int32_t ns, timespec* ts
#else
UNUSED(clock);
timeval tv;
- gettimeofday(&tv, NULL);
+ gettimeofday(&tv, nullptr);
ts->tv_sec = tv.tv_sec;
ts->tv_nsec = tv.tv_usec * 1000;
#endif
@@ -301,14 +301,14 @@ void InitTimeSpec(bool absolute, int clock, int64_t ms, int32_t ns, timespec* ts
}
std::string PrettyDescriptor(mirror::String* java_descriptor) {
- if (java_descriptor == NULL) {
+ if (java_descriptor == nullptr) {
return "null";
}
return PrettyDescriptor(java_descriptor->ToModifiedUtf8().c_str());
}
std::string PrettyDescriptor(mirror::Class* klass) {
- if (klass == NULL) {
+ if (klass == nullptr) {
return "null";
}
std::string temp;
@@ -365,7 +365,7 @@ std::string PrettyDescriptor(const char* descriptor) {
}
std::string PrettyField(ArtField* f, bool with_type) {
- if (f == NULL) {
+ if (f == nullptr) {
return "null";
}
std::string result;
@@ -436,7 +436,7 @@ std::string PrettyArguments(const char* signature) {
std::string PrettyReturnType(const char* signature) {
const char* return_type = strchr(signature, ')');
- CHECK(return_type != NULL);
+ CHECK(return_type != nullptr);
++return_type; // Skip ')'.
return PrettyDescriptor(return_type);
}
@@ -484,10 +484,10 @@ std::string PrettyMethod(uint32_t method_idx, const DexFile& dex_file, bool with
}
std::string PrettyTypeOf(mirror::Object* obj) {
- if (obj == NULL) {
+ if (obj == nullptr) {
return "null";
}
- if (obj->GetClass() == NULL) {
+ if (obj->GetClass() == nullptr) {
return "(raw)";
}
std::string temp;
@@ -499,7 +499,7 @@ std::string PrettyTypeOf(mirror::Object* obj) {
}
std::string PrettyClass(mirror::Class* c) {
- if (c == NULL) {
+ if (c == nullptr) {
return "null";
}
std::string result;
@@ -510,7 +510,7 @@ std::string PrettyClass(mirror::Class* c) {
}
std::string PrettyClassAndClassLoader(mirror::Class* c) {
- if (c == NULL) {
+ if (c == nullptr) {
return "null";
}
std::string result;
@@ -1158,9 +1158,9 @@ void GetTaskStats(pid_t tid, char* state, int* utime, int* stime, int* task_cpu)
std::vector<std::string> fields;
Split(stats, ' ', &fields);
*state = fields[0][0];
- *utime = strtoull(fields[11].c_str(), NULL, 10);
- *stime = strtoull(fields[12].c_str(), NULL, 10);
- *task_cpu = strtoull(fields[36].c_str(), NULL, 10);
+ *utime = strtoull(fields[11].c_str(), nullptr, 10);
+ *stime = strtoull(fields[12].c_str(), nullptr, 10);
+ *task_cpu = strtoull(fields[36].c_str(), nullptr, 10);
}
std::string GetSchedulerGroupName(pid_t tid) {
@@ -1358,7 +1358,7 @@ void DumpKernelStack(std::ostream& os, pid_t tid, const char* prefix, bool inclu
// into "futex_wait_queue_me+0xcd/0x110".
const char* text = kernel_stack_frames[i].c_str();
const char* close_bracket = strchr(text, ']');
- if (close_bracket != NULL) {
+ if (close_bracket != nullptr) {
text = close_bracket + 2;
}
os << prefix;
@@ -1373,7 +1373,7 @@ void DumpKernelStack(std::ostream& os, pid_t tid, const char* prefix, bool inclu
const char* GetAndroidRoot() {
const char* android_root = getenv("ANDROID_ROOT");
- if (android_root == NULL) {
+ if (android_root == nullptr) {
if (OS::DirectoryExists("/system")) {
android_root = "/system";
} else {
@@ -1401,7 +1401,7 @@ const char* GetAndroidData() {
const char* GetAndroidDataSafe(std::string* error_msg) {
const char* android_data = getenv("ANDROID_DATA");
- if (android_data == NULL) {
+ if (android_data == nullptr) {
if (OS::DirectoryExists("/data")) {
android_data = "/data";
} else {
@@ -1563,7 +1563,7 @@ bool Exec(std::vector<std::string>& arg_vector, std::string* error_msg) {
CHECK(arg_str != nullptr) << i;
args.push_back(arg_str);
}
- args.push_back(NULL);
+ args.push_back(nullptr);
// fork and exec
pid_t pid = fork();
diff --git a/runtime/utils.h b/runtime/utils.h
index 6708c67..853fa08 100644
--- a/runtime/utils.h
+++ b/runtime/utils.h
@@ -508,7 +508,7 @@ const char* GetAndroidRoot();
// Find $ANDROID_DATA, /data, or abort.
const char* GetAndroidData();
-// Find $ANDROID_DATA, /data, or return nullptr.
+// Find $ANDROID_DATA, /data, or return null.
const char* GetAndroidDataSafe(std::string* error_msg);
// Returns the dalvik-cache location, with subdir appended. Returns the empty string if the cache
diff --git a/runtime/utils_test.cc b/runtime/utils_test.cc
index 6ccbd13..ae24b77 100644
--- a/runtime/utils_test.cc
+++ b/runtime/utils_test.cc
@@ -106,7 +106,7 @@ TEST_F(UtilsTest, PrettyReturnType) {
TEST_F(UtilsTest, PrettyTypeOf) {
ScopedObjectAccess soa(Thread::Current());
- EXPECT_EQ("null", PrettyTypeOf(NULL));
+ EXPECT_EQ("null", PrettyTypeOf(nullptr));
StackHandleScope<2> hs(soa.Self());
Handle<mirror::String> s(hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), "")));
@@ -116,7 +116,7 @@ TEST_F(UtilsTest, PrettyTypeOf) {
EXPECT_EQ("short[]", PrettyTypeOf(a.Get()));
mirror::Class* c = class_linker_->FindSystemClass(soa.Self(), "[Ljava/lang/String;");
- ASSERT_TRUE(c != NULL);
+ ASSERT_TRUE(c != nullptr);
mirror::Object* o = mirror::ObjectArray<mirror::String>::Alloc(soa.Self(), c, 0);
EXPECT_EQ("java.lang.String[]", PrettyTypeOf(o));
EXPECT_EQ("java.lang.Class<java.lang.String[]>", PrettyTypeOf(o->GetClass()));
@@ -124,25 +124,25 @@ TEST_F(UtilsTest, PrettyTypeOf) {
TEST_F(UtilsTest, PrettyClass) {
ScopedObjectAccess soa(Thread::Current());
- EXPECT_EQ("null", PrettyClass(NULL));
+ EXPECT_EQ("null", PrettyClass(nullptr));
mirror::Class* c = class_linker_->FindSystemClass(soa.Self(), "[Ljava/lang/String;");
- ASSERT_TRUE(c != NULL);
+ ASSERT_TRUE(c != nullptr);
mirror::Object* o = mirror::ObjectArray<mirror::String>::Alloc(soa.Self(), c, 0);
EXPECT_EQ("java.lang.Class<java.lang.String[]>", PrettyClass(o->GetClass()));
}
TEST_F(UtilsTest, PrettyClassAndClassLoader) {
ScopedObjectAccess soa(Thread::Current());
- EXPECT_EQ("null", PrettyClassAndClassLoader(NULL));
+ EXPECT_EQ("null", PrettyClassAndClassLoader(nullptr));
mirror::Class* c = class_linker_->FindSystemClass(soa.Self(), "[Ljava/lang/String;");
- ASSERT_TRUE(c != NULL);
+ ASSERT_TRUE(c != nullptr);
mirror::Object* o = mirror::ObjectArray<mirror::String>::Alloc(soa.Self(), c, 0);
EXPECT_EQ("java.lang.Class<java.lang.String[],null>", PrettyClassAndClassLoader(o->GetClass()));
}
TEST_F(UtilsTest, PrettyField) {
ScopedObjectAccess soa(Thread::Current());
- EXPECT_EQ("null", PrettyField(NULL));
+ EXPECT_EQ("null", PrettyField(nullptr));
mirror::Class* java_lang_String = class_linker_->FindSystemClass(soa.Self(),
"Ljava/lang/String;");
@@ -216,21 +216,21 @@ TEST_F(UtilsTest, MangleForJni) {
TEST_F(UtilsTest, JniShortName_JniLongName) {
ScopedObjectAccess soa(Thread::Current());
mirror::Class* c = class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/String;");
- ASSERT_TRUE(c != NULL);
+ ASSERT_TRUE(c != nullptr);
mirror::ArtMethod* m;
m = c->FindVirtualMethod("charAt", "(I)C");
- ASSERT_TRUE(m != NULL);
+ ASSERT_TRUE(m != nullptr);
EXPECT_EQ("Java_java_lang_String_charAt", JniShortName(m));
EXPECT_EQ("Java_java_lang_String_charAt__I", JniLongName(m));
m = c->FindVirtualMethod("indexOf", "(Ljava/lang/String;I)I");
- ASSERT_TRUE(m != NULL);
+ ASSERT_TRUE(m != nullptr);
EXPECT_EQ("Java_java_lang_String_indexOf", JniShortName(m));
EXPECT_EQ("Java_java_lang_String_indexOf__Ljava_lang_String_2I", JniLongName(m));
m = c->FindDirectMethod("copyValueOf", "([CII)Ljava/lang/String;");
- ASSERT_TRUE(m != NULL);
+ ASSERT_TRUE(m != nullptr);
EXPECT_EQ("Java_java_lang_String_copyValueOf", JniShortName(m));
EXPECT_EQ("Java_java_lang_String_copyValueOf___3CII", JniLongName(m));
}
diff --git a/runtime/verifier/dex_gc_map.cc b/runtime/verifier/dex_gc_map.cc
index cd0b137..c435f9f 100644
--- a/runtime/verifier/dex_gc_map.cc
+++ b/runtime/verifier/dex_gc_map.cc
@@ -49,7 +49,7 @@ const uint8_t* DexPcToReferenceMap::FindBitMap(uint16_t dex_pc, bool error_if_no
if (error_if_not_present) {
LOG(ERROR) << "Didn't find reference bit map for dex_pc " << dex_pc;
}
- return NULL;
+ return nullptr;
}
} // namespace verifier
diff --git a/runtime/verifier/dex_gc_map.h b/runtime/verifier/dex_gc_map.h
index d77ea65..03a7821 100644
--- a/runtime/verifier/dex_gc_map.h
+++ b/runtime/verifier/dex_gc_map.h
@@ -39,7 +39,7 @@ enum RegisterMapFormat {
class DexPcToReferenceMap {
public:
explicit DexPcToReferenceMap(const uint8_t* data) : data_(data) {
- CHECK(data_ != NULL);
+ CHECK(data_ != nullptr);
}
// The total size of the reference bit map including header.
diff --git a/runtime/verifier/method_verifier.h b/runtime/verifier/method_verifier.h
index cd414c2..2914b7c 100644
--- a/runtime/verifier/method_verifier.h
+++ b/runtime/verifier/method_verifier.h
@@ -244,11 +244,11 @@ class MethodVerifier {
bool HasFailures() const;
const RegType& ResolveCheckedClass(uint32_t class_idx)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- // Returns the method of a quick invoke or nullptr if it cannot be found.
+ // Returns the method of a quick invoke or null if it cannot be found.
mirror::ArtMethod* GetQuickInvokedMethod(const Instruction* inst, RegisterLine* reg_line,
bool is_range, bool allow_failure)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- // Returns the access field of a quick field access (iget/iput-quick) or nullptr
+ // Returns the access field of a quick field access (iget/iput-quick) or null
// if it cannot be found.
ArtField* GetQuickFieldAccess(const Instruction* inst, RegisterLine* reg_line)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -585,7 +585,7 @@ class MethodVerifier {
* Widening conversions on integers and references are allowed, but
* narrowing conversions are not.
*
- * Returns the resolved method on success, nullptr on failure (with *failure
+ * Returns the resolved method on success, null on failure (with *failure
* set appropriately).
*/
mirror::ArtMethod* VerifyInvocationArgs(const Instruction* inst,
@@ -686,7 +686,7 @@ class MethodVerifier {
// The dex PC of a FindLocksAtDexPc request, -1 otherwise.
uint32_t interesting_dex_pc_;
// The container into which FindLocksAtDexPc should write the registers containing held locks,
- // nullptr if we're not doing FindLocksAtDexPc.
+ // null if we're not doing FindLocksAtDexPc.
std::vector<uint32_t>* monitor_enter_dex_pcs_;
// The types of any error that occurs.
diff --git a/runtime/verifier/method_verifier_test.cc b/runtime/verifier/method_verifier_test.cc
index f67adc1..3994536 100644
--- a/runtime/verifier/method_verifier_test.cc
+++ b/runtime/verifier/method_verifier_test.cc
@@ -31,7 +31,7 @@ class MethodVerifierTest : public CommonRuntimeTest {
protected:
void VerifyClass(const std::string& descriptor)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- ASSERT_TRUE(descriptor != NULL);
+ ASSERT_TRUE(descriptor != nullptr);
Thread* self = Thread::Current();
mirror::Class* klass = class_linker_->FindSystemClass(self, descriptor.c_str());
diff --git a/runtime/verifier/reg_type.h b/runtime/verifier/reg_type.h
index e4d2c3e..d08c937 100644
--- a/runtime/verifier/reg_type.h
+++ b/runtime/verifier/reg_type.h
@@ -707,7 +707,7 @@ class UnresolvedUninitializedRefType FINAL : public UninitializedType {
UnresolvedUninitializedRefType(const std::string& descriptor,
uint32_t allocation_pc, uint16_t cache_id)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- : UninitializedType(NULL, descriptor, allocation_pc, cache_id) {
+ : UninitializedType(nullptr, descriptor, allocation_pc, cache_id) {
if (kIsDebugBuild) {
CheckInvariants();
}
@@ -752,7 +752,7 @@ class UnresolvedUninitializedThisRefType FINAL : public UninitializedType {
UnresolvedUninitializedThisRefType(const std::string& descriptor,
uint16_t cache_id)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- : UninitializedType(NULL, descriptor, 0, cache_id) {
+ : UninitializedType(nullptr, descriptor, 0, cache_id) {
if (kIsDebugBuild) {
CheckInvariants();
}
@@ -808,7 +808,7 @@ class UnresolvedType : public RegType {
public:
UnresolvedType(const std::string& descriptor, uint16_t cache_id)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- : RegType(NULL, descriptor, cache_id) {}
+ : RegType(nullptr, descriptor, cache_id) {}
bool IsNonZeroReferenceTypes() const OVERRIDE;
};
diff --git a/runtime/verifier/reg_type_cache-inl.h b/runtime/verifier/reg_type_cache-inl.h
index 9024a7d..b6f253b 100644
--- a/runtime/verifier/reg_type_cache-inl.h
+++ b/runtime/verifier/reg_type_cache-inl.h
@@ -30,7 +30,7 @@ namespace verifier {
inline const art::verifier::RegType& RegTypeCache::GetFromId(uint16_t id) const {
DCHECK_LT(id, entries_.size());
const RegType* result = entries_[id];
- DCHECK(result != NULL);
+ DCHECK(result != nullptr);
return *result;
}
diff --git a/runtime/well_known_classes.cc b/runtime/well_known_classes.cc
index f57f9c4..a803df8 100644
--- a/runtime/well_known_classes.cc
+++ b/runtime/well_known_classes.cc
@@ -78,7 +78,7 @@ jmethodID WellKnownClasses::java_lang_ref_ReferenceQueue_add;
jmethodID WellKnownClasses::java_lang_reflect_Proxy_invoke;
jmethodID WellKnownClasses::java_lang_Runtime_nativeLoad;
jmethodID WellKnownClasses::java_lang_Short_valueOf;
-jmethodID WellKnownClasses::java_lang_System_runFinalization = NULL;
+jmethodID WellKnownClasses::java_lang_System_runFinalization = nullptr;
jmethodID WellKnownClasses::java_lang_Thread_init;
jmethodID WellKnownClasses::java_lang_Thread_run;
jmethodID WellKnownClasses::java_lang_Thread__UncaughtExceptionHandler_uncaughtException;
@@ -123,7 +123,7 @@ jfieldID WellKnownClasses::org_apache_harmony_dalvik_ddmc_Chunk_type;
static jclass CacheClass(JNIEnv* env, const char* jni_class_name) {
ScopedLocalRef<jclass> c(env, env->FindClass(jni_class_name));
- if (c.get() == NULL) {
+ if (c.get() == nullptr) {
LOG(FATAL) << "Couldn't find class: " << jni_class_name;
}
return reinterpret_cast<jclass>(env->NewGlobalRef(c.get()));
@@ -134,7 +134,7 @@ static jfieldID CacheField(JNIEnv* env, jclass c, bool is_static,
jfieldID fid = (is_static ?
env->GetStaticFieldID(c, name, signature) :
env->GetFieldID(c, name, signature));
- if (fid == NULL) {
+ if (fid == nullptr) {
ScopedObjectAccess soa(env);
std::ostringstream os;
WellKnownClasses::ToClass(c)->DumpClass(os, mirror::Class::kDumpClassFullDetail);
@@ -149,7 +149,7 @@ jmethodID CacheMethod(JNIEnv* env, jclass c, bool is_static,
jmethodID mid = (is_static ?
env->GetStaticMethodID(c, name, signature) :
env->GetMethodID(c, name, signature));
- if (mid == NULL) {
+ if (mid == nullptr) {
ScopedObjectAccess soa(env);
std::ostringstream os;
WellKnownClasses::ToClass(c)->DumpClass(os, mirror::Class::kDumpClassFullDetail);
diff --git a/runtime/zip_archive.cc b/runtime/zip_archive.cc
index ffab674..88c1f69 100644
--- a/runtime/zip_archive.cc
+++ b/runtime/zip_archive.cc
@@ -56,7 +56,7 @@ MemMap* ZipEntry::ExtractToMemMap(const char* zip_filename, const char* entry_fi
name += " extracted in memory from ";
name += zip_filename;
std::unique_ptr<MemMap> map(MemMap::MapAnonymous(name.c_str(),
- NULL, GetUncompressedLength(),
+ nullptr, GetUncompressedLength(),
PROT_READ | PROT_WRITE, false, false,
error_msg));
if (map.get() == nullptr) {
diff --git a/runtime/zip_archive.h b/runtime/zip_archive.h
index 865af51..717eb8c 100644
--- a/runtime/zip_archive.h
+++ b/runtime/zip_archive.h
@@ -57,7 +57,7 @@ class ZipEntry {
class ZipArchive {
public:
- // return new ZipArchive instance on success, NULL on error.
+ // return new ZipArchive instance on success, null on error.
static ZipArchive* Open(const char* filename, std::string* error_msg);
static ZipArchive* OpenFromFd(int fd, const char* filename, std::string* error_msg);
diff --git a/runtime/zip_archive_test.cc b/runtime/zip_archive_test.cc
index 70a4dda..aded30c 100644
--- a/runtime/zip_archive_test.cc
+++ b/runtime/zip_archive_test.cc
@@ -42,11 +42,11 @@ TEST_F(ZipArchiveTest, FindAndExtract) {
ScratchFile tmp;
ASSERT_NE(-1, tmp.GetFd());
std::unique_ptr<File> file(new File(tmp.GetFd(), tmp.GetFilename(), false));
- ASSERT_TRUE(file.get() != NULL);
+ ASSERT_TRUE(file.get() != nullptr);
bool success = zip_entry->ExtractToFile(*file, &error_msg);
ASSERT_TRUE(success) << error_msg;
ASSERT_TRUE(error_msg.empty());
- file.reset(NULL);
+ file.reset(nullptr);
uint32_t computed_crc = crc32(0L, Z_NULL, 0);
int fd = open(tmp.GetFilename().c_str(), O_RDONLY);
diff --git a/sigchainlib/sigchain.cc b/sigchainlib/sigchain.cc
index e61fcd8..0359ed3 100644
--- a/sigchainlib/sigchain.cc
+++ b/sigchainlib/sigchain.cc
@@ -51,7 +51,7 @@ class SignalAction {
// Unclaim the signal and restore the old action.
void Unclaim(int signal) {
claimed_ = false;
- sigaction(signal, &action_, NULL); // Restore old action.
+ sigaction(signal, &action_, nullptr); // Restore old action.
}
// Get the action associated with this signal.
@@ -133,14 +133,14 @@ extern "C" void InvokeUserSignalHandler(int sig, siginfo_t* info, void* context)
const struct sigaction& action = user_sigactions[sig].GetAction();
if (user_sigactions[sig].OldStyle()) {
- if (action.sa_handler != NULL) {
+ if (action.sa_handler != nullptr) {
action.sa_handler(sig);
} else {
signal(sig, SIG_DFL);
raise(sig);
}
} else {
- if (action.sa_sigaction != NULL) {
+ if (action.sa_sigaction != nullptr) {
action.sa_sigaction(sig, info, context);
} else {
signal(sig, SIG_DFL);
@@ -172,10 +172,10 @@ extern "C" int sigaction(int signal, const struct sigaction* new_action, struct
if (signal > 0 && signal < _NSIG && user_sigactions[signal].IsClaimed() &&
(new_action == nullptr || new_action->sa_handler != SIG_DFL)) {
struct sigaction saved_action = user_sigactions[signal].GetAction();
- if (new_action != NULL) {
+ if (new_action != nullptr) {
user_sigactions[signal].SetAction(*new_action, false);
}
- if (old_action != NULL) {
+ if (old_action != nullptr) {
*old_action = saved_action;
}
return 0;
@@ -242,7 +242,7 @@ extern "C" sighandler_t signal(int signal, sighandler_t handler) {
extern "C" int sigprocmask(int how, const sigset_t* bionic_new_set, sigset_t* bionic_old_set) {
const sigset_t* new_set_ptr = bionic_new_set;
sigset_t tmpset;
- if (bionic_new_set != NULL) {
+ if (bionic_new_set != nullptr) {
tmpset = *bionic_new_set;
if (how == SIG_BLOCK) {
diff --git a/test/004-JniTest/jni_test.cc b/test/004-JniTest/jni_test.cc
index 544cbc5..b23b97b 100644
--- a/test/004-JniTest/jni_test.cc
+++ b/test/004-JniTest/jni_test.cc
@@ -25,7 +25,7 @@
#error test code compiled without NDEBUG
#endif
-static JavaVM* jvm = NULL;
+static JavaVM* jvm = nullptr;
extern "C" JNIEXPORT jint JNI_OnLoad(JavaVM *vm, void *) {
assert(vm != nullptr);
@@ -38,7 +38,7 @@ static void* AttachHelper(void* arg) {
assert(jvm != nullptr);
JNIEnv* env = nullptr;
- JavaVMAttachArgs args = { JNI_VERSION_1_6, __FUNCTION__, NULL };
+ JavaVMAttachArgs args = { JNI_VERSION_1_6, __FUNCTION__, nullptr };
int attach_result = jvm->AttachCurrentThread(&env, &args);
assert(attach_result == 0);
diff --git a/test/004-SignalTest/signaltest.cc b/test/004-SignalTest/signaltest.cc
index 876d27e..1414715 100644
--- a/test/004-SignalTest/signaltest.cc
+++ b/test/004-SignalTest/signaltest.cc
@@ -89,7 +89,7 @@ extern "C" JNIEXPORT void JNICALL Java_Main_terminateSignalTest(JNIEnv*, jclass)
}
// Prevent the compiler being a smart-alec and optimizing out the assignment
-// to nullptr.
+// to null.
char *go_away_compiler = nullptr;
extern "C" JNIEXPORT jint JNICALL Java_Main_testSignal(JNIEnv*, jclass) {