Diffstat (limited to 'compiler/dex')
-rw-r--r--  compiler/dex/dataflow_iterator-inl.h | 8
-rw-r--r--  compiler/dex/dataflow_iterator.h | 2
-rw-r--r--  compiler/dex/dex_to_dex_compiler.cc | 2
-rw-r--r--  compiler/dex/mir_analysis.cc | 6
-rw-r--r--  compiler/dex/mir_dataflow.cc | 14
-rw-r--r--  compiler/dex/mir_field_info.h | 2
-rw-r--r--  compiler/dex/mir_graph.cc | 54
-rw-r--r--  compiler/dex/mir_graph.h | 2
-rw-r--r--  compiler/dex/mir_method_info.h | 4
-rw-r--r--  compiler/dex/mir_optimization.cc | 84
-rw-r--r--  compiler/dex/pass_driver.h | 2
-rw-r--r--  compiler/dex/pass_driver_me.h | 7
-rw-r--r--  compiler/dex/quick/arm/assemble_arm.cc | 18
-rw-r--r--  compiler/dex/quick/arm/call_arm.cc | 16
-rw-r--r--  compiler/dex/quick/arm/int_arm.cc | 12
-rw-r--r--  compiler/dex/quick/arm/utility_arm.cc | 12
-rw-r--r--  compiler/dex/quick/arm64/assemble_arm64.cc | 16
-rw-r--r--  compiler/dex/quick/arm64/call_arm64.cc | 16
-rw-r--r--  compiler/dex/quick/arm64/int_arm64.cc | 2
-rw-r--r--  compiler/dex/quick/arm64/utility_arm64.cc | 14
-rw-r--r--  compiler/dex/quick/codegen_util.cc | 2
-rw-r--r--  compiler/dex/quick/dex_file_method_inliner.cc | 2
-rw-r--r--  compiler/dex/quick/gen_common.cc | 26
-rw-r--r--  compiler/dex/quick/gen_loadstore.cc | 2
-rw-r--r--  compiler/dex/quick/mips/assemble_mips.cc | 8
-rw-r--r--  compiler/dex/quick/mips/call_mips.cc | 4
-rw-r--r--  compiler/dex/quick/mips/int_mips.cc | 8
-rw-r--r--  compiler/dex/quick/mips/utility_mips.cc | 12
-rw-r--r--  compiler/dex/quick/mir_to_lir.cc | 18
-rw-r--r--  compiler/dex/quick/mir_to_lir.h | 14
-rw-r--r--  compiler/dex/quick/quick_cfi_test.cc | 2
-rw-r--r--  compiler/dex/quick/quick_compiler.cc | 6
-rw-r--r--  compiler/dex/quick/ralloc_util.cc | 2
-rw-r--r--  compiler/dex/quick/x86/assemble_x86.cc | 16
-rw-r--r--  compiler/dex/quick/x86/call_x86.cc | 2
-rwxr-xr-x  compiler/dex/quick/x86/fp_x86.cc | 6
-rwxr-xr-x  compiler/dex/quick/x86/int_x86.cc | 4
-rwxr-xr-x  compiler/dex/quick/x86/target_x86.cc | 2
-rw-r--r--  compiler/dex/quick/x86/utility_x86.cc | 12
-rw-r--r--  compiler/dex/ssa_transformation.cc | 40
-rw-r--r--  compiler/dex/verification_results.cc | 2
-rw-r--r--  compiler/dex/verified_method.cc | 4
-rw-r--r--  compiler/dex/verified_method.h | 2
43 files changed, 247 insertions(+), 242 deletions(-)
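
The per-file hunks below are almost entirely mechanical replacements of NULL with nullptr in code (and of "nullptr" with "null" in comments that describe runtime values). As a minimal illustrative sketch of why C++11 code bases prefer nullptr over the NULL macro -- not taken from this change, and using hypothetical Track() overloads -- nullptr has its own type, std::nullptr_t, so it cannot be silently treated as an integer during overload resolution the way NULL can:

// Illustrative only; the Track() overloads are hypothetical and not part of
// the ART sources. NULL is an integer-typed macro, so it can silently select
// an integer overload; nullptr always binds to the pointer overload.
#include <iostream>

void Track(int)   { std::cout << "int overload\n"; }
void Track(void*) { std::cout << "pointer overload\n"; }

int main() {
  Track(0);        // Integer literal: picks Track(int).
  Track(nullptr);  // Unambiguously picks Track(void*).
  // Track(NULL);  // Implementation-defined: may pick Track(int) or fail to
  //               // compile as ambiguous -- the pitfall nullptr avoids.
  return 0;
}
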
diff --git a/compiler/dex/dataflow_iterator-inl.h b/compiler/dex/dataflow_iterator-inl.h
index 6e25db6..83dfc28 100644
--- a/compiler/dex/dataflow_iterator-inl.h
+++ b/compiler/dex/dataflow_iterator-inl.h
@@ -23,7 +23,7 @@ namespace art {
// Single forward pass over the nodes.
inline BasicBlock* DataflowIterator::ForwardSingleNext() {
- BasicBlock* res = NULL;
+ BasicBlock* res = nullptr;
// Are we not yet at the end?
if (idx_ < end_idx_) {
@@ -38,7 +38,7 @@ inline BasicBlock* DataflowIterator::ForwardSingleNext() {
// Repeat full forward passes over all nodes until no change occurs during a complete pass.
inline BasicBlock* DataflowIterator::ForwardRepeatNext() {
- BasicBlock* res = NULL;
+ BasicBlock* res = nullptr;
// Are we at the end and have we changed something?
if ((idx_ >= end_idx_) && changed_ == true) {
@@ -61,7 +61,7 @@ inline BasicBlock* DataflowIterator::ForwardRepeatNext() {
// Single reverse pass over the nodes.
inline BasicBlock* DataflowIterator::ReverseSingleNext() {
- BasicBlock* res = NULL;
+ BasicBlock* res = nullptr;
// Are we not yet at the end?
if (idx_ >= 0) {
@@ -76,7 +76,7 @@ inline BasicBlock* DataflowIterator::ReverseSingleNext() {
// Repeat full backwards passes over all nodes until no change occurs during a complete pass.
inline BasicBlock* DataflowIterator::ReverseRepeatNext() {
- BasicBlock* res = NULL;
+ BasicBlock* res = nullptr;
// Are we done and we changed something during the last iteration?
if ((idx_ < 0) && changed_) {
diff --git a/compiler/dex/dataflow_iterator.h b/compiler/dex/dataflow_iterator.h
index 2a06cec..097c2a4 100644
--- a/compiler/dex/dataflow_iterator.h
+++ b/compiler/dex/dataflow_iterator.h
@@ -72,7 +72,7 @@ namespace art {
: mir_graph_(mir_graph),
start_idx_(start_idx),
end_idx_(end_idx),
- block_id_list_(NULL),
+ block_id_list_(nullptr),
idx_(0),
repeats_(0),
changed_(false) {}
diff --git a/compiler/dex/dex_to_dex_compiler.cc b/compiler/dex/dex_to_dex_compiler.cc
index ef94d8b..d1ddfda 100644
--- a/compiler/dex/dex_to_dex_compiler.cc
+++ b/compiler/dex/dex_to_dex_compiler.cc
@@ -301,7 +301,7 @@ extern "C" void ArtCompileDEX(art::CompilerDriver& driver, const art::DexFile::C
art::DexToDexCompilationLevel dex_to_dex_compilation_level) {
UNUSED(invoke_type);
if (dex_to_dex_compilation_level != art::kDontDexToDexCompile) {
- art::DexCompilationUnit unit(NULL, class_loader, art::Runtime::Current()->GetClassLinker(),
+ art::DexCompilationUnit unit(nullptr, class_loader, art::Runtime::Current()->GetClassLinker(),
dex_file, code_item, class_def_idx, method_idx, access_flags,
driver.GetVerifiedMethod(&dex_file, method_idx));
art::optimizer::DexCompiler dex_compiler(driver, unit, dex_to_dex_compilation_level);
diff --git a/compiler/dex/mir_analysis.cc b/compiler/dex/mir_analysis.cc
index 3d7a640..9099e8a 100644
--- a/compiler/dex/mir_analysis.cc
+++ b/compiler/dex/mir_analysis.cc
@@ -968,7 +968,7 @@ void MIRGraph::AnalyzeBlock(BasicBlock* bb, MethodStats* stats) {
* edges until we reach an explicit branch or return.
*/
BasicBlock* ending_bb = bb;
- if (ending_bb->last_mir_insn != NULL) {
+ if (ending_bb->last_mir_insn != nullptr) {
uint32_t ending_flags = kAnalysisAttributes[ending_bb->last_mir_insn->dalvikInsn.opcode];
while ((ending_flags & kAnBranch) == 0) {
ending_bb = GetBasicBlock(ending_bb->fall_through);
@@ -998,7 +998,7 @@ void MIRGraph::AnalyzeBlock(BasicBlock* bb, MethodStats* stats) {
bool done = false;
while (!done) {
tbb->visited = true;
- for (MIR* mir = tbb->first_mir_insn; mir != NULL; mir = mir->next) {
+ for (MIR* mir = tbb->first_mir_insn; mir != nullptr; mir = mir->next) {
if (MIR::DecodedInstruction::IsPseudoMirOp(mir->dalvikInsn.opcode)) {
// Skip any MIR pseudo-op.
continue;
@@ -1195,7 +1195,7 @@ bool MIRGraph::SkipCompilation(std::string* skip_message) {
ClearAllVisitedFlags();
AllNodesIterator iter(this);
- for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
+ for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
AnalyzeBlock(bb, &stats);
}
diff --git a/compiler/dex/mir_dataflow.cc b/compiler/dex/mir_dataflow.cc
index eaaf540..b4aec98 100644
--- a/compiler/dex/mir_dataflow.cc
+++ b/compiler/dex/mir_dataflow.cc
@@ -989,7 +989,7 @@ bool MIRGraph::FindLocalLiveIn(BasicBlock* bb) {
MIR* mir;
ArenaBitVector *use_v, *def_v, *live_in_v;
- if (bb->data_flow_info == NULL) return false;
+ if (bb->data_flow_info == nullptr) return false;
use_v = bb->data_flow_info->use_v =
new (arena_) ArenaBitVector(arena_, GetNumOfCodeAndTempVRs(), false, kBitMapUse);
@@ -998,7 +998,7 @@ bool MIRGraph::FindLocalLiveIn(BasicBlock* bb) {
live_in_v = bb->data_flow_info->live_in_v =
new (arena_) ArenaBitVector(arena_, GetNumOfCodeAndTempVRs(), false, kBitMapLiveIn);
- for (mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+ for (mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
uint64_t df_attributes = GetDataFlowAttributes(mir);
MIR::DecodedInstruction* d_insn = &mir->dalvikInsn;
@@ -1188,7 +1188,7 @@ void MIRGraph::DataFlowSSAFormatExtended(MIR* mir) {
/* Entry function to convert a block into SSA representation */
bool MIRGraph::DoSSAConversion(BasicBlock* bb) {
- if (bb->data_flow_info == NULL) return false;
+ if (bb->data_flow_info == nullptr) return false;
/*
* Pruned SSA form: Insert phi nodes for each dalvik register marked in phi_node_blocks
@@ -1211,7 +1211,7 @@ bool MIRGraph::DoSSAConversion(BasicBlock* bb) {
}
}
- for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+ for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
mir->ssa_rep =
static_cast<struct SSARepresentation *>(arena_->Alloc(sizeof(SSARepresentation),
kArenaAllocDFInfo));
@@ -1402,8 +1402,8 @@ void MIRGraph::CountUses(BasicBlock* bb) {
return;
}
uint32_t weight = GetUseCountWeight(bb);
- for (MIR* mir = bb->first_mir_insn; (mir != NULL); mir = mir->next) {
- if (mir->ssa_rep == NULL) {
+ for (MIR* mir = bb->first_mir_insn; (mir != nullptr); mir = mir->next) {
+ if (mir->ssa_rep == nullptr) {
continue;
}
for (int i = 0; i < mir->ssa_rep->num_uses; i++) {
@@ -1448,7 +1448,7 @@ bool MIRGraph::VerifyPredInfo(BasicBlock* bb) {
void MIRGraph::VerifyDataflow() {
/* Verify if all blocks are connected as claimed */
AllNodesIterator iter(this);
- for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
+ for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
VerifyPredInfo(bb);
}
}
diff --git a/compiler/dex/mir_field_info.h b/compiler/dex/mir_field_info.h
index 11773e7..e4570fd 100644
--- a/compiler/dex/mir_field_info.h
+++ b/compiler/dex/mir_field_info.h
@@ -124,7 +124,7 @@ class MirFieldInfo {
uint16_t declaring_field_idx_;
// The type index of the class declaring the field, 0 if unresolved.
uint16_t declaring_class_idx_;
- // The dex file that defines the class containing the field and the field, nullptr if unresolved.
+ // The dex file that defines the class containing the field and the field, null if unresolved.
const DexFile* declaring_dex_file_;
};
diff --git a/compiler/dex/mir_graph.cc b/compiler/dex/mir_graph.cc
index 7d0729f..b5c42f1 100644
--- a/compiler/dex/mir_graph.cc
+++ b/compiler/dex/mir_graph.cc
@@ -81,15 +81,15 @@ const char* MIRGraph::extended_mir_op_names_[kMirOpLast - kMirOpFirst] = {
};
MIRGraph::MIRGraph(CompilationUnit* cu, ArenaAllocator* arena)
- : reg_location_(NULL),
+ : reg_location_(nullptr),
block_id_map_(std::less<unsigned int>(), arena->Adapter()),
cu_(cu),
ssa_base_vregs_(arena->Adapter(kArenaAllocSSAToDalvikMap)),
ssa_subscripts_(arena->Adapter(kArenaAllocSSAToDalvikMap)),
- vreg_to_ssa_map_(NULL),
- ssa_last_defs_(NULL),
- is_constant_v_(NULL),
- constant_values_(NULL),
+ vreg_to_ssa_map_(nullptr),
+ ssa_last_defs_(nullptr),
+ is_constant_v_(nullptr),
+ constant_values_(nullptr),
use_counts_(arena->Adapter()),
raw_use_counts_(arena->Adapter()),
num_reachable_blocks_(0),
@@ -106,24 +106,24 @@ MIRGraph::MIRGraph(CompilationUnit* cu, ArenaAllocator* arena)
topological_order_indexes_(arena->Adapter(kArenaAllocTopologicalSortOrder)),
topological_order_loop_head_stack_(arena->Adapter(kArenaAllocTopologicalSortOrder)),
max_nested_loops_(0u),
- i_dom_list_(NULL),
+ i_dom_list_(nullptr),
temp_scoped_alloc_(),
block_list_(arena->Adapter(kArenaAllocBBList)),
- try_block_addr_(NULL),
- entry_block_(NULL),
- exit_block_(NULL),
- current_code_item_(NULL),
+ try_block_addr_(nullptr),
+ entry_block_(nullptr),
+ exit_block_(nullptr),
+ current_code_item_(nullptr),
m_units_(arena->Adapter()),
method_stack_(arena->Adapter()),
current_method_(kInvalidEntry),
current_offset_(kInvalidEntry),
def_count_(0),
- opcode_count_(NULL),
+ opcode_count_(nullptr),
num_ssa_regs_(0),
extended_basic_blocks_(arena->Adapter()),
method_sreg_(0),
attributes_(METHOD_IS_LEAF), // Start with leaf assumption, change on encountering invoke.
- checkstats_(NULL),
+ checkstats_(nullptr),
arena_(arena),
backward_branches_(0),
forward_branches_(0),
@@ -185,13 +185,13 @@ BasicBlock* MIRGraph::SplitBlock(DexOffset code_offset,
BasicBlock* orig_block, BasicBlock** immed_pred_block_p) {
DCHECK_GT(code_offset, orig_block->start_offset);
MIR* insn = orig_block->first_mir_insn;
- MIR* prev = NULL; // Will be set to instruction before split.
+ MIR* prev = nullptr; // Will be set to instruction before split.
while (insn) {
if (insn->offset == code_offset) break;
prev = insn;
insn = insn->next;
}
- if (insn == NULL) {
+ if (insn == nullptr) {
LOG(FATAL) << "Break split failed";
}
// Now insn is at the instruction where we want to split, namely
@@ -530,7 +530,7 @@ BasicBlock* MIRGraph::ProcessCanSwitch(BasicBlock* cur_block, MIR* insn, DexOffs
size = switch_data[1];
first_key = switch_data[2] | (switch_data[3] << 16);
target_table = reinterpret_cast<const int*>(&switch_data[4]);
- keyTable = NULL; // Make the compiler happy.
+ keyTable = nullptr; // Make the compiler happy.
/*
* Sparse switch data format:
* ushort ident = 0x0200 magic value
@@ -718,8 +718,8 @@ void MIRGraph::InlineMethod(const DexFile::CodeItem* code_item, uint32_t access_
// If this is the first method, set up default entry and exit blocks.
if (current_method_ == 0) {
- DCHECK(entry_block_ == NULL);
- DCHECK(exit_block_ == NULL);
+ DCHECK(entry_block_ == nullptr);
+ DCHECK(exit_block_ == nullptr);
DCHECK_EQ(GetNumBlocks(), 0U);
// Use id 0 to represent a null block.
BasicBlock* null_block = CreateNewBB(kNullBlock);
@@ -755,7 +755,7 @@ void MIRGraph::InlineMethod(const DexFile::CodeItem* code_item, uint32_t access_
insn->m_unit_index = current_method_;
int width = ParseInsn(code_ptr, &insn->dalvikInsn);
Instruction::Code opcode = insn->dalvikInsn.opcode;
- if (opcode_count_ != NULL) {
+ if (opcode_count_ != nullptr) {
opcode_count_[static_cast<int>(opcode)]++;
}
@@ -879,7 +879,7 @@ void MIRGraph::InlineMethod(const DexFile::CodeItem* code_item, uint32_t access_
}
void MIRGraph::ShowOpcodeStats() {
- DCHECK(opcode_count_ != NULL);
+ DCHECK(opcode_count_ != nullptr);
LOG(INFO) << "Opcode Count";
for (int i = 0; i < kNumPackedOpcodes; i++) {
if (opcode_count_[i] != 0) {
@@ -947,7 +947,7 @@ void MIRGraph::DumpCFG(const char* dir_prefix, bool all_blocks, const char *suff
return;
}
file = fopen(fpath.c_str(), "w");
- if (file == NULL) {
+ if (file == nullptr) {
PLOG(ERROR) << "Could not open " << fpath << " for DumpCFG.";
return;
}
@@ -961,7 +961,7 @@ void MIRGraph::DumpCFG(const char* dir_prefix, bool all_blocks, const char *suff
for (idx = 0; idx < num_blocks; idx++) {
int block_idx = all_blocks ? idx : dfs_order_[idx];
BasicBlock* bb = GetBasicBlock(block_idx);
- if (bb == NULL) continue;
+ if (bb == nullptr) continue;
if (bb->block_type == kDead) continue;
if (bb->hidden) continue;
if (bb->block_type == kEntryBlock) {
@@ -1501,8 +1501,8 @@ char* MIRGraph::GetDalvikDisassembly(const MIR* mir) {
}
nop = true;
}
- int defs = (ssa_rep != NULL) ? ssa_rep->num_defs : 0;
- int uses = (ssa_rep != NULL) ? ssa_rep->num_uses : 0;
+ int defs = (ssa_rep != nullptr) ? ssa_rep->num_defs : 0;
+ int uses = (ssa_rep != nullptr) ? ssa_rep->num_uses : 0;
if (MIR::DecodedInstruction::IsPseudoMirOp(opcode)) {
// Note that this does not check the MIR's opcode in all cases. In cases where it
@@ -1530,7 +1530,7 @@ char* MIRGraph::GetDalvikDisassembly(const MIR* mir) {
for (int i = 0; i < uses; i++) {
str.append(" ");
str.append(GetSSANameWithConst(ssa_rep->uses[i], show_singles));
- if (!show_singles && (reg_location_ != NULL) && reg_location_[i].wide) {
+ if (!show_singles && (reg_location_ != nullptr) && reg_location_[i].wide) {
// For the listing, skip the high sreg.
i++;
}
@@ -1623,7 +1623,7 @@ std::string MIRGraph::GetSSAName(int ssa_reg) {
// Similar to GetSSAName, but if ssa name represents an immediate show that as well.
std::string MIRGraph::GetSSANameWithConst(int ssa_reg, bool singles_only) {
- if (reg_location_ == NULL) {
+ if (reg_location_ == nullptr) {
// Pre-SSA - just use the standard name.
return GetSSAName(ssa_reg);
}
@@ -1716,7 +1716,7 @@ CallInfo* MIRGraph::NewMemCallInfo(BasicBlock* bb, MIR* mir, InvokeType type, bo
CallInfo* info = static_cast<CallInfo*>(arena_->Alloc(sizeof(CallInfo),
kArenaAllocMisc));
MIR* move_result_mir = FindMoveResult(bb, mir);
- if (move_result_mir == NULL) {
+ if (move_result_mir == nullptr) {
info->result.location = kLocInvalid;
} else {
info->result = GetRawDest(move_result_mir);
@@ -2294,7 +2294,7 @@ bool MIR::DecodedInstruction::GetConstant(int64_t* ptr_value, bool* wide) const
void BasicBlock::ResetOptimizationFlags(uint16_t reset_flags) {
// Reset flags for all MIRs in bb.
- for (MIR* mir = first_mir_insn; mir != NULL; mir = mir->next) {
+ for (MIR* mir = first_mir_insn; mir != nullptr; mir = mir->next) {
mir->optimization_flags &= (~reset_flags);
}
}
diff --git a/compiler/dex/mir_graph.h b/compiler/dex/mir_graph.h
index d7e4dd9..0db54bf 100644
--- a/compiler/dex/mir_graph.h
+++ b/compiler/dex/mir_graph.h
@@ -602,7 +602,7 @@ class MIRGraph {
BasicBlock* GetBasicBlock(unsigned int block_id) const {
DCHECK_LT(block_id, block_list_.size()); // NOTE: NullBasicBlockId is 0.
- return (block_id == NullBasicBlockId) ? NULL : block_list_[block_id];
+ return (block_id == NullBasicBlockId) ? nullptr : block_list_[block_id];
}
size_t GetBasicBlockListCount() const {
diff --git a/compiler/dex/mir_method_info.h b/compiler/dex/mir_method_info.h
index 3706012..946c74b 100644
--- a/compiler/dex/mir_method_info.h
+++ b/compiler/dex/mir_method_info.h
@@ -88,7 +88,7 @@ class MirMethodInfo {
// The type index of the class declaring the method, 0 if unresolved.
uint16_t declaring_class_idx_;
// The dex file that defines the class containing the method and the method,
- // nullptr if unresolved.
+ // null if unresolved.
const DexFile* declaring_dex_file_;
};
@@ -223,7 +223,7 @@ class MirMethodLoweringInfo : public MirMethodInfo {
uintptr_t direct_code_;
uintptr_t direct_method_;
// Before Resolve(), target_dex_file_ and target_method_idx_ hold the verification-based
- // devirtualized invoke target if available, nullptr and 0u otherwise.
+ // devirtualized invoke target if available, null and 0u otherwise.
// After Resolve() they hold the actual target method that will be called; it will be either
// a devirtualized target method or the compilation's unit's dex file and MethodIndex().
const DexFile* target_dex_file_;
diff --git a/compiler/dex/mir_optimization.cc b/compiler/dex/mir_optimization.cc
index 546e67a..467c14e 100644
--- a/compiler/dex/mir_optimization.cc
+++ b/compiler/dex/mir_optimization.cc
@@ -55,7 +55,7 @@ void MIRGraph::SetConstantWide(int32_t ssa_reg, int64_t value) {
void MIRGraph::DoConstantPropagation(BasicBlock* bb) {
MIR* mir;
- for (mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+ for (mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
// Skip pass if BB has MIR without SSA representation.
if (mir->ssa_rep == nullptr) {
return;
@@ -116,11 +116,11 @@ void MIRGraph::DoConstantPropagation(BasicBlock* bb) {
/* Advance to next strictly dominated MIR node in an extended basic block */
MIR* MIRGraph::AdvanceMIR(BasicBlock** p_bb, MIR* mir) {
BasicBlock* bb = *p_bb;
- if (mir != NULL) {
+ if (mir != nullptr) {
mir = mir->next;
- while (mir == NULL) {
+ while (mir == nullptr) {
bb = GetBasicBlock(bb->fall_through);
- if ((bb == NULL) || Predecessors(bb) != 1) {
+ if ((bb == nullptr) || Predecessors(bb) != 1) {
// mir is null and we cannot proceed further.
break;
} else {
@@ -134,7 +134,7 @@ MIR* MIRGraph::AdvanceMIR(BasicBlock** p_bb, MIR* mir) {
/*
* To be used at an invoke mir. If the logically next mir node represents
- * a move-result, return it. Else, return NULL. If a move-result exists,
+ * a move-result, return it. Else, return nullptr. If a move-result exists,
* it is required to immediately follow the invoke with no intervening
* opcodes or incoming arcs. However, if the result of the invoke is not
* used, a move-result may not be present.
@@ -142,7 +142,7 @@ MIR* MIRGraph::AdvanceMIR(BasicBlock** p_bb, MIR* mir) {
MIR* MIRGraph::FindMoveResult(BasicBlock* bb, MIR* mir) {
BasicBlock* tbb = bb;
mir = AdvanceMIR(&tbb, mir);
- while (mir != NULL) {
+ while (mir != nullptr) {
if ((mir->dalvikInsn.opcode == Instruction::MOVE_RESULT) ||
(mir->dalvikInsn.opcode == Instruction::MOVE_RESULT_OBJECT) ||
(mir->dalvikInsn.opcode == Instruction::MOVE_RESULT_WIDE)) {
@@ -152,7 +152,7 @@ MIR* MIRGraph::FindMoveResult(BasicBlock* bb, MIR* mir) {
if (MIR::DecodedInstruction::IsPseudoMirOp(mir->dalvikInsn.opcode)) {
mir = AdvanceMIR(&tbb, mir);
} else {
- mir = NULL;
+ mir = nullptr;
}
}
return mir;
@@ -160,29 +160,29 @@ MIR* MIRGraph::FindMoveResult(BasicBlock* bb, MIR* mir) {
BasicBlock* MIRGraph::NextDominatedBlock(BasicBlock* bb) {
if (bb->block_type == kDead) {
- return NULL;
+ return nullptr;
}
DCHECK((bb->block_type == kEntryBlock) || (bb->block_type == kDalvikByteCode)
|| (bb->block_type == kExitBlock));
BasicBlock* bb_taken = GetBasicBlock(bb->taken);
BasicBlock* bb_fall_through = GetBasicBlock(bb->fall_through);
- if (((bb_fall_through == NULL) && (bb_taken != NULL)) &&
+ if (((bb_fall_through == nullptr) && (bb_taken != nullptr)) &&
((bb_taken->block_type == kDalvikByteCode) || (bb_taken->block_type == kExitBlock))) {
// Follow simple unconditional branches.
bb = bb_taken;
} else {
// Follow simple fallthrough
- bb = (bb_taken != NULL) ? NULL : bb_fall_through;
+ bb = (bb_taken != nullptr) ? nullptr : bb_fall_through;
}
- if (bb == NULL || (Predecessors(bb) != 1)) {
- return NULL;
+ if (bb == nullptr || (Predecessors(bb) != 1)) {
+ return nullptr;
}
DCHECK((bb->block_type == kDalvikByteCode) || (bb->block_type == kExitBlock));
return bb;
}
static MIR* FindPhi(BasicBlock* bb, int ssa_name) {
- for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+ for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
if (static_cast<int>(mir->dalvikInsn.opcode) == kMirOpPhi) {
for (int i = 0; i < mir->ssa_rep->num_uses; i++) {
if (mir->ssa_rep->uses[i] == ssa_name) {
@@ -191,11 +191,11 @@ static MIR* FindPhi(BasicBlock* bb, int ssa_name) {
}
}
}
- return NULL;
+ return nullptr;
}
static SelectInstructionKind SelectKind(MIR* mir) {
- // Work with the case when mir is nullptr.
+ // Work with the case when mir is null.
if (mir == nullptr) {
return kSelectNone;
}
@@ -256,7 +256,8 @@ size_t MIRGraph::GetNumAvailableVRTemps() {
}
// Calculate remaining ME temps available.
- size_t remaining_me_temps = max_available_non_special_compiler_temps_ - reserved_temps_for_backend_;
+ size_t remaining_me_temps = max_available_non_special_compiler_temps_ -
+ reserved_temps_for_backend_;
if (num_non_special_compiler_temps_ >= remaining_me_temps) {
return 0;
@@ -347,7 +348,8 @@ CompilerTemp* MIRGraph::GetNewCompilerTemp(CompilerTempType ct_type, bool wide)
size_t available_temps = GetNumAvailableVRTemps();
if (available_temps <= 0 || (available_temps <= 1 && wide)) {
if (verbose) {
- LOG(INFO) << "CompilerTemps: Not enough temp(s) of type " << ct_type_str << " are available.";
+ LOG(INFO) << "CompilerTemps: Not enough temp(s) of type " << ct_type_str
+ << " are available.";
}
return nullptr;
}
@@ -365,8 +367,8 @@ CompilerTemp* MIRGraph::GetNewCompilerTemp(CompilerTempType ct_type, bool wide)
compiler_temp->s_reg_low = AddNewSReg(compiler_temp->v_reg);
if (verbose) {
- LOG(INFO) << "CompilerTemps: New temp of type " << ct_type_str << " with v" << compiler_temp->v_reg
- << " and s" << compiler_temp->s_reg_low << " has been created.";
+ LOG(INFO) << "CompilerTemps: New temp of type " << ct_type_str << " with v"
+ << compiler_temp->v_reg << " and s" << compiler_temp->s_reg_low << " has been created.";
}
if (wide) {
@@ -478,8 +480,8 @@ bool MIRGraph::BasicBlockOpt(BasicBlock* bb) {
local_valnum.reset(new (allocator.get()) LocalValueNumbering(global_valnum.get(), bb->id,
allocator.get()));
}
- while (bb != NULL) {
- for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+ while (bb != nullptr) {
+ for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
// TUNING: use the returned value number for CSE.
if (use_lvn) {
local_valnum->GetValueNumber(mir);
@@ -538,7 +540,7 @@ bool MIRGraph::BasicBlockOpt(BasicBlock* bb) {
// Bitcode doesn't allow this optimization.
break;
}
- if (mir->next != NULL) {
+ if (mir->next != nullptr) {
MIR* mir_next = mir->next;
// Make sure result of cmp is used by next insn and nowhere else
if (IsInstructionIfCcZ(mir_next->dalvikInsn.opcode) &&
@@ -594,12 +596,12 @@ bool MIRGraph::BasicBlockOpt(BasicBlock* bb) {
cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) &&
IsInstructionIfCcZ(mir->dalvikInsn.opcode)) {
BasicBlock* ft = GetBasicBlock(bb->fall_through);
- DCHECK(ft != NULL);
+ DCHECK(ft != nullptr);
BasicBlock* ft_ft = GetBasicBlock(ft->fall_through);
BasicBlock* ft_tk = GetBasicBlock(ft->taken);
BasicBlock* tk = GetBasicBlock(bb->taken);
- DCHECK(tk != NULL);
+ DCHECK(tk != nullptr);
BasicBlock* tk_ft = GetBasicBlock(tk->fall_through);
BasicBlock* tk_tk = GetBasicBlock(tk->taken);
@@ -608,7 +610,7 @@ bool MIRGraph::BasicBlockOpt(BasicBlock* bb) {
* transfers to the rejoin block and the fall_though edge goes to a block that
* unconditionally falls through to the rejoin block.
*/
- if ((tk_ft == NULL) && (ft_tk == NULL) && (tk_tk == ft_ft) &&
+ if ((tk_ft == nullptr) && (ft_tk == nullptr) && (tk_tk == ft_ft) &&
(Predecessors(tk) == 1) && (Predecessors(ft) == 1)) {
/*
* Okay - we have the basic diamond shape.
@@ -628,7 +630,7 @@ bool MIRGraph::BasicBlockOpt(BasicBlock* bb) {
MIR* if_false = ft->first_mir_insn;
// It's possible that the target of the select isn't used - skip those (rare) cases.
MIR* phi = FindPhi(tk_tk, if_true->ssa_rep->defs[0]);
- if ((phi != NULL) && (if_true->dalvikInsn.vA == if_false->dalvikInsn.vA)) {
+ if ((phi != nullptr) && (if_true->dalvikInsn.vA == if_false->dalvikInsn.vA)) {
/*
* We'll convert the IF_EQZ/IF_NEZ to a SELECT. We need to find the
* Phi node in the merge block and delete it (while using the SSA name
@@ -712,7 +714,8 @@ bool MIRGraph::BasicBlockOpt(BasicBlock* bb) {
}
}
}
- bb = ((cu_->disable_opt & (1 << kSuppressExceptionEdges)) != 0) ? NextDominatedBlock(bb) : NULL;
+ bb = ((cu_->disable_opt & (1 << kSuppressExceptionEdges)) != 0) ? NextDominatedBlock(bb) :
+ nullptr;
}
if (use_lvn && UNLIKELY(!global_valnum->Good())) {
LOG(WARNING) << "LVN overflow in " << PrettyMethod(cu_->method_idx, *cu_->dex_file);
@@ -723,9 +726,9 @@ bool MIRGraph::BasicBlockOpt(BasicBlock* bb) {
/* Collect stats on number of checks removed */
void MIRGraph::CountChecks(class BasicBlock* bb) {
- if (bb->data_flow_info != NULL) {
- for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
- if (mir->ssa_rep == NULL) {
+ if (bb->data_flow_info != nullptr) {
+ for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
+ if (mir->ssa_rep == nullptr) {
continue;
}
uint64_t df_attributes = GetDataFlowAttributes(mir);
@@ -926,7 +929,7 @@ bool MIRGraph::EliminateNullChecksGate() {
// reset MIR_MARK
AllNodesIterator iter(this);
for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
- for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+ for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
mir->optimization_flags &= ~MIR_MARK;
}
}
@@ -1001,7 +1004,7 @@ bool MIRGraph::EliminateNullChecks(BasicBlock* bb) {
// no intervening uses.
// Walk through the instruction in the block, updating as necessary
- for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+ for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
uint64_t df_attributes = GetDataFlowAttributes(mir);
if ((df_attributes & DF_NULL_TRANSFER_N) != 0u) {
@@ -1112,7 +1115,7 @@ void MIRGraph::EliminateNullChecksEnd() {
// converge MIR_MARK with MIR_IGNORE_NULL_CHECK
AllNodesIterator iter(this);
for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
- for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+ for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
constexpr int kMarkToIgnoreNullCheckShift = kMIRMark - kMIRIgnoreNullCheck;
static_assert(kMarkToIgnoreNullCheckShift > 0, "Not a valid right-shift");
uint16_t mirMarkAdjustedToIgnoreNullCheck =
@@ -1503,7 +1506,7 @@ void MIRGraph::InlineSpecialMethods(BasicBlock* bb) {
if (bb->block_type != kDalvikByteCode) {
return;
}
- for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+ for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
if (MIR::DecodedInstruction::IsPseudoMirOp(mir->dalvikInsn.opcode)) {
continue;
}
@@ -1534,7 +1537,8 @@ void MIRGraph::InlineSpecialMethods(BasicBlock* bb) {
->GenInline(this, bb, mir, target.dex_method_index)) {
if (cu_->verbose || cu_->print_pass) {
LOG(INFO) << "SpecialMethodInliner: Inlined " << method_info.GetInvokeType() << " ("
- << sharp_type << ") call to \"" << PrettyMethod(target.dex_method_index, *target.dex_file)
+ << sharp_type << ") call to \"" << PrettyMethod(target.dex_method_index,
+ *target.dex_file)
<< "\" from \"" << PrettyMethod(cu_->method_idx, *cu_->dex_file)
<< "\" @0x" << std::hex << mir->offset;
}
@@ -1558,7 +1562,7 @@ void MIRGraph::DumpCheckStats() {
static_cast<Checkstats*>(arena_->Alloc(sizeof(Checkstats), kArenaAllocDFInfo));
checkstats_ = stats;
AllNodesIterator iter(this);
- for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
+ for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
CountChecks(bb);
}
if (stats->null_checks > 0) {
@@ -1591,7 +1595,7 @@ bool MIRGraph::BuildExtendedBBList(class BasicBlock* bb) {
bool terminated_by_return = false;
bool do_local_value_numbering = false;
// Visit blocks strictly dominated by this head.
- while (bb != NULL) {
+ while (bb != nullptr) {
bb->visited = true;
terminated_by_return |= bb->terminated_by_return;
do_local_value_numbering |= bb->use_lvn;
@@ -1600,7 +1604,7 @@ bool MIRGraph::BuildExtendedBBList(class BasicBlock* bb) {
if (terminated_by_return || do_local_value_numbering) {
// Do lvn for all blocks in this extended set.
bb = start_bb;
- while (bb != NULL) {
+ while (bb != nullptr) {
bb->use_lvn = do_local_value_numbering;
bb->dominates_return = terminated_by_return;
bb = NextDominatedBlock(bb);
@@ -1623,7 +1627,7 @@ void MIRGraph::BasicBlockOptimization() {
if ((cu_->disable_opt & (1 << kSuppressExceptionEdges)) != 0) {
ClearAllVisitedFlags();
PreOrderDfsIterator iter2(this);
- for (BasicBlock* bb = iter2.Next(); bb != NULL; bb = iter2.Next()) {
+ for (BasicBlock* bb = iter2.Next(); bb != nullptr; bb = iter2.Next()) {
BuildExtendedBBList(bb);
}
// Perform extended basic block optimizations.
@@ -1632,7 +1636,7 @@ void MIRGraph::BasicBlockOptimization() {
}
} else {
PreOrderDfsIterator iter(this);
- for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
+ for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
BasicBlockOpt(bb);
}
}
diff --git a/compiler/dex/pass_driver.h b/compiler/dex/pass_driver.h
index 671bcec..8762b53 100644
--- a/compiler/dex/pass_driver.h
+++ b/compiler/dex/pass_driver.h
@@ -68,7 +68,7 @@ class PassDriver {
* @return whether the pass was applied.
*/
virtual bool RunPass(const char* pass_name) {
- // Paranoid: c_unit cannot be nullptr and we need a pass name.
+ // Paranoid: c_unit cannot be null and we need a pass name.
DCHECK(pass_name != nullptr);
DCHECK_NE(pass_name[0], 0);
diff --git a/compiler/dex/pass_driver_me.h b/compiler/dex/pass_driver_me.h
index 94eef22..cbe4a02 100644
--- a/compiler/dex/pass_driver_me.h
+++ b/compiler/dex/pass_driver_me.h
@@ -88,7 +88,7 @@ class PassDriverME: public PassDriver {
}
bool RunPass(const Pass* pass, bool time_split) OVERRIDE {
- // Paranoid: c_unit and pass cannot be nullptr, and the pass should have a name
+ // Paranoid: c_unit and pass cannot be null, and the pass should have a name.
DCHECK(pass != nullptr);
DCHECK(pass->GetName() != nullptr && pass->GetName()[0] != 0);
CompilationUnit* c_unit = pass_me_data_holder_.c_unit;
@@ -211,8 +211,9 @@ class PassDriverME: public PassDriver {
* @param settings_to_fill Fills the options to contain the mapping of name of option to the new
* configuration.
*/
- static void FillOverriddenPassSettings(const PassManagerOptions* options, const char* pass_name,
- SafeMap<const std::string, const OptionContent>& settings_to_fill) {
+ static void FillOverriddenPassSettings(
+ const PassManagerOptions* options, const char* pass_name,
+ SafeMap<const std::string, const OptionContent>& settings_to_fill) {
const std::string& settings = options->GetOverriddenPassOptions();
const size_t settings_len = settings.size();
diff --git a/compiler/dex/quick/arm/assemble_arm.cc b/compiler/dex/quick/arm/assemble_arm.cc
index c5ac4c1..df4a9f2 100644
--- a/compiler/dex/quick/arm/assemble_arm.cc
+++ b/compiler/dex/quick/arm/assemble_arm.cc
@@ -1055,7 +1055,7 @@ const ArmEncodingMap ArmMir2Lir::EncodingMap[kArmLast] = {
// new_lir replaces orig_lir in the pcrel_fixup list.
void ArmMir2Lir::ReplaceFixup(LIR* prev_lir, LIR* orig_lir, LIR* new_lir) {
new_lir->u.a.pcrel_next = orig_lir->u.a.pcrel_next;
- if (UNLIKELY(prev_lir == NULL)) {
+ if (UNLIKELY(prev_lir == nullptr)) {
first_fixup_ = new_lir;
} else {
prev_lir->u.a.pcrel_next = new_lir;
@@ -1066,7 +1066,7 @@ void ArmMir2Lir::ReplaceFixup(LIR* prev_lir, LIR* orig_lir, LIR* new_lir) {
// new_lir is inserted before orig_lir in the pcrel_fixup list.
void ArmMir2Lir::InsertFixupBefore(LIR* prev_lir, LIR* orig_lir, LIR* new_lir) {
new_lir->u.a.pcrel_next = orig_lir;
- if (UNLIKELY(prev_lir == NULL)) {
+ if (UNLIKELY(prev_lir == nullptr)) {
first_fixup_ = new_lir;
} else {
DCHECK(prev_lir->u.a.pcrel_next == orig_lir);
@@ -1084,7 +1084,7 @@ void ArmMir2Lir::InsertFixupBefore(LIR* prev_lir, LIR* orig_lir, LIR* new_lir) {
uint8_t* ArmMir2Lir::EncodeLIRs(uint8_t* write_pos, LIR* lir) {
uint8_t* const write_buffer = write_pos;
- for (; lir != NULL; lir = NEXT_LIR(lir)) {
+ for (; lir != nullptr; lir = NEXT_LIR(lir)) {
lir->offset = (write_pos - write_buffer);
if (!lir->flags.is_nop) {
int opcode = lir->opcode;
@@ -1258,8 +1258,8 @@ void ArmMir2Lir::AssembleLIR() {
generation ^= 1;
// Note: nodes requring possible fixup linked in ascending order.
lir = first_fixup_;
- prev_lir = NULL;
- while (lir != NULL) {
+ prev_lir = nullptr;
+ while (lir != nullptr) {
/*
* NOTE: the lir being considered here will be encoded following the switch (so long as
* we're not in a retry situation). However, any new non-pc_rel instructions inserted
@@ -1506,7 +1506,7 @@ void ArmMir2Lir::AssembleLIR() {
case kFixupAdr: {
const EmbeddedData* tab_rec = UnwrapPointer<EmbeddedData>(lir->operands[2]);
LIR* target = lir->target;
- int32_t target_disp = (tab_rec != NULL) ? tab_rec->offset + offset_adjustment
+ int32_t target_disp = (tab_rec != nullptr) ? tab_rec->offset + offset_adjustment
: target->offset + ((target->flags.generation == lir->flags.generation) ? 0 :
offset_adjustment);
int32_t disp = target_disp - ((lir->offset + 4) & ~3);
@@ -1642,7 +1642,7 @@ size_t ArmMir2Lir::GetInsnSize(LIR* lir) {
uint32_t ArmMir2Lir::LinkFixupInsns(LIR* head_lir, LIR* tail_lir, uint32_t offset) {
LIR* end_lir = tail_lir->next;
- LIR* last_fixup = NULL;
+ LIR* last_fixup = nullptr;
for (LIR* lir = head_lir; lir != end_lir; lir = NEXT_LIR(lir)) {
if (!lir->flags.is_nop) {
if (lir->flags.fixup != kFixupNone) {
@@ -1658,8 +1658,8 @@ uint32_t ArmMir2Lir::LinkFixupInsns(LIR* head_lir, LIR* tail_lir, uint32_t offse
}
// Link into the fixup chain.
lir->flags.use_def_invalid = true;
- lir->u.a.pcrel_next = NULL;
- if (first_fixup_ == NULL) {
+ lir->u.a.pcrel_next = nullptr;
+ if (first_fixup_ == nullptr) {
first_fixup_ = lir;
} else {
last_fixup->u.a.pcrel_next = lir;
diff --git a/compiler/dex/quick/arm/call_arm.cc b/compiler/dex/quick/arm/call_arm.cc
index 3d18af6..6ba4016 100644
--- a/compiler/dex/quick/arm/call_arm.cc
+++ b/compiler/dex/quick/arm/call_arm.cc
@@ -124,7 +124,7 @@ void ArmMir2Lir::GenLargePackedSwitch(MIR* mir, uint32_t table_offset, RegLocati
}
// Bounds check - if < 0 or >= size continue following switch
OpRegImm(kOpCmp, keyReg, size-1);
- LIR* branch_over = OpCondBranch(kCondHi, NULL);
+ LIR* branch_over = OpCondBranch(kCondHi, nullptr);
// Load the displacement from the switch table
RegStorage disp_reg = AllocTemp();
@@ -156,7 +156,7 @@ void ArmMir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src) {
} else {
// If the null-check fails its handled by the slow-path to reduce exception related meta-data.
if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) {
- null_check_branch = OpCmpImmBranch(kCondEq, rs_r0, 0, NULL);
+ null_check_branch = OpCmpImmBranch(kCondEq, rs_r0, 0, nullptr);
}
}
Load32Disp(rs_rARM_SELF, Thread::ThinLockIdOffset<4>().Int32Value(), rs_r2);
@@ -165,12 +165,12 @@ void ArmMir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src) {
MarkPossibleNullPointerException(opt_flags);
// Zero out the read barrier bits.
OpRegRegImm(kOpAnd, rs_r3, rs_r1, LockWord::kReadBarrierStateMaskShiftedToggled);
- LIR* not_unlocked_branch = OpCmpImmBranch(kCondNe, rs_r3, 0, NULL);
+ LIR* not_unlocked_branch = OpCmpImmBranch(kCondNe, rs_r3, 0, nullptr);
// r1 is zero except for the rb bits here. Copy the read barrier bits into r2.
OpRegRegReg(kOpOr, rs_r2, rs_r2, rs_r1);
NewLIR4(kThumb2Strex, rs_r1.GetReg(), rs_r2.GetReg(), rs_r0.GetReg(),
mirror::Object::MonitorOffset().Int32Value() >> 2);
- LIR* lock_success_branch = OpCmpImmBranch(kCondEq, rs_r1, 0, NULL);
+ LIR* lock_success_branch = OpCmpImmBranch(kCondEq, rs_r1, 0, nullptr);
LIR* slow_path_target = NewLIR0(kPseudoTargetLabel);
@@ -238,7 +238,7 @@ void ArmMir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src) {
} else {
// If the null-check fails its handled by the slow-path to reduce exception related meta-data.
if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) {
- null_check_branch = OpCmpImmBranch(kCondEq, rs_r0, 0, NULL);
+ null_check_branch = OpCmpImmBranch(kCondEq, rs_r0, 0, nullptr);
}
}
if (!kUseReadBarrier) {
@@ -252,16 +252,16 @@ void ArmMir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src) {
OpRegRegImm(kOpAnd, rs_r3, rs_r1, LockWord::kReadBarrierStateMaskShiftedToggled);
// Zero out except the read barrier bits.
OpRegRegImm(kOpAnd, rs_r1, rs_r1, LockWord::kReadBarrierStateMaskShifted);
- LIR* slow_unlock_branch = OpCmpBranch(kCondNe, rs_r3, rs_r2, NULL);
+ LIR* slow_unlock_branch = OpCmpBranch(kCondNe, rs_r3, rs_r2, nullptr);
GenMemBarrier(kAnyStore);
LIR* unlock_success_branch;
if (!kUseReadBarrier) {
Store32Disp(rs_r0, mirror::Object::MonitorOffset().Int32Value(), rs_r1);
- unlock_success_branch = OpUnconditionalBranch(NULL);
+ unlock_success_branch = OpUnconditionalBranch(nullptr);
} else {
NewLIR4(kThumb2Strex, rs_r2.GetReg(), rs_r1.GetReg(), rs_r0.GetReg(),
mirror::Object::MonitorOffset().Int32Value() >> 2);
- unlock_success_branch = OpCmpImmBranch(kCondEq, rs_r2, 0, NULL);
+ unlock_success_branch = OpCmpImmBranch(kCondEq, rs_r2, 0, nullptr);
}
LIR* slow_path_target = NewLIR0(kPseudoTargetLabel);
slow_unlock_branch->target = slow_path_target;
diff --git a/compiler/dex/quick/arm/int_arm.cc b/compiler/dex/quick/arm/int_arm.cc
index 62903af..8d20f1b 100644
--- a/compiler/dex/quick/arm/int_arm.cc
+++ b/compiler/dex/quick/arm/int_arm.cc
@@ -138,10 +138,10 @@ void ArmMir2Lir::GenCmpLong(RegLocation rl_dest, RegLocation rl_src1, RegLocatio
RegStorage t_reg = AllocTemp();
LoadConstant(t_reg, -1);
OpRegReg(kOpCmp, rl_src1.reg.GetHigh(), rl_src2.reg.GetHigh());
- LIR* branch1 = OpCondBranch(kCondLt, NULL);
- LIR* branch2 = OpCondBranch(kCondGt, NULL);
+ LIR* branch1 = OpCondBranch(kCondLt, nullptr);
+ LIR* branch2 = OpCondBranch(kCondGt, nullptr);
OpRegRegReg(kOpSub, t_reg, rl_src1.reg.GetLow(), rl_src2.reg.GetLow());
- LIR* branch3 = OpCondBranch(kCondEq, NULL);
+ LIR* branch3 = OpCondBranch(kCondEq, nullptr);
LIR* it = OpIT(kCondHi, "E");
NewLIR2(kThumb2MovI8M, t_reg.GetReg(), ModifiedImmediate(-1));
@@ -389,7 +389,7 @@ LIR* ArmMir2Lir::OpCmpImmBranch(ConditionCode cond, RegStorage reg, int check_va
* generate the long form in an attempt to avoid an extra assembly pass.
* TODO: consider interspersing slowpaths in code following unconditional branches.
*/
- bool skip = ((target != NULL) && (target->opcode == kPseudoThrowTarget));
+ bool skip = ((target != nullptr) && (target->opcode == kPseudoThrowTarget));
skip &= ((mir_graph_->GetNumDalvikInsns() - current_dalvik_offset_) > 64);
if (!skip && reg.Low8() && (check_value == 0)) {
if (arm_cond == kArmCondEq || arm_cond == kArmCondNe) {
@@ -1159,12 +1159,12 @@ void ArmMir2Lir::GenDivZeroCheckWide(RegStorage reg) {
LIR* ArmMir2Lir::OpTestSuspend(LIR* target) {
#ifdef ARM_R4_SUSPEND_FLAG
NewLIR2(kThumbSubRI8, rs_rARM_SUSPEND.GetReg(), 1);
- return OpCondBranch((target == NULL) ? kCondEq : kCondNe, target);
+ return OpCondBranch((target == nullptr) ? kCondEq : kCondNe, target);
#else
RegStorage t_reg = AllocTemp();
LoadBaseDisp(rs_rARM_SELF, Thread::ThreadFlagsOffset<4>().Int32Value(),
t_reg, kUnsignedHalf, kNotVolatile);
- LIR* cmp_branch = OpCmpImmBranch((target == NULL) ? kCondNe : kCondEq, t_reg,
+ LIR* cmp_branch = OpCmpImmBranch((target == nullptr) ? kCondNe : kCondEq, t_reg,
0, target);
FreeTemp(t_reg);
return cmp_branch;
diff --git a/compiler/dex/quick/arm/utility_arm.cc b/compiler/dex/quick/arm/utility_arm.cc
index 25ea694..2ef92f8 100644
--- a/compiler/dex/quick/arm/utility_arm.cc
+++ b/compiler/dex/quick/arm/utility_arm.cc
@@ -90,7 +90,7 @@ LIR* ArmMir2Lir::LoadFPConstantValue(int r_dest, int value) {
}
}
LIR* data_target = ScanLiteralPool(literal_list_, value, 0);
- if (data_target == NULL) {
+ if (data_target == nullptr) {
data_target = AddWordData(&literal_list_, value);
}
ScopedMemRefType mem_ref_type(this, ResourceMask::kLiteral);
@@ -411,7 +411,7 @@ LIR* ArmMir2Lir::OpRegRegShift(OpKind op, RegStorage r_dest_src1, RegStorage r_s
return NewLIR4(opcode, r_dest_src1.GetReg(), r_dest_src1.GetReg(), r_src2.GetReg(), shift);
} else {
LOG(FATAL) << "Unexpected encoding operand count";
- return NULL;
+ return nullptr;
}
}
@@ -695,7 +695,7 @@ LIR* ArmMir2Lir::OpRegImm(OpKind op, RegStorage r_dest_src1, int value) {
}
LIR* ArmMir2Lir::LoadConstantWide(RegStorage r_dest, int64_t value) {
- LIR* res = NULL;
+ LIR* res = nullptr;
int32_t val_lo = Low32Bits(value);
int32_t val_hi = High32Bits(value);
if (r_dest.IsFloat()) {
@@ -721,10 +721,10 @@ LIR* ArmMir2Lir::LoadConstantWide(RegStorage r_dest, int64_t value) {
LoadConstantNoClobber(r_dest.GetHigh(), val_hi);
}
}
- if (res == NULL) {
+ if (res == nullptr) {
// No short form - load from the literal pool.
LIR* data_target = ScanLiteralPoolWide(literal_list_, val_lo, val_hi);
- if (data_target == NULL) {
+ if (data_target == nullptr) {
data_target = AddWideData(&literal_list_, val_lo, val_hi);
}
ScopedMemRefType mem_ref_type(this, ResourceMask::kLiteral);
@@ -814,7 +814,7 @@ LIR* ArmMir2Lir::LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStora
LIR* ArmMir2Lir::StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src,
int scale, OpSize size) {
bool all_low_regs = r_base.Low8() && r_index.Low8() && r_src.Low8();
- LIR* store = NULL;
+ LIR* store = nullptr;
ArmOpcode opcode = kThumbBkpt;
bool thumb_form = (all_low_regs && (scale == 0));
RegStorage reg_ptr;
diff --git a/compiler/dex/quick/arm64/assemble_arm64.cc b/compiler/dex/quick/arm64/assemble_arm64.cc
index 2f1ae66..b78fb80 100644
--- a/compiler/dex/quick/arm64/assemble_arm64.cc
+++ b/compiler/dex/quick/arm64/assemble_arm64.cc
@@ -663,7 +663,7 @@ const A64EncodingMap Arm64Mir2Lir::EncodingMap[kA64Last] = {
// new_lir replaces orig_lir in the pcrel_fixup list.
void Arm64Mir2Lir::ReplaceFixup(LIR* prev_lir, LIR* orig_lir, LIR* new_lir) {
new_lir->u.a.pcrel_next = orig_lir->u.a.pcrel_next;
- if (UNLIKELY(prev_lir == NULL)) {
+ if (UNLIKELY(prev_lir == nullptr)) {
first_fixup_ = new_lir;
} else {
prev_lir->u.a.pcrel_next = new_lir;
@@ -674,7 +674,7 @@ void Arm64Mir2Lir::ReplaceFixup(LIR* prev_lir, LIR* orig_lir, LIR* new_lir) {
// new_lir is inserted before orig_lir in the pcrel_fixup list.
void Arm64Mir2Lir::InsertFixupBefore(LIR* prev_lir, LIR* orig_lir, LIR* new_lir) {
new_lir->u.a.pcrel_next = orig_lir;
- if (UNLIKELY(prev_lir == NULL)) {
+ if (UNLIKELY(prev_lir == nullptr)) {
first_fixup_ = new_lir;
} else {
DCHECK(prev_lir->u.a.pcrel_next == orig_lir);
@@ -889,8 +889,8 @@ void Arm64Mir2Lir::AssembleLIR() {
generation ^= 1;
// Note: nodes requiring possible fixup linked in ascending order.
lir = first_fixup_;
- prev_lir = NULL;
- while (lir != NULL) {
+ prev_lir = nullptr;
+ while (lir != nullptr) {
// NOTE: Any new non-pc_rel instructions inserted due to retry must be explicitly encoded at
// the time of insertion. Note that inserted instructions don't need use/def flags, but do
// need size and pc-rel status properly updated.
@@ -1037,7 +1037,7 @@ void Arm64Mir2Lir::AssembleLIR() {
// Check that the instruction preceding the multiply-accumulate is a load or store.
if ((prev_insn_flags & IS_LOAD) != 0 || (prev_insn_flags & IS_STORE) != 0) {
// insert a NOP between the load/store and the multiply-accumulate.
- LIR* new_lir = RawLIR(lir->dalvik_offset, kA64Nop0, 0, 0, 0, 0, 0, NULL);
+ LIR* new_lir = RawLIR(lir->dalvik_offset, kA64Nop0, 0, 0, 0, 0, 0, nullptr);
new_lir->offset = lir->offset;
new_lir->flags.fixup = kFixupNone;
new_lir->flags.size = EncodingMap[kA64Nop0].size;
@@ -1108,7 +1108,7 @@ size_t Arm64Mir2Lir::GetInsnSize(LIR* lir) {
uint32_t Arm64Mir2Lir::LinkFixupInsns(LIR* head_lir, LIR* tail_lir, uint32_t offset) {
LIR* end_lir = tail_lir->next;
- LIR* last_fixup = NULL;
+ LIR* last_fixup = nullptr;
for (LIR* lir = head_lir; lir != end_lir; lir = NEXT_LIR(lir)) {
A64Opcode opcode = UNWIDE(lir->opcode);
if (!lir->flags.is_nop) {
@@ -1123,8 +1123,8 @@ uint32_t Arm64Mir2Lir::LinkFixupInsns(LIR* head_lir, LIR* tail_lir, uint32_t off
}
// Link into the fixup chain.
lir->flags.use_def_invalid = true;
- lir->u.a.pcrel_next = NULL;
- if (first_fixup_ == NULL) {
+ lir->u.a.pcrel_next = nullptr;
+ if (first_fixup_ == nullptr) {
first_fixup_ = lir;
} else {
last_fixup->u.a.pcrel_next = lir;
diff --git a/compiler/dex/quick/arm64/call_arm64.cc b/compiler/dex/quick/arm64/call_arm64.cc
index 4abbd77..9a7c2ad 100644
--- a/compiler/dex/quick/arm64/call_arm64.cc
+++ b/compiler/dex/quick/arm64/call_arm64.cc
@@ -127,7 +127,7 @@ void Arm64Mir2Lir::GenLargePackedSwitch(MIR* mir, uint32_t table_offset, RegLoca
}
// Bounds check - if < 0 or >= size continue following switch
OpRegImm(kOpCmp, key_reg, size - 1);
- LIR* branch_over = OpCondBranch(kCondHi, NULL);
+ LIR* branch_over = OpCondBranch(kCondHi, nullptr);
// Load the displacement from the switch table
RegStorage disp_reg = AllocTemp();
@@ -167,7 +167,7 @@ void Arm64Mir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src) {
} else {
// If the null-check fails its handled by the slow-path to reduce exception related meta-data.
if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) {
- null_check_branch = OpCmpImmBranch(kCondEq, rs_x0, 0, NULL);
+ null_check_branch = OpCmpImmBranch(kCondEq, rs_x0, 0, nullptr);
}
}
Load32Disp(rs_xSELF, Thread::ThinLockIdOffset<8>().Int32Value(), rs_w1);
@@ -176,12 +176,12 @@ void Arm64Mir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src) {
MarkPossibleNullPointerException(opt_flags);
// Zero out the read barrier bits.
OpRegRegImm(kOpAnd, rs_w2, rs_w3, LockWord::kReadBarrierStateMaskShiftedToggled);
- LIR* not_unlocked_branch = OpCmpImmBranch(kCondNe, rs_w2, 0, NULL);
+ LIR* not_unlocked_branch = OpCmpImmBranch(kCondNe, rs_w2, 0, nullptr);
// w3 is zero except for the rb bits here. Copy the read barrier bits into w1.
OpRegRegReg(kOpOr, rs_w1, rs_w1, rs_w3);
OpRegRegImm(kOpAdd, rs_x2, rs_x0, mirror::Object::MonitorOffset().Int32Value());
NewLIR3(kA64Stxr3wrX, rw3, rw1, rx2);
- LIR* lock_success_branch = OpCmpImmBranch(kCondEq, rs_w3, 0, NULL);
+ LIR* lock_success_branch = OpCmpImmBranch(kCondEq, rs_w3, 0, nullptr);
LIR* slow_path_target = NewLIR0(kPseudoTargetLabel);
not_unlocked_branch->target = slow_path_target;
@@ -220,7 +220,7 @@ void Arm64Mir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src) {
} else {
// If the null-check fails its handled by the slow-path to reduce exception related meta-data.
if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) {
- null_check_branch = OpCmpImmBranch(kCondEq, rs_x0, 0, NULL);
+ null_check_branch = OpCmpImmBranch(kCondEq, rs_x0, 0, nullptr);
}
}
Load32Disp(rs_xSELF, Thread::ThinLockIdOffset<8>().Int32Value(), rs_w1);
@@ -235,16 +235,16 @@ void Arm64Mir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src) {
OpRegRegImm(kOpAnd, rs_w3, rs_w2, LockWord::kReadBarrierStateMaskShiftedToggled);
// Zero out except the read barrier bits.
OpRegRegImm(kOpAnd, rs_w2, rs_w2, LockWord::kReadBarrierStateMaskShifted);
- LIR* slow_unlock_branch = OpCmpBranch(kCondNe, rs_w3, rs_w1, NULL);
+ LIR* slow_unlock_branch = OpCmpBranch(kCondNe, rs_w3, rs_w1, nullptr);
GenMemBarrier(kAnyStore);
LIR* unlock_success_branch;
if (!kUseReadBarrier) {
Store32Disp(rs_x0, mirror::Object::MonitorOffset().Int32Value(), rs_w2);
- unlock_success_branch = OpUnconditionalBranch(NULL);
+ unlock_success_branch = OpUnconditionalBranch(nullptr);
} else {
OpRegRegImm(kOpAdd, rs_x3, rs_x0, mirror::Object::MonitorOffset().Int32Value());
NewLIR3(kA64Stxr3wrX, rw1, rw2, rx3);
- unlock_success_branch = OpCmpImmBranch(kCondEq, rs_w1, 0, NULL);
+ unlock_success_branch = OpCmpImmBranch(kCondEq, rs_w1, 0, nullptr);
}
LIR* slow_path_target = NewLIR0(kPseudoTargetLabel);
slow_unlock_branch->target = slow_path_target;
diff --git a/compiler/dex/quick/arm64/int_arm64.cc b/compiler/dex/quick/arm64/int_arm64.cc
index b7dbd0a..9340d01 100644
--- a/compiler/dex/quick/arm64/int_arm64.cc
+++ b/compiler/dex/quick/arm64/int_arm64.cc
@@ -803,7 +803,7 @@ bool Arm64Mir2Lir::GenInlinedCas(CallInfo* info, bool is_long, bool is_object) {
NewLIR2(kA64Ldaxr2rX | wide, r_tmp_stored.GetReg(), r_ptr.GetReg());
OpRegReg(kOpCmp, r_tmp, rl_expected.reg);
DCHECK(last_lir_insn_->u.m.def_mask->HasBit(ResourceMask::kCCode));
- LIR* early_exit = OpCondBranch(kCondNe, NULL);
+ LIR* early_exit = OpCondBranch(kCondNe, nullptr);
NewLIR3(kA64Stlxr3wrX | wide, r_tmp32.GetReg(), rl_new_value_stored.GetReg(), r_ptr.GetReg());
NewLIR3(kA64Cmp3RdT, r_tmp32.GetReg(), 0, ENCODE_NO_SHIFT);
DCHECK(last_lir_insn_->u.m.def_mask->HasBit(ResourceMask::kCCode));
diff --git a/compiler/dex/quick/arm64/utility_arm64.cc b/compiler/dex/quick/arm64/utility_arm64.cc
index e9ad8ba..483231f 100644
--- a/compiler/dex/quick/arm64/utility_arm64.cc
+++ b/compiler/dex/quick/arm64/utility_arm64.cc
@@ -121,7 +121,7 @@ LIR* Arm64Mir2Lir::LoadFPConstantValue(RegStorage r_dest, int32_t value) {
}
LIR* data_target = ScanLiteralPool(literal_list_, value, 0);
- if (data_target == NULL) {
+ if (data_target == nullptr) {
// Wide, as we need 8B alignment.
data_target = AddWideData(&literal_list_, value, 0);
}
@@ -148,7 +148,7 @@ LIR* Arm64Mir2Lir::LoadFPConstantValueWide(RegStorage r_dest, int64_t value) {
int32_t val_lo = Low32Bits(value);
int32_t val_hi = High32Bits(value);
LIR* data_target = ScanLiteralPoolWide(literal_list_, val_lo, val_hi);
- if (data_target == NULL) {
+ if (data_target == nullptr) {
data_target = AddWideData(&literal_list_, val_lo, val_hi);
}
@@ -525,7 +525,7 @@ LIR* Arm64Mir2Lir::LoadConstantWide(RegStorage r_dest, int64_t value) {
int32_t val_lo = Low32Bits(value);
int32_t val_hi = High32Bits(value);
LIR* data_target = ScanLiteralPoolWide(literal_list_, val_lo, val_hi);
- if (data_target == NULL) {
+ if (data_target == nullptr) {
data_target = AddWideData(&literal_list_, val_lo, val_hi);
}
@@ -624,7 +624,7 @@ LIR* Arm64Mir2Lir::OpRegRegShift(OpKind op, RegStorage r_dest_src1, RegStorage r
}
LOG(FATAL) << "Unexpected encoding operand count";
- return NULL;
+ return nullptr;
}
LIR* Arm64Mir2Lir::OpRegRegExtend(OpKind op, RegStorage r_dest_src1, RegStorage r_src2,
@@ -658,7 +658,7 @@ LIR* Arm64Mir2Lir::OpRegRegExtend(OpKind op, RegStorage r_dest_src1, RegStorage
}
LOG(FATAL) << "Unexpected encoding operand count";
- return NULL;
+ return nullptr;
}
LIR* Arm64Mir2Lir::OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2) {
@@ -1190,7 +1190,7 @@ LIR* Arm64Mir2Lir::StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegSt
*/
LIR* Arm64Mir2Lir::LoadBaseDispBody(RegStorage r_base, int displacement, RegStorage r_dest,
OpSize size) {
- LIR* load = NULL;
+ LIR* load = nullptr;
A64Opcode opcode = kA64Brk1d;
A64Opcode alt_opcode = kA64Brk1d;
int scale = 0;
@@ -1286,7 +1286,7 @@ LIR* Arm64Mir2Lir::LoadBaseDisp(RegStorage r_base, int displacement, RegStorage
LIR* Arm64Mir2Lir::StoreBaseDispBody(RegStorage r_base, int displacement, RegStorage r_src,
OpSize size) {
- LIR* store = NULL;
+ LIR* store = nullptr;
A64Opcode opcode = kA64Brk1d;
A64Opcode alt_opcode = kA64Brk1d;
int scale = 0;
diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc
index 9f4a318..fb68335 100644
--- a/compiler/dex/quick/codegen_util.cc
+++ b/compiler/dex/quick/codegen_util.cc
@@ -1080,7 +1080,7 @@ Mir2Lir::Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena
reginfo_map_.reserve(RegStorage::kMaxRegs);
pointer_storage_.reserve(128);
slow_paths_.reserve(32);
- // Reserve pointer id 0 for nullptr.
+ // Reserve pointer id 0 for null.
size_t null_idx = WrapPointer<void>(nullptr);
DCHECK_EQ(null_idx, 0U);
}
diff --git a/compiler/dex/quick/dex_file_method_inliner.cc b/compiler/dex/quick/dex_file_method_inliner.cc
index ca31dbf..f5e6c09 100644
--- a/compiler/dex/quick/dex_file_method_inliner.cc
+++ b/compiler/dex/quick/dex_file_method_inliner.cc
@@ -392,7 +392,7 @@ const DexFileMethodInliner::IntrinsicDef DexFileMethodInliner::kIntrinsicMethods
DexFileMethodInliner::DexFileMethodInliner()
: lock_("DexFileMethodInliner lock", kDexFileMethodInlinerLock),
- dex_file_(NULL) {
+ dex_file_(nullptr) {
static_assert(kClassCacheFirst == 0, "kClassCacheFirst not 0");
static_assert(arraysize(kClassCacheNames) == kClassCacheLast,
"bad arraysize for kClassCacheNames");
diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc
index 1a72cd7..de5e041 100644
--- a/compiler/dex/quick/gen_common.cc
+++ b/compiler/dex/quick/gen_common.cc
@@ -87,7 +87,7 @@ void Mir2Lir::GenIfNullUseHelperImmMethod(
const RegStorage r_result_;
};
- LIR* branch = OpCmpImmBranch(kCondEq, r_result, 0, NULL);
+ LIR* branch = OpCmpImmBranch(kCondEq, r_result, 0, nullptr);
LIR* cont = NewLIR0(kPseudoTargetLabel);
AddSlowPath(new (arena_) CallHelperImmMethodSlowPath(this, branch, cont, trampoline, imm,
@@ -113,10 +113,10 @@ RegStorage Mir2Lir::GenGetOtherTypeForSgetSput(const MirSFieldLoweringInfo& fiel
int32_t offset_of_field = ObjArray::OffsetOfElement(field_info.StorageIndex()).Int32Value();
LoadRefDisp(r_base, offset_of_field, r_base, kNotVolatile);
}
- // r_base now points at static storage (Class*) or nullptr if the type is not yet resolved.
+ // r_base now points at static storage (Class*) or null if the type is not yet resolved.
LIR* unresolved_branch = nullptr;
if (!field_info.IsClassInDexCache() && (opt_flags & MIR_CLASS_IS_IN_DEX_CACHE) == 0) {
- // Check if r_base is nullptr.
+ // Check if r_base is null.
unresolved_branch = OpCmpImmBranch(kCondEq, r_base, 0, nullptr);
}
LIR* uninit_branch = nullptr;
@@ -136,8 +136,8 @@ RegStorage Mir2Lir::GenGetOtherTypeForSgetSput(const MirSFieldLoweringInfo& fiel
class StaticFieldSlowPath : public Mir2Lir::LIRSlowPath {
public:
// There are up to two branches to the static field slow path, the "unresolved" when the type
- // entry in the dex cache is nullptr, and the "uninit" when the class is not yet initialized.
- // At least one will be non-nullptr here, otherwise we wouldn't generate the slow path.
+ // entry in the dex cache is null, and the "uninit" when the class is not yet initialized.
+ // At least one will be non-null here, otherwise we wouldn't generate the slow path.
StaticFieldSlowPath(Mir2Lir* m2l, LIR* unresolved, LIR* uninit, LIR* cont, int storage_index,
RegStorage r_base_in, RegStorage r_method_in)
: LIRSlowPath(m2l, unresolved != nullptr ? unresolved : uninit, cont),
@@ -165,7 +165,7 @@ RegStorage Mir2Lir::GenGetOtherTypeForSgetSput(const MirSFieldLoweringInfo& fiel
}
private:
- // Second branch to the slow path, or nullptr if there's only one branch.
+ // Second branch to the slow path, or null if there's only one branch.
LIR* const second_branch_;
const int storage_index_;
@@ -173,7 +173,7 @@ RegStorage Mir2Lir::GenGetOtherTypeForSgetSput(const MirSFieldLoweringInfo& fiel
RegStorage r_method_;
};
- // The slow path is invoked if the r_base is nullptr or the class pointed
+ // The slow path is invoked if the r_base is null or the class pointed
// to by it is not initialized.
LIR* cont = NewLIR0(kPseudoTargetLabel);
AddSlowPath(new (arena_) StaticFieldSlowPath(this, unresolved_branch, uninit_branch, cont,
@@ -319,7 +319,7 @@ LIR* Mir2Lir::GenNullCheck(RegStorage m_reg, int opt_flags) {
/* Perform an explicit null-check on a register. */
LIR* Mir2Lir::GenExplicitNullCheck(RegStorage m_reg, int opt_flags) {
if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) {
- return NULL;
+ return nullptr;
}
return GenNullCheck(m_reg);
}
@@ -1188,7 +1188,7 @@ void Mir2Lir::GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx, Re
DCHECK(!IsSameReg(result_reg, object.reg));
}
LoadConstant(result_reg, 0); // assume false
- LIR* null_branchover = OpCmpImmBranch(kCondEq, object.reg, 0, NULL);
+ LIR* null_branchover = OpCmpImmBranch(kCondEq, object.reg, 0, nullptr);
RegStorage check_class = AllocTypedTemp(false, kRefReg);
RegStorage object_class = AllocTypedTemp(false, kRefReg);
@@ -1287,7 +1287,7 @@ void Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_know
// On MIPS and x86_64 rArg0 != rl_result, place false in result if branch is taken.
LoadConstant(rl_result.reg, 0);
}
- LIR* branch1 = OpCmpImmBranch(kCondEq, ref_reg, 0, NULL);
+ LIR* branch1 = OpCmpImmBranch(kCondEq, ref_reg, 0, nullptr);
/* load object->klass_ */
RegStorage ref_class_reg = TargetReg(kArg1, kRef); // kArg1 will hold the Class* of ref.
@@ -1295,7 +1295,7 @@ void Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_know
LoadRefDisp(ref_reg, mirror::Object::ClassOffset().Int32Value(),
ref_class_reg, kNotVolatile);
/* kArg0 is ref, kArg1 is ref->klass_, kArg2 is class */
- LIR* branchover = NULL;
+ LIR* branchover = nullptr;
if (type_known_final) {
// rl_result == ref == class.
GenSelectConst32(ref_class_reg, class_reg, kCondEq, 1, 0, rl_result.reg,
@@ -1320,7 +1320,7 @@ void Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_know
if (!type_known_abstract) {
/* Uses branchovers */
LoadConstant(rl_result.reg, 1); // assume true
- branchover = OpCmpBranch(kCondEq, TargetReg(kArg1, kRef), TargetReg(kArg2, kRef), NULL);
+ branchover = OpCmpBranch(kCondEq, TargetReg(kArg1, kRef), TargetReg(kArg2, kRef), nullptr);
}
OpRegCopy(TargetReg(kArg0, kRef), class_reg); // .ne case - arg0 <= class
@@ -2129,7 +2129,7 @@ void Mir2Lir::GenSuspendTest(int opt_flags) {
}
if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitSuspendChecks()) {
FlushAllRegs();
- LIR* branch = OpTestSuspend(NULL);
+ LIR* branch = OpTestSuspend(nullptr);
LIR* cont = NewLIR0(kPseudoTargetLabel);
AddSlowPath(new (arena_) SuspendCheckSlowPath(this, branch, cont));
} else {
diff --git a/compiler/dex/quick/gen_loadstore.cc b/compiler/dex/quick/gen_loadstore.cc
index 54e5742..4215e8b 100644
--- a/compiler/dex/quick/gen_loadstore.cc
+++ b/compiler/dex/quick/gen_loadstore.cc
@@ -46,7 +46,7 @@ void Mir2Lir::LoadValueDirect(RegLocation rl_src, RegStorage r_dest) {
if (rl_src.location == kLocPhysReg) {
OpRegCopy(r_dest, rl_src.reg);
} else if (IsInexpensiveConstant(rl_src)) {
- // On 64-bit targets, will sign extend. Make sure constant reference is always NULL.
+ // On 64-bit targets, will sign extend. Make sure constant reference is always null.
DCHECK(!rl_src.ref || (mir_graph_->ConstantValue(rl_src) == 0));
LoadConstantNoClobber(r_dest, mir_graph_->ConstantValue(rl_src));
} else {
diff --git a/compiler/dex/quick/mips/assemble_mips.cc b/compiler/dex/quick/mips/assemble_mips.cc
index 936ff42..f9b9684 100644
--- a/compiler/dex/quick/mips/assemble_mips.cc
+++ b/compiler/dex/quick/mips/assemble_mips.cc
@@ -613,7 +613,7 @@ void MipsMir2Lir::ConvertShortToLongBranch(LIR* lir) {
LOG(FATAL) << "Unexpected branch kind " << opcode;
UNREACHABLE();
}
- LIR* hop_target = NULL;
+ LIR* hop_target = nullptr;
if (!unconditional) {
hop_target = RawLIR(dalvik_offset, kPseudoTargetLabel);
LIR* hop_branch = RawLIR(dalvik_offset, opcode, lir->operands[0],
@@ -650,7 +650,7 @@ AssemblerStatus MipsMir2Lir::AssembleInstructions(CodeOffset start_addr) {
LIR *lir;
AssemblerStatus res = kSuccess; // Assume success.
- for (lir = first_lir_insn_; lir != NULL; lir = NEXT_LIR(lir)) {
+ for (lir = first_lir_insn_; lir != nullptr; lir = NEXT_LIR(lir)) {
if (lir->opcode < 0) {
continue;
}
@@ -668,7 +668,7 @@ AssemblerStatus MipsMir2Lir::AssembleInstructions(CodeOffset start_addr) {
* (label2 - label1), where label1 is a standard
* kPseudoTargetLabel and is stored in operands[2].
* If operands[3] is null, then label2 is a kPseudoTargetLabel
- * and is found in lir->target. If operands[3] is non-NULL,
+ * and is found in lir->target. If operands[3] is non-null,
* then it is a Switch/Data table.
*/
int offset1 = UnwrapPointer<LIR>(lir->operands[2])->offset;
@@ -863,7 +863,7 @@ int MipsMir2Lir::AssignInsnOffsets() {
LIR* lir;
int offset = 0;
- for (lir = first_lir_insn_; lir != NULL; lir = NEXT_LIR(lir)) {
+ for (lir = first_lir_insn_; lir != nullptr; lir = NEXT_LIR(lir)) {
lir->offset = offset;
if (LIKELY(lir->opcode >= 0)) {
if (!lir->flags.is_nop) {
diff --git a/compiler/dex/quick/mips/call_mips.cc b/compiler/dex/quick/mips/call_mips.cc
index 05570e4..39b9cc7 100644
--- a/compiler/dex/quick/mips/call_mips.cc
+++ b/compiler/dex/quick/mips/call_mips.cc
@@ -112,7 +112,7 @@ void MipsMir2Lir::GenLargeSparseSwitch(MIR* mir, DexOffset table_offset, RegLoca
// Test loop.
RegStorage r_key = AllocTemp();
LIR* loop_label = NewLIR0(kPseudoTargetLabel);
- LIR* exit_branch = OpCmpBranch(kCondEq, r_base, r_end, NULL);
+ LIR* exit_branch = OpCmpBranch(kCondEq, r_base, r_end, nullptr);
Load32Disp(r_base, 0, r_key);
OpRegImm(kOpAdd, r_base, 8);
OpCmpBranch(kCondNe, rl_src.reg, r_key, loop_label);
@@ -188,7 +188,7 @@ void MipsMir2Lir::GenLargePackedSwitch(MIR* mir, DexOffset table_offset, RegLoca
tab_rec->anchor = base_label;
// Bounds check - if < 0 or >= size continue following switch.
- LIR* branch_over = OpCmpImmBranch(kCondHi, r_key, size-1, NULL);
+ LIR* branch_over = OpCmpImmBranch(kCondHi, r_key, size-1, nullptr);
// Materialize the table base pointer.
RegStorage r_base = AllocPtrSizeTemp();
diff --git a/compiler/dex/quick/mips/int_mips.cc b/compiler/dex/quick/mips/int_mips.cc
index 1ca8bb6..9319c64 100644
--- a/compiler/dex/quick/mips/int_mips.cc
+++ b/compiler/dex/quick/mips/int_mips.cc
@@ -68,7 +68,7 @@ void MipsMir2Lir::GenCmpLong(RegLocation rl_dest, RegLocation rl_src1, RegLocati
NewLIR3(kMipsSlt, t0.GetReg(), rl_src1.reg.GetHighReg(), rl_src2.reg.GetHighReg());
NewLIR3(kMipsSlt, t1.GetReg(), rl_src2.reg.GetHighReg(), rl_src1.reg.GetHighReg());
NewLIR3(kMipsSubu, rl_result.reg.GetReg(), t1.GetReg(), t0.GetReg());
- LIR* branch = OpCmpImmBranch(kCondNe, rl_result.reg, 0, NULL);
+ LIR* branch = OpCmpImmBranch(kCondNe, rl_result.reg, 0, nullptr);
NewLIR3(kMipsSltu, t0.GetReg(), rl_src1.reg.GetLowReg(), rl_src2.reg.GetLowReg());
NewLIR3(kMipsSltu, t1.GetReg(), rl_src2.reg.GetLowReg(), rl_src1.reg.GetLowReg());
NewLIR3(kMipsSubu, rl_result.reg.GetReg(), t1.GetReg(), t0.GetReg());
@@ -128,7 +128,7 @@ LIR* MipsMir2Lir::OpCmpBranch(ConditionCode cond, RegStorage src1, RegStorage sr
break;
default:
LOG(FATAL) << "No support for ConditionCode: " << cond;
- return NULL;
+ return nullptr;
}
if (cmp_zero) {
branch = NewLIR2(br_op, src1.GetReg(), src2.GetReg());
@@ -278,7 +278,7 @@ void MipsMir2Lir::GenSelectConst32(RegStorage left_op, RegStorage right_op, Cond
// Implement as a branch-over.
// TODO: Conditional move?
LoadConstant(rs_dest, true_val);
- LIR* ne_branchover = OpCmpBranch(code, left_op, right_op, NULL);
+ LIR* ne_branchover = OpCmpBranch(code, left_op, right_op, nullptr);
LoadConstant(rs_dest, false_val);
LIR* target_label = NewLIR0(kPseudoTargetLabel);
ne_branchover->target = target_label;
@@ -447,7 +447,7 @@ void MipsMir2Lir::GenDivZeroCheckWide(RegStorage reg) {
// Test suspend flag, return target of taken suspend branch.
LIR* MipsMir2Lir::OpTestSuspend(LIR* target) {
OpRegImm(kOpSub, TargetPtrReg(kSuspend), 1);
- return OpCmpImmBranch((target == NULL) ? kCondEq : kCondNe, TargetPtrReg(kSuspend), 0, target);
+ return OpCmpImmBranch((target == nullptr) ? kCondEq : kCondNe, TargetPtrReg(kSuspend), 0, target);
}
// Decrement register and branch on condition.
diff --git a/compiler/dex/quick/mips/utility_mips.cc b/compiler/dex/quick/mips/utility_mips.cc
index 8ab5422..95c61cd 100644
--- a/compiler/dex/quick/mips/utility_mips.cc
+++ b/compiler/dex/quick/mips/utility_mips.cc
@@ -566,7 +566,7 @@ LIR* MipsMir2Lir::LoadConstantWide(RegStorage r_dest, int64_t value) {
/* Load value from base + scaled index. */
LIR* MipsMir2Lir::LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest,
int scale, OpSize size) {
- LIR *first = NULL;
+ LIR *first = nullptr;
LIR *res;
MipsOpCode opcode = kMipsNop;
bool is64bit = cu_->target64 && r_dest.Is64Bit();
@@ -640,7 +640,7 @@ LIR* MipsMir2Lir::LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStor
// Store value to base + scaled index.
LIR* MipsMir2Lir::StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src,
int scale, OpSize size) {
- LIR *first = NULL;
+ LIR *first = nullptr;
MipsOpCode opcode = kMipsNop;
RegStorage t_reg = AllocTemp();
@@ -696,8 +696,8 @@ LIR* MipsMir2Lir::LoadBaseDispBody(RegStorage r_base, int displacement, RegStora
* rlp and then restore.
*/
LIR *res;
- LIR *load = NULL;
- LIR *load2 = NULL;
+ LIR *load = nullptr;
+ LIR *load2 = nullptr;
MipsOpCode opcode = kMipsNop;
bool short_form = IS_SIMM16(displacement);
bool is64bit = false;
@@ -857,8 +857,8 @@ LIR* MipsMir2Lir::LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r
LIR* MipsMir2Lir::StoreBaseDispBody(RegStorage r_base, int displacement, RegStorage r_src,
OpSize size) {
LIR *res;
- LIR *store = NULL;
- LIR *store2 = NULL;
+ LIR *store = nullptr;
+ LIR *store2 = nullptr;
MipsOpCode opcode = kMipsNop;
bool short_form = IS_SIMM16(displacement);
bool is64bit = false;
diff --git a/compiler/dex/quick/mir_to_lir.cc b/compiler/dex/quick/mir_to_lir.cc
index 2deb727..e9e9161 100644
--- a/compiler/dex/quick/mir_to_lir.cc
+++ b/compiler/dex/quick/mir_to_lir.cc
@@ -1219,7 +1219,7 @@ bool Mir2Lir::MethodBlockCodeGen(BasicBlock* bb) {
block_label_list_[block_id].flags.fixup = kFixupLabel;
AppendLIR(&block_label_list_[block_id]);
- LIR* head_lir = NULL;
+ LIR* head_lir = nullptr;
// If this is a catch block, export the start address.
if (bb->catch_entry) {
@@ -1245,7 +1245,7 @@ bool Mir2Lir::MethodBlockCodeGen(BasicBlock* bb) {
DCHECK_EQ(cfi_.GetCurrentCFAOffset(), frame_size_);
}
- for (mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+ for (mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
ResetRegPool();
if (cu_->disable_opt & (1 << kTrackLiveTemps)) {
ClobberAllTemps();
@@ -1269,7 +1269,7 @@ bool Mir2Lir::MethodBlockCodeGen(BasicBlock* bb) {
GenPrintLabel(mir);
// Remember the first LIR for this block.
- if (head_lir == NULL) {
+ if (head_lir == nullptr) {
head_lir = &block_label_list_[bb->id];
// Set the first label as a scheduling barrier.
DCHECK(!head_lir->flags.use_def_invalid);
@@ -1309,7 +1309,7 @@ bool Mir2Lir::SpecialMIR2LIR(const InlineMethod& special) {
cu_->NewTimingSplit("SpecialMIR2LIR");
// Find the first DalvikByteCode block.
DCHECK_EQ(mir_graph_->GetNumReachableBlocks(), mir_graph_->GetDfsOrder().size());
- BasicBlock*bb = NULL;
+ BasicBlock* bb = nullptr;
for (BasicBlockId dfs_id : mir_graph_->GetDfsOrder()) {
BasicBlock* candidate = mir_graph_->GetBasicBlock(dfs_id);
if (candidate->block_type == kDalvikByteCode) {
@@ -1317,11 +1317,11 @@ bool Mir2Lir::SpecialMIR2LIR(const InlineMethod& special) {
break;
}
}
- if (bb == NULL) {
+ if (bb == nullptr) {
return false;
}
DCHECK_EQ(bb->start_offset, 0);
- DCHECK(bb->first_mir_insn != NULL);
+ DCHECK(bb->first_mir_insn != nullptr);
// Get the first instruction.
MIR* mir = bb->first_mir_insn;
@@ -1343,17 +1343,17 @@ void Mir2Lir::MethodMIR2LIR() {
PreOrderDfsIterator iter(mir_graph_);
BasicBlock* curr_bb = iter.Next();
BasicBlock* next_bb = iter.Next();
- while (curr_bb != NULL) {
+ while (curr_bb != nullptr) {
MethodBlockCodeGen(curr_bb);
// If the fall_through block is no longer laid out consecutively, drop in a branch.
BasicBlock* curr_bb_fall_through = mir_graph_->GetBasicBlock(curr_bb->fall_through);
- if ((curr_bb_fall_through != NULL) && (curr_bb_fall_through != next_bb)) {
+ if ((curr_bb_fall_through != nullptr) && (curr_bb_fall_through != next_bb)) {
OpUnconditionalBranch(&block_label_list_[curr_bb->fall_through]);
}
curr_bb = next_bb;
do {
next_bb = iter.Next();
- } while ((next_bb != NULL) && (next_bb->block_type == kDead));
+ } while ((next_bb != nullptr) && (next_bb->block_type == kDead));
}
HandleSlowPaths();
}
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
index f9efe37..8f08a51 100644
--- a/compiler/dex/quick/mir_to_lir.h
+++ b/compiler/dex/quick/mir_to_lir.h
@@ -388,7 +388,7 @@ class Mir2Lir {
LIR* DefEnd() { return def_end_; }
void SetDefEnd(LIR* def_end) { def_end_ = def_end; }
void ResetDefBody() { def_start_ = def_end_ = nullptr; }
- // Find member of aliased set matching storage_used; return nullptr if none.
+ // Find member of aliased set matching storage_used; return null if none.
RegisterInfo* FindMatchingView(uint32_t storage_used) {
RegisterInfo* res = Master();
for (; res != nullptr; res = res->GetAliasChain()) {
@@ -605,7 +605,7 @@ class Mir2Lir {
char* ArenaStrdup(const char* str) {
size_t len = strlen(str) + 1;
char* res = arena_->AllocArray<char>(len, kArenaAllocMisc);
- if (res != NULL) {
+ if (res != nullptr) {
strncpy(res, str, len);
}
return res;
@@ -650,7 +650,7 @@ class Mir2Lir {
void DumpPromotionMap();
void CodegenDump();
LIR* RawLIR(DexOffset dalvik_offset, int opcode, int op0 = 0, int op1 = 0,
- int op2 = 0, int op3 = 0, int op4 = 0, LIR* target = NULL);
+ int op2 = 0, int op3 = 0, int op4 = 0, LIR* target = nullptr);
LIR* NewLIR0(int opcode);
LIR* NewLIR1(int opcode, int dest);
LIR* NewLIR2(int opcode, int dest, int src1);
@@ -1120,8 +1120,8 @@ class Mir2Lir {
* @param base_reg The register holding the base address.
* @param offset The offset from the base.
* @param check_value The immediate to compare to.
- * @param target branch target (or nullptr)
- * @param compare output for getting LIR for comparison (or nullptr)
+ * @param target branch target (or null)
+ * @param compare output for getting LIR for comparison (or null)
* @returns The branch instruction that was generated.
*/
virtual LIR* OpCmpMemImmBranch(ConditionCode cond, RegStorage temp_reg, RegStorage base_reg,
@@ -1854,7 +1854,7 @@ class Mir2Lir {
// to deduplicate the masks.
ResourceMaskCache mask_cache_;
- // Record the MIR that generated a given safepoint (nullptr for prologue safepoints).
+ // Record the MIR that generated a given safepoint (null for prologue safepoints).
ArenaVector<std::pair<LIR*, MIR*>> safepoints_;
// The layout of the cu_->dex_file's dex cache arrays for PC-relative addressing.
@@ -1869,7 +1869,7 @@ class Mir2Lir {
// For architectures that don't have true PC-relative addressing (see pc_rel_temp_
// above) and also have a limited range of offsets for loads, it can be useful to
// know the minimum offset into the dex cache arrays, so we calculate that as well
- // if pc_rel_temp_ isn't nullptr.
+ // if pc_rel_temp_ isn't null.
uint32_t dex_cache_arrays_min_offset_;
dwarf::LazyDebugFrameOpCodeWriter cfi_;
diff --git a/compiler/dex/quick/quick_cfi_test.cc b/compiler/dex/quick/quick_cfi_test.cc
index 555d5b9..b3c7355 100644
--- a/compiler/dex/quick/quick_cfi_test.cc
+++ b/compiler/dex/quick/quick_cfi_test.cc
@@ -100,7 +100,7 @@ class QuickCFITest : public CFITest {
}
}
m2l->AdjustSpillMask();
- m2l->GenEntrySequence(NULL, m2l->LocCReturnRef());
+ m2l->GenEntrySequence(nullptr, m2l->LocCReturnRef());
m2l->GenExitSequence();
m2l->HandleSlowPaths();
m2l->AssembleLIR();
diff --git a/compiler/dex/quick/quick_compiler.cc b/compiler/dex/quick/quick_compiler.cc
index fc3e687..39eb117 100644
--- a/compiler/dex/quick/quick_compiler.cc
+++ b/compiler/dex/quick/quick_compiler.cc
@@ -102,7 +102,7 @@ static constexpr uint32_t kDisabledOptimizationsPerISA[] = {
static_assert(sizeof(kDisabledOptimizationsPerISA) == 8 * sizeof(uint32_t),
"kDisabledOpts unexpected");
-// Supported shorty types per instruction set. nullptr means that all are available.
+// Supported shorty types per instruction set. null means that all are available.
// Z : boolean
// B : byte
// S : short
@@ -422,7 +422,7 @@ static int kInvokeOpcodes[] = {
Instruction::INVOKE_VIRTUAL_RANGE_QUICK,
};
-// Unsupported opcodes. nullptr can be used when everything is supported. Size of the lists is
+// Unsupported opcodes. null can be used when everything is supported. Size of the lists is
// recorded below.
static const int* kUnsupportedOpcodes[] = {
// 0 = kNone.
@@ -515,7 +515,7 @@ bool QuickCompiler::CanCompileMethod(uint32_t method_idx, const DexFile& dex_fil
for (unsigned int idx = 0; idx < cu->mir_graph->GetNumBlocks(); idx++) {
BasicBlock* bb = cu->mir_graph->GetBasicBlock(idx);
- if (bb == NULL) continue;
+ if (bb == nullptr) continue;
if (bb->block_type == kDead) continue;
for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
int opcode = mir->dalvikInsn.opcode;
diff --git a/compiler/dex/quick/ralloc_util.cc b/compiler/dex/quick/ralloc_util.cc
index e779479..8ec86fa 100644
--- a/compiler/dex/quick/ralloc_util.cc
+++ b/compiler/dex/quick/ralloc_util.cc
@@ -935,7 +935,7 @@ bool Mir2Lir::CheckCorePoolSanity() {
RegStorage my_reg = info->GetReg();
RegStorage partner_reg = info->Partner();
RegisterInfo* partner = GetRegInfo(partner_reg);
- DCHECK(partner != NULL);
+ DCHECK(partner != nullptr);
DCHECK(partner->IsWide());
DCHECK_EQ(my_reg.GetReg(), partner->Partner().GetReg());
DCHECK(partner->IsLive());
diff --git a/compiler/dex/quick/x86/assemble_x86.cc b/compiler/dex/quick/x86/assemble_x86.cc
index af19f5e..eb33357 100644
--- a/compiler/dex/quick/x86/assemble_x86.cc
+++ b/compiler/dex/quick/x86/assemble_x86.cc
@@ -1633,7 +1633,7 @@ AssemblerStatus X86Mir2Lir::AssembleInstructions(CodeOffset start_addr) {
AssemblerStatus res = kSuccess; // Assume success
const bool kVerbosePcFixup = false;
- for (lir = first_lir_insn_; lir != NULL; lir = NEXT_LIR(lir)) {
+ for (lir = first_lir_insn_; lir != nullptr; lir = NEXT_LIR(lir)) {
if (IsPseudoLirOp(lir->opcode)) {
continue;
}
@@ -1646,7 +1646,7 @@ AssemblerStatus X86Mir2Lir::AssembleInstructions(CodeOffset start_addr) {
switch (lir->opcode) {
case kX86Jcc8: {
LIR *target_lir = lir->target;
- DCHECK(target_lir != NULL);
+ DCHECK(target_lir != nullptr);
int delta = 0;
CodeOffset pc;
if (IS_SIMM8(lir->operands[0])) {
@@ -1679,7 +1679,7 @@ AssemblerStatus X86Mir2Lir::AssembleInstructions(CodeOffset start_addr) {
}
case kX86Jcc32: {
LIR *target_lir = lir->target;
- DCHECK(target_lir != NULL);
+ DCHECK(target_lir != nullptr);
CodeOffset pc = lir->offset + 6 /* 2 byte opcode + rel32 */;
CodeOffset target = target_lir->offset;
int delta = target - pc;
@@ -1695,7 +1695,7 @@ AssemblerStatus X86Mir2Lir::AssembleInstructions(CodeOffset start_addr) {
}
case kX86Jecxz8: {
LIR *target_lir = lir->target;
- DCHECK(target_lir != NULL);
+ DCHECK(target_lir != nullptr);
CodeOffset pc;
pc = lir->offset + 2; // opcode + rel8
CodeOffset target = target_lir->offset;
@@ -1706,7 +1706,7 @@ AssemblerStatus X86Mir2Lir::AssembleInstructions(CodeOffset start_addr) {
}
case kX86Jmp8: {
LIR *target_lir = lir->target;
- DCHECK(target_lir != NULL);
+ DCHECK(target_lir != nullptr);
int delta = 0;
CodeOffset pc;
if (IS_SIMM8(lir->operands[0])) {
@@ -1738,7 +1738,7 @@ AssemblerStatus X86Mir2Lir::AssembleInstructions(CodeOffset start_addr) {
}
case kX86Jmp32: {
LIR *target_lir = lir->target;
- DCHECK(target_lir != NULL);
+ DCHECK(target_lir != nullptr);
CodeOffset pc = lir->offset + 5 /* opcode + rel32 */;
CodeOffset target = target_lir->offset;
int delta = target - pc;
@@ -1748,7 +1748,7 @@ AssemblerStatus X86Mir2Lir::AssembleInstructions(CodeOffset start_addr) {
default:
if (lir->flags.fixup == kFixupLoad) {
LIR *target_lir = lir->target;
- DCHECK(target_lir != NULL);
+ DCHECK(target_lir != nullptr);
CodeOffset target = target_lir->offset;
// Handle 64 bit RIP addressing.
if (lir->operands[1] == kRIPReg) {
@@ -1950,7 +1950,7 @@ int X86Mir2Lir::AssignInsnOffsets() {
LIR* lir;
int offset = 0;
- for (lir = first_lir_insn_; lir != NULL; lir = NEXT_LIR(lir)) {
+ for (lir = first_lir_insn_; lir != nullptr; lir = NEXT_LIR(lir)) {
lir->offset = offset;
if (LIKELY(!IsPseudoLirOp(lir->opcode))) {
if (!lir->flags.is_nop) {
diff --git a/compiler/dex/quick/x86/call_x86.cc b/compiler/dex/quick/x86/call_x86.cc
index d7a5eb0..e2364d8 100644
--- a/compiler/dex/quick/x86/call_x86.cc
+++ b/compiler/dex/quick/x86/call_x86.cc
@@ -80,7 +80,7 @@ void X86Mir2Lir::GenLargePackedSwitch(MIR* mir, DexOffset table_offset, RegLocat
// Bounds check - if < 0 or >= size continue following switch
OpRegImm(kOpCmp, keyReg, size - 1);
- LIR* branch_over = OpCondBranch(kCondHi, NULL);
+ LIR* branch_over = OpCondBranch(kCondHi, nullptr);
RegStorage addr_for_jump;
if (cu_->target64) {
diff --git a/compiler/dex/quick/x86/fp_x86.cc b/compiler/dex/quick/x86/fp_x86.cc
index 10af31a..8e81746 100755
--- a/compiler/dex/quick/x86/fp_x86.cc
+++ b/compiler/dex/quick/x86/fp_x86.cc
@@ -484,13 +484,13 @@ void X86Mir2Lir::GenCmpFP(Instruction::Code code, RegLocation rl_dest,
} else {
NewLIR2(kX86UcomisdRR, rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
}
- LIR* branch = NULL;
+ LIR* branch = nullptr;
if (unordered_gt) {
branch = NewLIR2(kX86Jcc8, 0, kX86CondPE);
}
// If the result reg can't be byte accessed, use a jump and move instead of a set.
if (!IsByteRegister(rl_result.reg)) {
- LIR* branch2 = NULL;
+ LIR* branch2 = nullptr;
if (unordered_gt) {
branch2 = NewLIR2(kX86Jcc8, 0, kX86CondA);
NewLIR2(kX86Mov32RI, rl_result.reg.GetReg(), 0x0);
@@ -513,7 +513,7 @@ void X86Mir2Lir::GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias,
bool is_double) {
LIR* taken = &block_label_list_[bb->taken];
LIR* not_taken = &block_label_list_[bb->fall_through];
- LIR* branch = NULL;
+ LIR* branch = nullptr;
RegLocation rl_src1;
RegLocation rl_src2;
if (is_double) {
diff --git a/compiler/dex/quick/x86/int_x86.cc b/compiler/dex/quick/x86/int_x86.cc
index 2c13b61..943bfc0 100755
--- a/compiler/dex/quick/x86/int_x86.cc
+++ b/compiler/dex/quick/x86/int_x86.cc
@@ -1569,7 +1569,7 @@ LIR* X86Mir2Lir::OpTestSuspend(LIR* target) {
} else {
OpTlsCmp(Thread::ThreadFlagsOffset<4>(), 0);
}
- return OpCondBranch((target == NULL) ? kCondNe : kCondEq, target);
+ return OpCondBranch((target == nullptr) ? kCondNe : kCondEq, target);
}
// Decrement register and branch on condition
@@ -3005,7 +3005,7 @@ void X86Mir2Lir::GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx,
// Assume that there is no match.
LoadConstant(result_reg, 0);
- LIR* null_branchover = OpCmpImmBranch(kCondEq, object.reg, 0, NULL);
+ LIR* null_branchover = OpCmpImmBranch(kCondEq, object.reg, 0, nullptr);
// We will use this register to compare to memory below.
// References are 32 bit in memory, and 64 bit in registers (in 64 bit mode).
diff --git a/compiler/dex/quick/x86/target_x86.cc b/compiler/dex/quick/x86/target_x86.cc
index a16e242..b460379 100755
--- a/compiler/dex/quick/x86/target_x86.cc
+++ b/compiler/dex/quick/x86/target_x86.cc
@@ -1281,7 +1281,7 @@ bool X86Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) {
RegLocation rl_return = GetReturn(kCoreReg);
RegLocation rl_dest = InlineTarget(info);
- // Is the string non-NULL?
+ // Is the string non-null?
LoadValueDirectFixed(rl_obj, rs_rDX);
GenNullCheck(rs_rDX, info->opt_flags);
info->opt_flags |= MIR_IGNORE_NULL_CHECK; // Record that we've null checked.
diff --git a/compiler/dex/quick/x86/utility_x86.cc b/compiler/dex/quick/x86/utility_x86.cc
index efcb9ee..61a1bec 100644
--- a/compiler/dex/quick/x86/utility_x86.cc
+++ b/compiler/dex/quick/x86/utility_x86.cc
@@ -578,7 +578,7 @@ LIR* X86Mir2Lir::LoadConstantWide(RegStorage r_dest, int64_t value) {
} else if (pc_rel_base_reg_.Valid() || cu_->target64) {
// We will load the value from the literal area.
LIR* data_target = ScanLiteralPoolWide(literal_list_, val_lo, val_hi);
- if (data_target == NULL) {
+ if (data_target == nullptr) {
data_target = AddWideData(&literal_list_, val_lo, val_hi);
}
@@ -642,8 +642,8 @@ LIR* X86Mir2Lir::LoadConstantWide(RegStorage r_dest, int64_t value) {
LIR* X86Mir2Lir::LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale,
int displacement, RegStorage r_dest, OpSize size) {
- LIR *load = NULL;
- LIR *load2 = NULL;
+ LIR *load = nullptr;
+ LIR *load2 = nullptr;
bool is_array = r_index.Valid();
bool pair = r_dest.IsPair();
bool is64bit = ((size == k64) || (size == kDouble));
@@ -763,7 +763,7 @@ LIR* X86Mir2Lir::LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int
}
}
- // Always return first load generated as this might cause a fault if base is nullptr.
+ // Always return first load generated as this might cause a fault if base is null.
return load;
}
@@ -791,8 +791,8 @@ LIR* X86Mir2Lir::LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_
LIR* X86Mir2Lir::StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale,
int displacement, RegStorage r_src, OpSize size,
int opt_flags) {
- LIR *store = NULL;
- LIR *store2 = NULL;
+ LIR *store = nullptr;
+ LIR *store2 = nullptr;
bool is_array = r_index.Valid();
bool pair = r_src.IsPair();
bool is64bit = (size == k64) || (size == kDouble);
diff --git a/compiler/dex/ssa_transformation.cc b/compiler/dex/ssa_transformation.cc
index 197f66d..939bf40 100644
--- a/compiler/dex/ssa_transformation.cc
+++ b/compiler/dex/ssa_transformation.cc
@@ -26,15 +26,15 @@ namespace art {
void MIRGraph::ClearAllVisitedFlags() {
AllNodesIterator iter(this);
- for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
+ for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
bb->visited = false;
}
}
BasicBlock* MIRGraph::NeedsVisit(BasicBlock* bb) {
- if (bb != NULL) {
+ if (bb != nullptr) {
if (bb->visited || bb->hidden) {
- bb = NULL;
+ bb = nullptr;
}
}
return bb;
@@ -42,13 +42,13 @@ BasicBlock* MIRGraph::NeedsVisit(BasicBlock* bb) {
BasicBlock* MIRGraph::NextUnvisitedSuccessor(BasicBlock* bb) {
BasicBlock* res = NeedsVisit(GetBasicBlock(bb->fall_through));
- if (res == NULL) {
+ if (res == nullptr) {
res = NeedsVisit(GetBasicBlock(bb->taken));
- if (res == NULL) {
+ if (res == nullptr) {
if (bb->successor_block_list_type != kNotUsed) {
for (SuccessorBlockInfo* sbi : bb->successor_blocks) {
res = NeedsVisit(GetBasicBlock(sbi->block));
- if (res != NULL) {
+ if (res != nullptr) {
break;
}
}
@@ -75,7 +75,7 @@ void MIRGraph::RecordDFSOrders(BasicBlock* block) {
while (!succ.empty()) {
BasicBlock* curr = succ.back();
BasicBlock* next_successor = NextUnvisitedSuccessor(curr);
- if (next_successor != NULL) {
+ if (next_successor != nullptr) {
MarkPreOrder(next_successor);
succ.push_back(next_successor);
continue;
@@ -107,7 +107,7 @@ void MIRGraph::ComputeDFSOrders() {
if (num_reachable_blocks_ != GetNumBlocks()) {
// Kill all unreachable blocks.
AllNodesIterator iter(this);
- for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
+ for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
if (!bb->visited) {
bb->Kill(this);
}
@@ -121,7 +121,7 @@ void MIRGraph::ComputeDFSOrders() {
* register idx is defined in BasicBlock bb.
*/
bool MIRGraph::FillDefBlockMatrix(BasicBlock* bb) {
- if (bb->data_flow_info == NULL) {
+ if (bb->data_flow_info == nullptr) {
return false;
}
@@ -149,11 +149,11 @@ void MIRGraph::ComputeDefBlockMatrix() {
}
AllNodesIterator iter(this);
- for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
+ for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
FindLocalLiveIn(bb);
}
AllNodesIterator iter2(this);
- for (BasicBlock* bb = iter2.Next(); bb != NULL; bb = iter2.Next()) {
+ for (BasicBlock* bb = iter2.Next(); bb != nullptr; bb = iter2.Next()) {
FillDefBlockMatrix(bb);
}
@@ -247,7 +247,7 @@ bool MIRGraph::ComputeDominanceFrontier(BasicBlock* bb) {
void MIRGraph::InitializeDominationInfo(BasicBlock* bb) {
int num_total_blocks = GetBasicBlockListCount();
- if (bb->dominators == NULL) {
+ if (bb->dominators == nullptr) {
bb->dominators = new (arena_) ArenaBitVector(arena_, num_total_blocks,
true /* expandable */, kBitMapDominators);
bb->i_dominated = new (arena_) ArenaBitVector(arena_, num_total_blocks,
@@ -357,7 +357,7 @@ void MIRGraph::ComputeDominators() {
/* Initialize domination-related data structures */
PreOrderDfsIterator iter(this);
- for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
+ for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
InitializeDominationInfo(bb);
}
@@ -376,7 +376,7 @@ void MIRGraph::ComputeDominators() {
/* Compute the immediate dominators */
RepeatingReversePostOrderDfsIterator iter2(this);
bool change = false;
- for (BasicBlock* bb = iter2.Next(false); bb != NULL; bb = iter2.Next(change)) {
+ for (BasicBlock* bb = iter2.Next(false); bb != nullptr; bb = iter2.Next(change)) {
change = ComputeblockIDom(bb);
}
@@ -387,19 +387,19 @@ void MIRGraph::ComputeDominators() {
GetEntryBlock()->i_dom = 0;
PreOrderDfsIterator iter3(this);
- for (BasicBlock* bb = iter3.Next(); bb != NULL; bb = iter3.Next()) {
+ for (BasicBlock* bb = iter3.Next(); bb != nullptr; bb = iter3.Next()) {
SetDominators(bb);
}
ReversePostOrderDfsIterator iter4(this);
- for (BasicBlock* bb = iter4.Next(); bb != NULL; bb = iter4.Next()) {
+ for (BasicBlock* bb = iter4.Next(); bb != nullptr; bb = iter4.Next()) {
ComputeBlockDominators(bb);
}
// Compute the dominance frontier for each block.
ComputeDomPostOrderTraversal(GetEntryBlock());
PostOrderDOMIterator iter5(this);
- for (BasicBlock* bb = iter5.Next(); bb != NULL; bb = iter5.Next()) {
+ for (BasicBlock* bb = iter5.Next(); bb != nullptr; bb = iter5.Next()) {
ComputeDominanceFrontier(bb);
}
@@ -434,7 +434,7 @@ bool MIRGraph::ComputeBlockLiveIns(BasicBlock* bb) {
DCHECK_EQ(temp_.ssa.num_vregs, cu_->mir_graph.get()->GetNumOfCodeAndTempVRs());
ArenaBitVector* temp_live_vregs = temp_.ssa.work_live_vregs;
- if (bb->data_flow_info == NULL) {
+ if (bb->data_flow_info == nullptr) {
return false;
}
temp_live_vregs->Copy(bb->data_flow_info->live_in_v);
@@ -466,7 +466,7 @@ bool MIRGraph::ComputeBlockLiveIns(BasicBlock* bb) {
void MIRGraph::FindPhiNodeBlocks() {
RepeatingPostOrderDfsIterator iter(this);
bool change = false;
- for (BasicBlock* bb = iter.Next(false); bb != NULL; bb = iter.Next(change)) {
+ for (BasicBlock* bb = iter.Next(false); bb != nullptr; bb = iter.Next(change)) {
change = ComputeBlockLiveIns(bb);
}
@@ -505,7 +505,7 @@ void MIRGraph::FindPhiNodeBlocks() {
*/
bool MIRGraph::InsertPhiNodeOperands(BasicBlock* bb) {
/* Phi nodes are at the beginning of each block */
- for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+ for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
if (mir->dalvikInsn.opcode != static_cast<Instruction::Code>(kMirOpPhi))
return true;
int ssa_reg = mir->ssa_rep->defs[0];
diff --git a/compiler/dex/verification_results.cc b/compiler/dex/verification_results.cc
index a4df00e..c1d5cb7 100644
--- a/compiler/dex/verification_results.cc
+++ b/compiler/dex/verification_results.cc
@@ -46,7 +46,7 @@ VerificationResults::~VerificationResults() {
}
bool VerificationResults::ProcessVerifiedMethod(verifier::MethodVerifier* method_verifier) {
- DCHECK(method_verifier != NULL);
+ DCHECK(method_verifier != nullptr);
MethodReference ref = method_verifier->GetMethodReference();
bool compile = IsCandidateForCompilation(ref, method_verifier->GetAccessFlags());
const VerifiedMethod* verified_method = VerifiedMethod::Create(method_verifier, compile);
diff --git a/compiler/dex/verified_method.cc b/compiler/dex/verified_method.cc
index 977757f..7eba515 100644
--- a/compiler/dex/verified_method.cc
+++ b/compiler/dex/verified_method.cc
@@ -166,7 +166,7 @@ void VerifiedMethod::VerifyGcMap(verifier::MethodVerifier* method_verifier,
}
}
} else {
- DCHECK(i >= 65536 || reg_bitmap == NULL);
+ DCHECK(i >= 65536 || reg_bitmap == nullptr);
}
}
}
@@ -283,7 +283,7 @@ void VerifiedMethod::GenerateDevirtMap(verifier::MethodVerifier* method_verifier
}
mirror::ArtMethod* abstract_method = method_verifier->GetDexCache()->GetResolvedMethod(
is_range ? inst->VRegB_3rc() : inst->VRegB_35c());
- if (abstract_method == NULL) {
+ if (abstract_method == nullptr) {
// If the method is not found in the cache this means that it was never found
// by ResolveMethodAndCheckAccess() called when verifying invoke_*.
continue;
diff --git a/compiler/dex/verified_method.h b/compiler/dex/verified_method.h
index 437ae52..ad07639 100644
--- a/compiler/dex/verified_method.h
+++ b/compiler/dex/verified_method.h
@@ -59,7 +59,7 @@ class VerifiedMethod {
return safe_cast_set_;
}
- // Returns the devirtualization target method, or nullptr if none.
+ // Returns the devirtualization target method, or null if none.
const MethodReference* GetDevirtTarget(uint32_t dex_pc) const;
// Returns the dequicken field / method for a quick invoke / field get. Returns null if there is