summaryrefslogtreecommitdiffstats
path: root/compiler/dex/quick
diff options
context:
space:
mode:
Diffstat (limited to 'compiler/dex/quick')
-rw-r--r--compiler/dex/quick/arm/call_arm.cc6
-rw-r--r--compiler/dex/quick/arm/target_arm.cc18
-rw-r--r--compiler/dex/quick/arm64/call_arm64.cc6
-rw-r--r--compiler/dex/quick/arm64/target_arm64.cc12
-rw-r--r--compiler/dex/quick/codegen_util.cc49
-rw-r--r--compiler/dex/quick/gen_common.cc5
-rw-r--r--compiler/dex/quick/mips/call_mips.cc6
-rw-r--r--compiler/dex/quick/mips/target_mips.cc12
-rw-r--r--compiler/dex/quick/mir_to_lir-inl.h13
-rw-r--r--compiler/dex/quick/mir_to_lir.cc11
-rw-r--r--compiler/dex/quick/mir_to_lir.h38
-rw-r--r--compiler/dex/quick/ralloc_util.cc99
-rw-r--r--compiler/dex/quick/x86/call_x86.cc4
-rw-r--r--compiler/dex/quick/x86/codegen_x86.h6
-rwxr-xr-xcompiler/dex/quick/x86/target_x86.cc60
15 files changed, 161 insertions, 184 deletions
diff --git a/compiler/dex/quick/arm/call_arm.cc b/compiler/dex/quick/arm/call_arm.cc
index fc98d31..f6588fe 100644
--- a/compiler/dex/quick/arm/call_arm.cc
+++ b/compiler/dex/quick/arm/call_arm.cc
@@ -55,7 +55,7 @@ void ArmMir2Lir::GenLargeSparseSwitch(MIR* mir, uint32_t table_offset, RegLocati
tab_rec->vaddr = current_dalvik_offset_;
uint32_t size = table[1];
tab_rec->targets = static_cast<LIR**>(arena_->Alloc(size * sizeof(LIR*), kArenaAllocLIR));
- switch_tables_.Insert(tab_rec);
+ switch_tables_.push_back(tab_rec);
// Get the switch value
rl_src = LoadValue(rl_src, kCoreReg);
@@ -104,7 +104,7 @@ void ArmMir2Lir::GenLargePackedSwitch(MIR* mir, uint32_t table_offset, RegLocati
uint32_t size = table[1];
tab_rec->targets =
static_cast<LIR**>(arena_->Alloc(size * sizeof(LIR*), kArenaAllocLIR));
- switch_tables_.Insert(tab_rec);
+ switch_tables_.push_back(tab_rec);
// Get the switch value
rl_src = LoadValue(rl_src, kCoreReg);
@@ -158,7 +158,7 @@ void ArmMir2Lir::GenFillArrayData(MIR* mir, DexOffset table_offset, RegLocation
uint32_t size = tab_rec->table[2] | ((static_cast<uint32_t>(tab_rec->table[3])) << 16);
tab_rec->size = (size * width) + 8;
- fill_array_data_.Insert(tab_rec);
+ fill_array_data_.push_back(tab_rec);
// Making a call - use explicit registers
FlushAllRegs(); /* Everything to home location */
diff --git a/compiler/dex/quick/arm/target_arm.cc b/compiler/dex/quick/arm/target_arm.cc
index 0be478d..aaf4449 100644
--- a/compiler/dex/quick/arm/target_arm.cc
+++ b/compiler/dex/quick/arm/target_arm.cc
@@ -568,16 +568,16 @@ Mir2Lir* ArmCodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
}
void ArmMir2Lir::CompilerInitializeRegAlloc() {
- reg_pool_ = new (arena_) RegisterPool(this, arena_, core_regs, empty_pool /* core64 */, sp_regs,
- dp_regs, reserved_regs, empty_pool /* reserved64 */,
- core_temps, empty_pool /* core64_temps */, sp_temps,
- dp_temps);
+ reg_pool_.reset(new (arena_) RegisterPool(this, arena_, core_regs, empty_pool /* core64 */,
+ sp_regs, dp_regs,
+ reserved_regs, empty_pool /* reserved64 */,
+ core_temps, empty_pool /* core64_temps */,
+ sp_temps, dp_temps));
// Target-specific adjustments.
// Alias single precision floats to appropriate half of overlapping double.
- GrowableArray<RegisterInfo*>::Iterator it(&reg_pool_->sp_regs_);
- for (RegisterInfo* info = it.Next(); info != nullptr; info = it.Next()) {
+ for (RegisterInfo* info : reg_pool_->sp_regs_) {
int sp_reg_num = info->GetReg().GetRegNum();
int dp_reg_num = sp_reg_num >> 1;
RegStorage dp_reg = RegStorage::Solo64(RegStorage::kFloatingPoint | dp_reg_num);
@@ -784,8 +784,7 @@ RegStorage ArmMir2Lir::AllocPreservedDouble(int s_reg) {
* TODO: until runtime support is in, make sure we avoid promoting the same vreg to
* different underlying physical registers.
*/
- GrowableArray<RegisterInfo*>::Iterator it(&reg_pool_->dp_regs_);
- for (RegisterInfo* info = it.Next(); info != nullptr; info = it.Next()) {
+ for (RegisterInfo* info : reg_pool_->dp_regs_) {
if (!info->IsTemp() && !info->InUse()) {
res = info->GetReg();
info->MarkInUse();
@@ -809,8 +808,7 @@ RegStorage ArmMir2Lir::AllocPreservedDouble(int s_reg) {
// Reserve a callee-save sp single register.
RegStorage ArmMir2Lir::AllocPreservedSingle(int s_reg) {
RegStorage res;
- GrowableArray<RegisterInfo*>::Iterator it(&reg_pool_->sp_regs_);
- for (RegisterInfo* info = it.Next(); info != nullptr; info = it.Next()) {
+ for (RegisterInfo* info : reg_pool_->sp_regs_) {
if (!info->IsTemp() && !info->InUse()) {
res = info->GetReg();
int p_map_idx = SRegToPMap(s_reg);
diff --git a/compiler/dex/quick/arm64/call_arm64.cc b/compiler/dex/quick/arm64/call_arm64.cc
index b9c0990..6081f28 100644
--- a/compiler/dex/quick/arm64/call_arm64.cc
+++ b/compiler/dex/quick/arm64/call_arm64.cc
@@ -55,7 +55,7 @@ void Arm64Mir2Lir::GenLargeSparseSwitch(MIR* mir, uint32_t table_offset, RegLoca
tab_rec->vaddr = current_dalvik_offset_;
uint32_t size = table[1];
tab_rec->targets = static_cast<LIR**>(arena_->Alloc(size * sizeof(LIR*), kArenaAllocLIR));
- switch_tables_.Insert(tab_rec);
+ switch_tables_.push_back(tab_rec);
// Get the switch value
rl_src = LoadValue(rl_src, kCoreReg);
@@ -108,7 +108,7 @@ void Arm64Mir2Lir::GenLargePackedSwitch(MIR* mir, uint32_t table_offset, RegLoca
uint32_t size = table[1];
tab_rec->targets =
static_cast<LIR**>(arena_->Alloc(size * sizeof(LIR*), kArenaAllocLIR));
- switch_tables_.Insert(tab_rec);
+ switch_tables_.push_back(tab_rec);
// Get the switch value
rl_src = LoadValue(rl_src, kCoreReg);
@@ -167,7 +167,7 @@ void Arm64Mir2Lir::GenFillArrayData(MIR* mir, DexOffset table_offset, RegLocatio
uint32_t size = tab_rec->table[2] | ((static_cast<uint32_t>(tab_rec->table[3])) << 16);
tab_rec->size = (size * width) + 8;
- fill_array_data_.Insert(tab_rec);
+ fill_array_data_.push_back(tab_rec);
// Making a call - use explicit registers
FlushAllRegs(); /* Everything to home location */
diff --git a/compiler/dex/quick/arm64/target_arm64.cc b/compiler/dex/quick/arm64/target_arm64.cc
index d7d5651..fe0554c 100644
--- a/compiler/dex/quick/arm64/target_arm64.cc
+++ b/compiler/dex/quick/arm64/target_arm64.cc
@@ -602,14 +602,13 @@ Mir2Lir* Arm64CodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph
}
void Arm64Mir2Lir::CompilerInitializeRegAlloc() {
- reg_pool_ = new (arena_) RegisterPool(this, arena_, core_regs, core64_regs, sp_regs, dp_regs,
- reserved_regs, reserved64_regs, core_temps, core64_temps,
- sp_temps, dp_temps);
+ reg_pool_.reset(new (arena_) RegisterPool(this, arena_, core_regs, core64_regs, sp_regs, dp_regs,
+ reserved_regs, reserved64_regs,
+ core_temps, core64_temps, sp_temps, dp_temps));
// Target-specific adjustments.
// Alias single precision float registers to corresponding double registers.
- GrowableArray<RegisterInfo*>::Iterator fp_it(&reg_pool_->sp_regs_);
- for (RegisterInfo* info = fp_it.Next(); info != nullptr; info = fp_it.Next()) {
+ for (RegisterInfo* info : reg_pool_->sp_regs_) {
int fp_reg_num = info->GetReg().GetRegNum();
RegStorage dp_reg = RegStorage::FloatSolo64(fp_reg_num);
RegisterInfo* dp_reg_info = GetRegInfo(dp_reg);
@@ -622,8 +621,7 @@ void Arm64Mir2Lir::CompilerInitializeRegAlloc() {
}
// Alias 32bit W registers to corresponding 64bit X registers.
- GrowableArray<RegisterInfo*>::Iterator w_it(&reg_pool_->core_regs_);
- for (RegisterInfo* info = w_it.Next(); info != nullptr; info = w_it.Next()) {
+ for (RegisterInfo* info : reg_pool_->core_regs_) {
int x_reg_num = info->GetReg().GetRegNum();
RegStorage x_reg = RegStorage::Solo64(x_reg_num);
RegisterInfo* x_reg_info = GetRegInfo(x_reg);
diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc
index d90bce1..bd2a942 100644
--- a/compiler/dex/quick/codegen_util.cc
+++ b/compiler/dex/quick/codegen_util.cc
@@ -530,10 +530,7 @@ void Mir2Lir::InstallLiteralPools() {
/* Write the switch tables to the output stream */
void Mir2Lir::InstallSwitchTables() {
- GrowableArray<SwitchTable*>::Iterator iterator(&switch_tables_);
- while (true) {
- Mir2Lir::SwitchTable* tab_rec = iterator.Next();
- if (tab_rec == NULL) break;
+ for (Mir2Lir::SwitchTable* tab_rec : switch_tables_) {
AlignBuffer(code_buffer_, tab_rec->offset);
/*
* For Arm, our reference point is the address of the bx
@@ -590,10 +587,7 @@ void Mir2Lir::InstallSwitchTables() {
/* Write the fill array data to the output stream */
void Mir2Lir::InstallFillArrayData() {
- GrowableArray<FillArrayData*>::Iterator iterator(&fill_array_data_);
- while (true) {
- Mir2Lir::FillArrayData *tab_rec = iterator.Next();
- if (tab_rec == NULL) break;
+ for (Mir2Lir::FillArrayData* tab_rec : fill_array_data_) {
AlignBuffer(code_buffer_, tab_rec->offset);
for (int i = 0; i < (tab_rec->size + 1) / 2; i++) {
code_buffer_.push_back(tab_rec->table[i] & 0xFF);
@@ -801,10 +795,7 @@ int Mir2Lir::AssignLiteralOffset(CodeOffset offset) {
}
int Mir2Lir::AssignSwitchTablesOffset(CodeOffset offset) {
- GrowableArray<SwitchTable*>::Iterator iterator(&switch_tables_);
- while (true) {
- Mir2Lir::SwitchTable* tab_rec = iterator.Next();
- if (tab_rec == NULL) break;
+ for (Mir2Lir::SwitchTable* tab_rec : switch_tables_) {
tab_rec->offset = offset;
if (tab_rec->table[0] == Instruction::kSparseSwitchSignature) {
offset += tab_rec->table[1] * (sizeof(int) * 2);
@@ -818,15 +809,12 @@ int Mir2Lir::AssignSwitchTablesOffset(CodeOffset offset) {
}
int Mir2Lir::AssignFillArrayDataOffset(CodeOffset offset) {
- GrowableArray<FillArrayData*>::Iterator iterator(&fill_array_data_);
- while (true) {
- Mir2Lir::FillArrayData *tab_rec = iterator.Next();
- if (tab_rec == NULL) break;
+ for (Mir2Lir::FillArrayData* tab_rec : fill_array_data_) {
tab_rec->offset = offset;
offset += tab_rec->size;
// word align
offset = RoundUp(offset, 4);
- }
+ }
return offset;
}
@@ -878,10 +866,7 @@ void Mir2Lir::MarkSparseCaseLabels(Mir2Lir::SwitchTable* tab_rec) {
}
void Mir2Lir::ProcessSwitchTables() {
- GrowableArray<SwitchTable*>::Iterator iterator(&switch_tables_);
- while (true) {
- Mir2Lir::SwitchTable *tab_rec = iterator.Next();
- if (tab_rec == NULL) break;
+ for (Mir2Lir::SwitchTable* tab_rec : switch_tables_) {
if (tab_rec->table[0] == Instruction::kPackedSwitchSignature) {
MarkPackedCaseLabels(tab_rec);
} else if (tab_rec->table[0] == Instruction::kSparseSwitchSignature) {
@@ -1006,18 +991,18 @@ Mir2Lir::Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena
first_fixup_(NULL),
cu_(cu),
mir_graph_(mir_graph),
- switch_tables_(arena, 4, kGrowableArraySwitchTables),
- fill_array_data_(arena, 4, kGrowableArrayFillArrayData),
- tempreg_info_(arena, 20, kGrowableArrayMisc),
- reginfo_map_(arena, RegStorage::kMaxRegs, kGrowableArrayMisc),
- pointer_storage_(arena, 128, kGrowableArrayMisc),
+ switch_tables_(arena->Adapter(kArenaAllocSwitchTable)),
+ fill_array_data_(arena->Adapter(kArenaAllocFillArrayData)),
+ tempreg_info_(arena->Adapter()),
+ reginfo_map_(arena->Adapter()),
+ pointer_storage_(arena->Adapter()),
data_offset_(0),
total_size_(0),
block_label_list_(NULL),
promotion_map_(NULL),
current_dalvik_offset_(0),
estimated_native_code_size_(0),
- reg_pool_(NULL),
+ reg_pool_(nullptr),
live_sreg_(0),
core_vmap_table_(mir_graph->GetArena()->Adapter()),
fp_vmap_table_(mir_graph->GetArena()->Adapter()),
@@ -1028,9 +1013,15 @@ Mir2Lir::Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena
fp_spill_mask_(0),
first_lir_insn_(NULL),
last_lir_insn_(NULL),
- slow_paths_(arena, 32, kGrowableArraySlowPaths),
+ slow_paths_(arena->Adapter(kArenaAllocSlowPaths)),
mem_ref_type_(ResourceMask::kHeapRef),
mask_cache_(arena) {
+ switch_tables_.reserve(4);
+ fill_array_data_.reserve(4);
+ tempreg_info_.reserve(20);
+ reginfo_map_.reserve(RegStorage::kMaxRegs);
+ pointer_storage_.reserve(128);
+ slow_paths_.reserve(32);
// Reserve pointer id 0 for NULL.
size_t null_idx = WrapPointer(NULL);
DCHECK_EQ(null_idx, 0U);
@@ -1223,7 +1214,7 @@ LIR *Mir2Lir::OpCmpMemImmBranch(ConditionCode cond, RegStorage temp_reg, RegStor
}
void Mir2Lir::AddSlowPath(LIRSlowPath* slowpath) {
- slow_paths_.Insert(slowpath);
+ slow_paths_.push_back(slowpath);
ResetDefTracking();
}
diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc
index fbe710b..9f7a881 100644
--- a/compiler/dex/quick/gen_common.cc
+++ b/compiler/dex/quick/gen_common.cc
@@ -757,11 +757,10 @@ void Mir2Lir::GenSget(MIR* mir, RegLocation rl_dest, OpSize size, Primitive::Typ
void Mir2Lir::HandleSlowPaths() {
// We should check slow_paths_.Size() every time, because a new slow path
// may be created during slowpath->Compile().
- for (size_t i = 0; i < slow_paths_.Size(); ++i) {
- LIRSlowPath* slowpath = slow_paths_.Get(i);
+ for (LIRSlowPath* slowpath : slow_paths_) {
slowpath->Compile();
}
- slow_paths_.Reset();
+ slow_paths_.clear();
}
void Mir2Lir::GenIGet(MIR* mir, int opt_flags, OpSize size, Primitive::Type type,
diff --git a/compiler/dex/quick/mips/call_mips.cc b/compiler/dex/quick/mips/call_mips.cc
index f3edd7e..6536c41 100644
--- a/compiler/dex/quick/mips/call_mips.cc
+++ b/compiler/dex/quick/mips/call_mips.cc
@@ -74,7 +74,7 @@ void MipsMir2Lir::GenLargeSparseSwitch(MIR* mir, DexOffset table_offset, RegLoca
int elements = table[1];
tab_rec->targets =
static_cast<LIR**>(arena_->Alloc(elements * sizeof(LIR*), kArenaAllocLIR));
- switch_tables_.Insert(tab_rec);
+ switch_tables_.push_back(tab_rec);
// The table is composed of 8-byte key/disp pairs
int byte_size = elements * 8;
@@ -151,7 +151,7 @@ void MipsMir2Lir::GenLargePackedSwitch(MIR* mir, DexOffset table_offset, RegLoca
int size = table[1];
tab_rec->targets = static_cast<LIR**>(arena_->Alloc(size * sizeof(LIR*),
kArenaAllocLIR));
- switch_tables_.Insert(tab_rec);
+ switch_tables_.push_back(tab_rec);
// Get the switch value
rl_src = LoadValue(rl_src, kCoreReg);
@@ -232,7 +232,7 @@ void MipsMir2Lir::GenFillArrayData(MIR* mir, DexOffset table_offset, RegLocation
uint32_t size = tab_rec->table[2] | ((static_cast<uint32_t>(tab_rec->table[3])) << 16);
tab_rec->size = (size * width) + 8;
- fill_array_data_.Insert(tab_rec);
+ fill_array_data_.push_back(tab_rec);
// Making a call - use explicit registers
FlushAllRegs(); /* Everything to home location */
diff --git a/compiler/dex/quick/mips/target_mips.cc b/compiler/dex/quick/mips/target_mips.cc
index 9c4426f..d3719ab 100644
--- a/compiler/dex/quick/mips/target_mips.cc
+++ b/compiler/dex/quick/mips/target_mips.cc
@@ -430,16 +430,16 @@ bool MipsMir2Lir::GenMemBarrier(MemBarrierKind barrier_kind) {
}
void MipsMir2Lir::CompilerInitializeRegAlloc() {
- reg_pool_ = new (arena_) RegisterPool(this, arena_, core_regs, empty_pool /* core64 */, sp_regs,
- dp_regs, reserved_regs, empty_pool /* reserved64 */,
- core_temps, empty_pool /* core64_temps */, sp_temps,
- dp_temps);
+ reg_pool_.reset(new (arena_) RegisterPool(this, arena_, core_regs, empty_pool /* core64 */,
+ sp_regs, dp_regs,
+ reserved_regs, empty_pool /* reserved64 */,
+ core_temps, empty_pool /* core64_temps */,
+ sp_temps, dp_temps));
// Target-specific adjustments.
// Alias single precision floats to appropriate half of overlapping double.
- GrowableArray<RegisterInfo*>::Iterator it(&reg_pool_->sp_regs_);
- for (RegisterInfo* info = it.Next(); info != nullptr; info = it.Next()) {
+ for (RegisterInfo* info : reg_pool_->sp_regs_) {
int sp_reg_num = info->GetReg().GetRegNum();
#if (FR_BIT == 0)
int dp_reg_num = sp_reg_num & ~1;
diff --git a/compiler/dex/quick/mir_to_lir-inl.h b/compiler/dex/quick/mir_to_lir-inl.h
index 22588f3..0aefc2d 100644
--- a/compiler/dex/quick/mir_to_lir-inl.h
+++ b/compiler/dex/quick/mir_to_lir-inl.h
@@ -142,8 +142,9 @@ inline LIR* Mir2Lir::NewLIR5(int opcode, int dest, int src1, int src2, int info1
*/
inline void Mir2Lir::SetupRegMask(ResourceMask* mask, int reg) {
DCHECK_EQ((reg & ~RegStorage::kRegValMask), 0);
- DCHECK(reginfo_map_.Get(reg) != nullptr) << "No info for 0x" << reg;
- *mask = mask->Union(reginfo_map_.Get(reg)->DefUseMask());
+ DCHECK_LT(static_cast<size_t>(reg), reginfo_map_.size());
+ DCHECK(reginfo_map_[reg] != nullptr) << "No info for 0x" << reg;
+ *mask = mask->Union(reginfo_map_[reg]->DefUseMask());
}
/*
@@ -151,8 +152,9 @@ inline void Mir2Lir::SetupRegMask(ResourceMask* mask, int reg) {
*/
inline void Mir2Lir::ClearRegMask(ResourceMask* mask, int reg) {
DCHECK_EQ((reg & ~RegStorage::kRegValMask), 0);
- DCHECK(reginfo_map_.Get(reg) != nullptr) << "No info for 0x" << reg;
- *mask = mask->ClearBits(reginfo_map_.Get(reg)->DefUseMask());
+ DCHECK_LT(static_cast<size_t>(reg), reginfo_map_.size());
+ DCHECK(reginfo_map_[reg] != nullptr) << "No info for 0x" << reg;
+ *mask = mask->ClearBits(reginfo_map_[reg]->DefUseMask());
}
/*
@@ -256,8 +258,7 @@ inline void Mir2Lir::SetupResourceMasks(LIR* lir) {
}
inline art::Mir2Lir::RegisterInfo* Mir2Lir::GetRegInfo(RegStorage reg) {
- RegisterInfo* res = reg.IsPair() ? reginfo_map_.Get(reg.GetLowReg()) :
- reginfo_map_.Get(reg.GetReg());
+ RegisterInfo* res = reg.IsPair() ? reginfo_map_[reg.GetLowReg()] : reginfo_map_[reg.GetReg()];
DCHECK(res != nullptr);
return res;
}
diff --git a/compiler/dex/quick/mir_to_lir.cc b/compiler/dex/quick/mir_to_lir.cc
index 96f00e7..6942c0f 100644
--- a/compiler/dex/quick/mir_to_lir.cc
+++ b/compiler/dex/quick/mir_to_lir.cc
@@ -1268,13 +1268,12 @@ bool Mir2Lir::MethodBlockCodeGen(BasicBlock* bb) {
bool Mir2Lir::SpecialMIR2LIR(const InlineMethod& special) {
cu_->NewTimingSplit("SpecialMIR2LIR");
// Find the first DalvikByteCode block.
- int num_reachable_blocks = mir_graph_->GetNumReachableBlocks();
+ DCHECK_EQ(mir_graph_->GetNumReachableBlocks(), mir_graph_->GetDfsOrder().size());
BasicBlock*bb = NULL;
- for (int idx = 0; idx < num_reachable_blocks; idx++) {
- // TODO: no direct access of growable lists.
- int dfs_index = mir_graph_->GetDfsOrder()->Get(idx);
- bb = mir_graph_->GetBasicBlock(dfs_index);
- if (bb->block_type == kDalvikByteCode) {
+ for (BasicBlockId dfs_id : mir_graph_->GetDfsOrder()) {
+ BasicBlock* candidate = mir_graph_->GetBasicBlock(dfs_id);
+ if (candidate->block_type == kDalvikByteCode) {
+ bb = candidate;
break;
}
}
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
index ea722ab..01aa11d 100644
--- a/compiler/dex/quick/mir_to_lir.h
+++ b/compiler/dex/quick/mir_to_lir.h
@@ -33,7 +33,6 @@
#include "utils/array_ref.h"
#include "utils/arena_allocator.h"
#include "utils/arena_containers.h"
-#include "utils/growable_array.h"
#include "utils/stack_checks.h"
namespace art {
@@ -437,20 +436,21 @@ class Mir2Lir : public Backend {
static void* operator new(size_t size, ArenaAllocator* arena) {
return arena->Alloc(size, kArenaAllocRegAlloc);
}
+ static void operator delete(void* ptr) { UNUSED(ptr); }
void ResetNextTemp() {
next_core_reg_ = 0;
next_sp_reg_ = 0;
next_dp_reg_ = 0;
}
- GrowableArray<RegisterInfo*> core_regs_;
+ ArenaVector<RegisterInfo*> core_regs_;
int next_core_reg_;
- GrowableArray<RegisterInfo*> core64_regs_;
+ ArenaVector<RegisterInfo*> core64_regs_;
int next_core64_reg_;
- GrowableArray<RegisterInfo*> sp_regs_; // Single precision float.
+ ArenaVector<RegisterInfo*> sp_regs_; // Single precision float.
int next_sp_reg_;
- GrowableArray<RegisterInfo*> dp_regs_; // Double precision float.
+ ArenaVector<RegisterInfo*> dp_regs_; // Double precision float.
int next_dp_reg_;
- GrowableArray<RegisterInfo*>* ref_regs_; // Points to core_regs_ or core64_regs_
+ ArenaVector<RegisterInfo*>* ref_regs_; // Points to core_regs_ or core64_regs_
int* next_ref_reg_;
private:
@@ -597,13 +597,13 @@ class Mir2Lir : public Backend {
* may be worth conditionally-compiling a set of identity functions here.
*/
uint32_t WrapPointer(void* pointer) {
- uint32_t res = pointer_storage_.Size();
- pointer_storage_.Insert(pointer);
+ uint32_t res = pointer_storage_.size();
+ pointer_storage_.push_back(pointer);
return res;
}
void* UnwrapPointer(size_t index) {
- return pointer_storage_.Get(index);
+ return pointer_storage_[index];
}
// strdup(), but allocates from the arena.
@@ -713,7 +713,7 @@ class Mir2Lir : public Backend {
void SimpleRegAlloc();
void ResetRegPool();
void CompilerInitPool(RegisterInfo* info, RegStorage* regs, int num);
- void DumpRegPool(GrowableArray<RegisterInfo*>* regs);
+ void DumpRegPool(ArenaVector<RegisterInfo*>* regs);
void DumpCoreRegPool();
void DumpFpRegPool();
void DumpRegPools();
@@ -728,7 +728,7 @@ class Mir2Lir : public Backend {
RegStorage AllocPreservedFpReg(int s_reg);
virtual RegStorage AllocPreservedSingle(int s_reg);
virtual RegStorage AllocPreservedDouble(int s_reg);
- RegStorage AllocTempBody(GrowableArray<RegisterInfo*> &regs, int* next_temp, bool required);
+ RegStorage AllocTempBody(ArenaVector<RegisterInfo*>& regs, int* next_temp, bool required);
virtual RegStorage AllocTemp(bool required = true);
virtual RegStorage AllocTempWide(bool required = true);
virtual RegStorage AllocTempRef(bool required = true);
@@ -739,7 +739,7 @@ class Mir2Lir : public Backend {
void FlushReg(RegStorage reg);
void FlushRegWide(RegStorage reg);
RegStorage AllocLiveReg(int s_reg, int reg_class, bool wide);
- RegStorage FindLiveReg(GrowableArray<RegisterInfo*> &regs, int s_reg);
+ RegStorage FindLiveReg(ArenaVector<RegisterInfo*>& regs, int s_reg);
virtual void FreeTemp(RegStorage reg);
virtual void FreeRegLocTemps(RegLocation rl_keep, RegLocation rl_free);
virtual bool IsLive(RegStorage reg);
@@ -1676,11 +1676,11 @@ class Mir2Lir : public Backend {
protected:
CompilationUnit* const cu_;
MIRGraph* const mir_graph_;
- GrowableArray<SwitchTable*> switch_tables_;
- GrowableArray<FillArrayData*> fill_array_data_;
- GrowableArray<RegisterInfo*> tempreg_info_;
- GrowableArray<RegisterInfo*> reginfo_map_;
- GrowableArray<void*> pointer_storage_;
+ ArenaVector<SwitchTable*> switch_tables_;
+ ArenaVector<FillArrayData*> fill_array_data_;
+ ArenaVector<RegisterInfo*> tempreg_info_;
+ ArenaVector<RegisterInfo*> reginfo_map_;
+ ArenaVector<void*> pointer_storage_;
CodeOffset current_code_offset_;    // Working byte offset of machine instructions.
CodeOffset data_offset_; // starting offset of literal pool.
size_t total_size_; // header + code size.
@@ -1697,7 +1697,7 @@ class Mir2Lir : public Backend {
*/
DexOffset current_dalvik_offset_;
size_t estimated_native_code_size_; // Just an estimate; used to reserve code_buffer_ size.
- RegisterPool* reg_pool_;
+ std::unique_ptr<RegisterPool> reg_pool_;
/*
* Sanity checking for the register temp tracking. The same ssa
* name should never be associated with one temp register per
@@ -1720,7 +1720,7 @@ class Mir2Lir : public Backend {
LIR* first_lir_insn_;
LIR* last_lir_insn_;
- GrowableArray<LIRSlowPath*> slow_paths_;
+ ArenaVector<LIRSlowPath*> slow_paths_;
// The memory reference type for new LIRs.
// NOTE: Passing this as an explicit parameter by all functions that directly or indirectly
diff --git a/compiler/dex/quick/ralloc_util.cc b/compiler/dex/quick/ralloc_util.cc
index 195da0d..b171c78 100644
--- a/compiler/dex/quick/ralloc_util.cc
+++ b/compiler/dex/quick/ralloc_util.cc
@@ -28,8 +28,7 @@ namespace art {
* live until it is either explicitly killed or reallocated.
*/
void Mir2Lir::ResetRegPool() {
- GrowableArray<RegisterInfo*>::Iterator iter(&tempreg_info_);
- for (RegisterInfo* info = iter.Next(); info != NULL; info = iter.Next()) {
+ for (RegisterInfo* info : tempreg_info_) {
info->MarkFree();
}
// Reset temp tracking sanity check.
@@ -66,41 +65,38 @@ Mir2Lir::RegisterPool::RegisterPool(Mir2Lir* m2l, ArenaAllocator* arena,
const ArrayRef<const RegStorage>& core64_temps,
const ArrayRef<const RegStorage>& sp_temps,
const ArrayRef<const RegStorage>& dp_temps) :
- core_regs_(arena, core_regs.size()), next_core_reg_(0),
- core64_regs_(arena, core64_regs.size()), next_core64_reg_(0),
- sp_regs_(arena, sp_regs.size()), next_sp_reg_(0),
- dp_regs_(arena, dp_regs.size()), next_dp_reg_(0), m2l_(m2l) {
+ core_regs_(arena->Adapter()), next_core_reg_(0),
+ core64_regs_(arena->Adapter()), next_core64_reg_(0),
+ sp_regs_(arena->Adapter()), next_sp_reg_(0),
+ dp_regs_(arena->Adapter()), next_dp_reg_(0), m2l_(m2l) {
// Initialize the fast lookup map.
- m2l_->reginfo_map_.Reset();
- if (kIsDebugBuild) {
- m2l_->reginfo_map_.Resize(RegStorage::kMaxRegs);
- for (unsigned i = 0; i < RegStorage::kMaxRegs; i++) {
- m2l_->reginfo_map_.Insert(nullptr);
- }
- } else {
- m2l_->reginfo_map_.SetSize(RegStorage::kMaxRegs);
- }
+ m2l_->reginfo_map_.clear();
+ m2l_->reginfo_map_.resize(RegStorage::kMaxRegs, nullptr);
// Construct the register pool.
+ core_regs_.reserve(core_regs.size());
for (const RegStorage& reg : core_regs) {
RegisterInfo* info = new (arena) RegisterInfo(reg, m2l_->GetRegMaskCommon(reg));
- m2l_->reginfo_map_.Put(reg.GetReg(), info);
- core_regs_.Insert(info);
+ m2l_->reginfo_map_[reg.GetReg()] = info;
+ core_regs_.push_back(info);
}
+ core64_regs_.reserve(core64_regs.size());
for (const RegStorage& reg : core64_regs) {
RegisterInfo* info = new (arena) RegisterInfo(reg, m2l_->GetRegMaskCommon(reg));
- m2l_->reginfo_map_.Put(reg.GetReg(), info);
- core64_regs_.Insert(info);
+ m2l_->reginfo_map_[reg.GetReg()] = info;
+ core64_regs_.push_back(info);
}
+ sp_regs_.reserve(sp_regs.size());
for (const RegStorage& reg : sp_regs) {
RegisterInfo* info = new (arena) RegisterInfo(reg, m2l_->GetRegMaskCommon(reg));
- m2l_->reginfo_map_.Put(reg.GetReg(), info);
- sp_regs_.Insert(info);
+ m2l_->reginfo_map_[reg.GetReg()] = info;
+ sp_regs_.push_back(info);
}
+ dp_regs_.reserve(dp_regs.size());
for (const RegStorage& reg : dp_regs) {
RegisterInfo* info = new (arena) RegisterInfo(reg, m2l_->GetRegMaskCommon(reg));
- m2l_->reginfo_map_.Put(reg.GetReg(), info);
- dp_regs_.Insert(info);
+ m2l_->reginfo_map_[reg.GetReg()] = info;
+ dp_regs_.push_back(info);
}
// Keep special registers from being allocated.
@@ -127,10 +123,10 @@ Mir2Lir::RegisterPool::RegisterPool(Mir2Lir* m2l, ArenaAllocator* arena,
// Add an entry for InvalidReg with zero'd mask.
RegisterInfo* invalid_reg = new (arena) RegisterInfo(RegStorage::InvalidReg(), kEncodeNone);
- m2l_->reginfo_map_.Put(RegStorage::InvalidReg().GetReg(), invalid_reg);
+ m2l_->reginfo_map_[RegStorage::InvalidReg().GetReg()] = invalid_reg;
// Existence of core64 registers implies wide references.
- if (core64_regs_.Size() != 0) {
+ if (core64_regs_.size() != 0) {
ref_regs_ = &core64_regs_;
next_ref_reg_ = &next_core64_reg_;
} else {
@@ -139,10 +135,9 @@ Mir2Lir::RegisterPool::RegisterPool(Mir2Lir* m2l, ArenaAllocator* arena,
}
}
-void Mir2Lir::DumpRegPool(GrowableArray<RegisterInfo*>* regs) {
+void Mir2Lir::DumpRegPool(ArenaVector<RegisterInfo*>* regs) {
LOG(INFO) << "================================================";
- GrowableArray<RegisterInfo*>::Iterator it(regs);
- for (RegisterInfo* info = it.Next(); info != nullptr; info = it.Next()) {
+ for (RegisterInfo* info : *regs) {
LOG(INFO) << StringPrintf(
"R[%d:%d:%c]: T:%d, U:%d, W:%d, p:%d, LV:%d, D:%d, SR:%d, DEF:%d",
info->GetReg().GetReg(), info->GetReg().GetRegNum(), info->GetReg().IsFloat() ? 'f' : 'c',
@@ -222,8 +217,7 @@ void Mir2Lir::ClobberSReg(int s_reg) {
if (kIsDebugBuild && s_reg == live_sreg_) {
live_sreg_ = INVALID_SREG;
}
- GrowableArray<RegisterInfo*>::Iterator iter(&tempreg_info_);
- for (RegisterInfo* info = iter.Next(); info != NULL; info = iter.Next()) {
+ for (RegisterInfo* info : tempreg_info_) {
if (info->SReg() == s_reg) {
if (info->GetReg().NotExactlyEquals(info->Partner())) {
// Dealing with a pair - clobber the other half.
@@ -278,8 +272,7 @@ RegStorage Mir2Lir::AllocPreservedCoreReg(int s_reg) {
* happens from the single or double pool. This entire section of code could stand
* a good refactoring.
*/
- GrowableArray<RegisterInfo*>::Iterator it(&reg_pool_->core_regs_);
- for (RegisterInfo* info = it.Next(); info != nullptr; info = it.Next()) {
+ for (RegisterInfo* info : reg_pool_->core_regs_) {
if (!info->IsTemp() && !info->InUse()) {
res = info->GetReg();
RecordCorePromotion(res, s_reg);
@@ -311,8 +304,7 @@ RegStorage Mir2Lir::AllocPreservedFpReg(int s_reg) {
*/
DCHECK_NE(cu_->instruction_set, kThumb2);
RegStorage res;
- GrowableArray<RegisterInfo*>::Iterator it(&reg_pool_->sp_regs_);
- for (RegisterInfo* info = it.Next(); info != nullptr; info = it.Next()) {
+ for (RegisterInfo* info : reg_pool_->sp_regs_) {
if (!info->IsTemp() && !info->InUse()) {
res = info->GetReg();
RecordFpPromotion(res, s_reg);
@@ -337,13 +329,14 @@ RegStorage Mir2Lir::AllocPreservedSingle(int s_reg) {
}
-RegStorage Mir2Lir::AllocTempBody(GrowableArray<RegisterInfo*> &regs, int* next_temp, bool required) {
- int num_regs = regs.Size();
+RegStorage Mir2Lir::AllocTempBody(ArenaVector<RegisterInfo*>& regs, int* next_temp, bool required) {
+ int num_regs = regs.size();
int next = *next_temp;
for (int i = 0; i< num_regs; i++) {
- if (next >= num_regs)
+ if (next >= num_regs) {
next = 0;
- RegisterInfo* info = regs.Get(next);
+ }
+ RegisterInfo* info = regs[next];
// Try to allocate a register that doesn't hold a live value.
if (info->IsTemp() && !info->InUse() && info->IsDead()) {
// If it's wide, split it up.
@@ -367,9 +360,10 @@ RegStorage Mir2Lir::AllocTempBody(GrowableArray<RegisterInfo*> &regs, int* next_
next = *next_temp;
// No free non-live regs. Anything we can kill?
for (int i = 0; i< num_regs; i++) {
- if (next >= num_regs)
+ if (next >= num_regs) {
next = 0;
- RegisterInfo* info = regs.Get(next);
+ }
+ RegisterInfo* info = regs[next];
if (info->IsTemp() && !info->InUse()) {
// Got one. Kill it.
ClobberSReg(info->SReg());
@@ -401,7 +395,7 @@ RegStorage Mir2Lir::AllocTemp(bool required) {
RegStorage Mir2Lir::AllocTempWide(bool required) {
RegStorage res;
- if (reg_pool_->core64_regs_.Size() != 0) {
+ if (reg_pool_->core64_regs_.size() != 0) {
res = AllocTempBody(reg_pool_->core64_regs_, &reg_pool_->next_core64_reg_, required);
} else {
RegStorage low_reg = AllocTemp();
@@ -458,10 +452,9 @@ RegStorage Mir2Lir::AllocTypedTemp(bool fp_hint, int reg_class, bool required) {
return AllocTemp(required);
}
-RegStorage Mir2Lir::FindLiveReg(GrowableArray<RegisterInfo*> &regs, int s_reg) {
+RegStorage Mir2Lir::FindLiveReg(ArenaVector<RegisterInfo*>& regs, int s_reg) {
RegStorage res;
- GrowableArray<RegisterInfo*>::Iterator it(&regs);
- for (RegisterInfo* info = it.Next(); info != nullptr; info = it.Next()) {
+ for (RegisterInfo* info : regs) {
if ((info->SReg() == s_reg) && info->IsLive()) {
res = info->GetReg();
break;
@@ -714,15 +707,13 @@ void Mir2Lir::ResetDefLocWide(RegLocation rl) {
}
void Mir2Lir::ResetDefTracking() {
- GrowableArray<RegisterInfo*>::Iterator iter(&tempreg_info_);
- for (RegisterInfo* info = iter.Next(); info != NULL; info = iter.Next()) {
+ for (RegisterInfo* info : tempreg_info_) {
info->ResetDefBody();
}
}
void Mir2Lir::ClobberAllTemps() {
- GrowableArray<RegisterInfo*>::Iterator iter(&tempreg_info_);
- for (RegisterInfo* info = iter.Next(); info != NULL; info = iter.Next()) {
+ for (RegisterInfo* info : tempreg_info_) {
ClobberBody(info);
}
}
@@ -780,8 +771,7 @@ void Mir2Lir::FlushSpecificReg(RegisterInfo* info) {
}
void Mir2Lir::FlushAllRegs() {
- GrowableArray<RegisterInfo*>::Iterator it(&tempreg_info_);
- for (RegisterInfo* info = it.Next(); info != nullptr; info = it.Next()) {
+ for (RegisterInfo* info : tempreg_info_) {
if (info->IsDirty() && info->IsLive()) {
FlushSpecificReg(info);
}
@@ -853,14 +843,16 @@ void Mir2Lir::MarkLive(RegLocation loc) {
void Mir2Lir::MarkTemp(RegStorage reg) {
DCHECK(!reg.IsPair());
RegisterInfo* info = GetRegInfo(reg);
- tempreg_info_.Insert(info);
+ tempreg_info_.push_back(info);
info->SetIsTemp(true);
}
void Mir2Lir::UnmarkTemp(RegStorage reg) {
DCHECK(!reg.IsPair());
RegisterInfo* info = GetRegInfo(reg);
- tempreg_info_.Delete(info);
+ auto pos = std::find(tempreg_info_.begin(), tempreg_info_.end(), info);
+ DCHECK(pos != tempreg_info_.end());
+ tempreg_info_.erase(pos);
info->SetIsTemp(false);
}
@@ -932,8 +924,7 @@ void Mir2Lir::MarkInUse(RegStorage reg) {
}
bool Mir2Lir::CheckCorePoolSanity() {
- GrowableArray<RegisterInfo*>::Iterator it(&tempreg_info_);
- for (RegisterInfo* info = it.Next(); info != nullptr; info = it.Next()) {
+ for (RegisterInfo* info : tempreg_info_) {
int my_sreg = info->SReg();
if (info->IsTemp() && info->IsLive() && info->IsWide() && my_sreg != INVALID_SREG) {
RegStorage my_reg = info->GetReg();
diff --git a/compiler/dex/quick/x86/call_x86.cc b/compiler/dex/quick/x86/call_x86.cc
index 482c430..5b92512 100644
--- a/compiler/dex/quick/x86/call_x86.cc
+++ b/compiler/dex/quick/x86/call_x86.cc
@@ -73,7 +73,7 @@ void X86Mir2Lir::GenLargePackedSwitch(MIR* mir, DexOffset table_offset, RegLocat
int size = table[1];
tab_rec->targets = static_cast<LIR**>(arena_->Alloc(size * sizeof(LIR*),
kArenaAllocLIR));
- switch_tables_.Insert(tab_rec);
+ switch_tables_.push_back(tab_rec);
// Get the switch value
rl_src = LoadValue(rl_src, kCoreReg);
@@ -145,7 +145,7 @@ void X86Mir2Lir::GenFillArrayData(MIR* mir, DexOffset table_offset, RegLocation
uint32_t size = tab_rec->table[2] | ((static_cast<uint32_t>(tab_rec->table[3])) << 16);
tab_rec->size = (size * width) + 8;
- fill_array_data_.Insert(tab_rec);
+ fill_array_data_.push_back(tab_rec);
// Making a call - use explicit registers
FlushAllRegs(); /* Everything to home location */
diff --git a/compiler/dex/quick/x86/codegen_x86.h b/compiler/dex/quick/x86/codegen_x86.h
index 6020e70..80da962 100644
--- a/compiler/dex/quick/x86/codegen_x86.h
+++ b/compiler/dex/quick/x86/codegen_x86.h
@@ -947,13 +947,13 @@ class X86Mir2Lir : public Mir2Lir {
LIR* setup_method_address_[2];
// Instructions needing patching with Method* values.
- GrowableArray<LIR*> method_address_insns_;
+ ArenaVector<LIR*> method_address_insns_;
// Instructions needing patching with Class Type* values.
- GrowableArray<LIR*> class_type_address_insns_;
+ ArenaVector<LIR*> class_type_address_insns_;
// Instructions needing patching with PC relative code addresses.
- GrowableArray<LIR*> call_method_insns_;
+ ArenaVector<LIR*> call_method_insns_;
// Prologue decrement of stack pointer.
LIR* stack_decrement_;
diff --git a/compiler/dex/quick/x86/target_x86.cc b/compiler/dex/quick/x86/target_x86.cc
index de11996..d3eafc9 100755
--- a/compiler/dex/quick/x86/target_x86.cc
+++ b/compiler/dex/quick/x86/target_x86.cc
@@ -620,13 +620,15 @@ bool X86Mir2Lir::GenMemBarrier(MemBarrierKind barrier_kind) {
void X86Mir2Lir::CompilerInitializeRegAlloc() {
if (cu_->target64) {
- reg_pool_ = new (arena_) RegisterPool(this, arena_, core_regs_64, core_regs_64q, sp_regs_64,
- dp_regs_64, reserved_regs_64, reserved_regs_64q,
- core_temps_64, core_temps_64q, sp_temps_64, dp_temps_64);
+ reg_pool_.reset(new (arena_) RegisterPool(this, arena_, core_regs_64, core_regs_64q, sp_regs_64,
+ dp_regs_64, reserved_regs_64, reserved_regs_64q,
+ core_temps_64, core_temps_64q,
+ sp_temps_64, dp_temps_64));
} else {
- reg_pool_ = new (arena_) RegisterPool(this, arena_, core_regs_32, empty_pool, sp_regs_32,
- dp_regs_32, reserved_regs_32, empty_pool,
- core_temps_32, empty_pool, sp_temps_32, dp_temps_32);
+ reg_pool_.reset(new (arena_) RegisterPool(this, arena_, core_regs_32, empty_pool, sp_regs_32,
+ dp_regs_32, reserved_regs_32, empty_pool,
+ core_temps_32, empty_pool,
+ sp_temps_32, dp_temps_32));
}
// Target-specific adjustments.
@@ -635,7 +637,7 @@ void X86Mir2Lir::CompilerInitializeRegAlloc() {
const ArrayRef<const RegStorage> *xp_regs = cu_->target64 ? &xp_regs_64 : &xp_regs_32;
for (RegStorage reg : *xp_regs) {
RegisterInfo* info = new (arena_) RegisterInfo(reg, GetRegMaskCommon(reg));
- reginfo_map_.Put(reg.GetReg(), info);
+ reginfo_map_[reg.GetReg()] = info;
}
const ArrayRef<const RegStorage> *xp_temps = cu_->target64 ? &xp_temps_64 : &xp_temps_32;
for (RegStorage reg : *xp_temps) {
@@ -645,8 +647,7 @@ void X86Mir2Lir::CompilerInitializeRegAlloc() {
// Alias single precision xmm to double xmms.
// TODO: as needed, add larger vector sizes - alias all to the largest.
- GrowableArray<RegisterInfo*>::Iterator it(&reg_pool_->sp_regs_);
- for (RegisterInfo* info = it.Next(); info != nullptr; info = it.Next()) {
+ for (RegisterInfo* info : reg_pool_->sp_regs_) {
int sp_reg_num = info->GetReg().GetRegNum();
RegStorage xp_reg = RegStorage::Solo128(sp_reg_num);
RegisterInfo* xp_reg_info = GetRegInfo(xp_reg);
@@ -666,8 +667,7 @@ void X86Mir2Lir::CompilerInitializeRegAlloc() {
if (cu_->target64) {
// Alias 32bit W registers to corresponding 64bit X registers.
- GrowableArray<RegisterInfo*>::Iterator w_it(&reg_pool_->core_regs_);
- for (RegisterInfo* info = w_it.Next(); info != nullptr; info = w_it.Next()) {
+ for (RegisterInfo* info : reg_pool_->core_regs_) {
int x_reg_num = info->GetReg().GetRegNum();
RegStorage x_reg = RegStorage::Solo64(x_reg_num);
RegisterInfo* x_reg_info = GetRegInfo(x_reg);
@@ -785,11 +785,14 @@ RegisterClass X86Mir2Lir::RegClassForFieldLoadStore(OpSize size, bool is_volatil
X86Mir2Lir::X86Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena)
: Mir2Lir(cu, mir_graph, arena),
base_of_code_(nullptr), store_method_addr_(false), store_method_addr_used_(false),
- method_address_insns_(arena, 100, kGrowableArrayMisc),
- class_type_address_insns_(arena, 100, kGrowableArrayMisc),
- call_method_insns_(arena, 100, kGrowableArrayMisc),
+ method_address_insns_(arena->Adapter()),
+ class_type_address_insns_(arena->Adapter()),
+ call_method_insns_(arena->Adapter()),
stack_decrement_(nullptr), stack_increment_(nullptr),
const_vectors_(nullptr) {
+ method_address_insns_.reserve(100);
+ class_type_address_insns_.reserve(100);
+ call_method_insns_.reserve(100);
store_method_addr_used_ = false;
if (kIsDebugBuild) {
for (int i = 0; i < kX86Last; i++) {
@@ -977,7 +980,7 @@ void X86Mir2Lir::LoadMethodAddress(const MethodReference& target_method, InvokeT
static_cast<int>(target_method_id_ptr), target_method_idx,
WrapPointer(const_cast<DexFile*>(target_dex_file)), type);
AppendLIR(move);
- method_address_insns_.Insert(move);
+ method_address_insns_.push_back(move);
}
void X86Mir2Lir::LoadClassType(const DexFile& dex_file, uint32_t type_idx,
@@ -996,7 +999,7 @@ void X86Mir2Lir::LoadClassType(const DexFile& dex_file, uint32_t type_idx,
static_cast<int>(ptr), type_idx,
WrapPointer(const_cast<DexFile*>(&dex_file)));
AppendLIR(move);
- class_type_address_insns_.Insert(move);
+ class_type_address_insns_.push_back(move);
}
LIR *X86Mir2Lir::CallWithLinkerFixup(const MethodReference& target_method, InvokeType type) {
@@ -1014,7 +1017,7 @@ LIR *X86Mir2Lir::CallWithLinkerFixup(const MethodReference& target_method, Invok
LIR *call = RawLIR(current_dalvik_offset_, kX86CallI, static_cast<int>(target_method_id_ptr),
target_method_idx, WrapPointer(const_cast<DexFile*>(target_dex_file)), type);
AppendLIR(call);
- call_method_insns_.Insert(call);
+ call_method_insns_.push_back(call);
return call;
}
@@ -1045,8 +1048,7 @@ void X86Mir2Lir::InstallLiteralPools() {
}
// Handle the fixups for methods.
- for (uint32_t i = 0; i < method_address_insns_.Size(); i++) {
- LIR* p = method_address_insns_.Get(i);
+ for (LIR* p : method_address_insns_) {
DCHECK_EQ(p->opcode, kX86Mov32RI);
uint32_t target_method_idx = p->operands[2];
const DexFile* target_dex_file =
@@ -1062,8 +1064,7 @@ void X86Mir2Lir::InstallLiteralPools() {
}
// Handle the fixups for class types.
- for (uint32_t i = 0; i < class_type_address_insns_.Size(); i++) {
- LIR* p = class_type_address_insns_.Get(i);
+ for (LIR* p : class_type_address_insns_) {
DCHECK_EQ(p->opcode, kX86Mov32RI);
const DexFile* class_dex_file =
@@ -1078,8 +1079,7 @@ void X86Mir2Lir::InstallLiteralPools() {
}
// And now the PC-relative calls to methods.
- for (uint32_t i = 0; i < call_method_insns_.Size(); i++) {
- LIR* p = call_method_insns_.Get(i);
+ for (LIR* p : call_method_insns_) {
DCHECK_EQ(p->opcode, kX86CallI);
uint32_t target_method_idx = p->operands[1];
const DexFile* target_dex_file =
@@ -1577,11 +1577,11 @@ void X86Mir2Lir::ReserveVectorRegisters(MIR* mir) {
for (RegisterInfo *info = xp_reg_info->GetAliasChain();
info != nullptr;
info = info->GetAliasChain()) {
- if (info->GetReg().IsSingle()) {
- reg_pool_->sp_regs_.Delete(info);
- } else {
- reg_pool_->dp_regs_.Delete(info);
- }
+ ArenaVector<RegisterInfo*>* regs =
+ info->GetReg().IsSingle() ? &reg_pool_->sp_regs_ : &reg_pool_->dp_regs_;
+ auto it = std::find(regs->begin(), regs->end(), info);
+ DCHECK(it != regs->end());
+ regs->erase(it);
}
}
}
@@ -1595,9 +1595,9 @@ void X86Mir2Lir::ReturnVectorRegisters(MIR* mir) {
info != nullptr;
info = info->GetAliasChain()) {
if (info->GetReg().IsSingle()) {
- reg_pool_->sp_regs_.Insert(info);
+ reg_pool_->sp_regs_.push_back(info);
} else {
- reg_pool_->dp_regs_.Insert(info);
+ reg_pool_->dp_regs_.push_back(info);
}
}
}