author     Vladimir Marko <vmarko@google.com>    2014-09-25 11:23:21 +0000
committer  Gerrit Code Review <noreply-gerritcodereview@google.com>    2014-09-25 11:23:22 +0000
commit     f2476d524281c6d649f5deb6d1ccccc92380c1ed (patch)
tree       5a7351ed7b785d096ccec00871c8f8007d5449c9
parent     c5c71bfa21aee5ad05217af57e94a0263c4eef1d (diff)
parent     e39c54ea575ec710d5e84277fcdcc049f8acb3c9 (diff)
Merge "Deprecate GrowableArray, use ArenaVector instead."
-rw-r--r--  compiler/dex/dataflow_iterator-inl.h   55
-rw-r--r--  compiler/dex/dataflow_iterator.h   44
-rw-r--r--  compiler/dex/global_value_numbering.cc   17
-rw-r--r--  compiler/dex/global_value_numbering_test.cc   65
-rw-r--r--  compiler/dex/local_value_numbering_test.cc   20
-rw-r--r--  compiler/dex/mir_analysis.cc   24
-rw-r--r--  compiler/dex/mir_dataflow.cc   58
-rw-r--r--  compiler/dex/mir_graph.cc   337
-rw-r--r--  compiler/dex/mir_graph.h   125
-rw-r--r--  compiler/dex/mir_graph_test.cc   48
-rw-r--r--  compiler/dex/mir_optimization.cc   75
-rw-r--r--  compiler/dex/mir_optimization_test.cc   40
-rw-r--r--  compiler/dex/portable/mir_to_gbc.cc   17
-rw-r--r--  compiler/dex/portable/mir_to_gbc.h   6
-rw-r--r--  compiler/dex/post_opt_passes.cc   4
-rw-r--r--  compiler/dex/quick/arm/call_arm.cc   6
-rw-r--r--  compiler/dex/quick/arm/target_arm.cc   18
-rw-r--r--  compiler/dex/quick/arm64/call_arm64.cc   6
-rw-r--r--  compiler/dex/quick/arm64/target_arm64.cc   12
-rw-r--r--  compiler/dex/quick/codegen_util.cc   49
-rw-r--r--  compiler/dex/quick/gen_common.cc   5
-rw-r--r--  compiler/dex/quick/mips/call_mips.cc   6
-rw-r--r--  compiler/dex/quick/mips/target_mips.cc   12
-rw-r--r--  compiler/dex/quick/mir_to_lir-inl.h   13
-rw-r--r--  compiler/dex/quick/mir_to_lir.cc   11
-rw-r--r--  compiler/dex/quick/mir_to_lir.h   38
-rw-r--r--  compiler/dex/quick/ralloc_util.cc   99
-rw-r--r--  compiler/dex/quick/x86/call_x86.cc   4
-rw-r--r--  compiler/dex/quick/x86/codegen_x86.h   6
-rwxr-xr-x  compiler/dex/quick/x86/target_x86.cc   60
-rw-r--r--  compiler/dex/ssa_transformation.cc   110
-rw-r--r--  compiler/utils/arena_allocator.cc   11
-rw-r--r--  compiler/utils/arena_allocator.h   11
-rw-r--r--  compiler/utils/growable_array.h   51
34 files changed, 638 insertions, 825 deletions
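
The bulk of this merge is a mechanical substitution of the std::vector-style ArenaVector API for the old GrowableArray one; the per-file hunks below repeat the same handful of call-site rewrites. A minimal sketch of the mapping, assuming ArenaVector<T> is the tree's std::vector<T, ArenaAllocatorAdapter<T>> alias so that a plain std::vector can stand in for it:

    #include <vector>

    // Hedged sketch of the call-site mapping seen throughout the hunks below.
    // Other one-liners: Size() -> size(), Get(i) -> v[i], Put(i, x) -> v[i] = x,
    // Peek()/Pop() -> back()/pop_back().
    void MigrationSketch(std::vector<int>& v) {
      v.clear();             // was: ga->Reset();
      v.reserve(4u);         // was: ga->Resize(4); note: reserve() leaves size() == 0.
      v.push_back(42);       // was: ga->Insert(42);
      v[0] += 1;             // was: ga->Increment(0);
      int* raw = v.data();   // was: ga->GetRawStorage();
      static_cast<void>(raw);
    }

One behavioral difference matters in the test fixtures below: GrowableArray::Resize() only grew the backing storage, so the old Reset()/Resize() pair maps to clear()/reserve(), not to std::vector::resize(), which would also change size().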
diff --git a/compiler/dex/dataflow_iterator-inl.h b/compiler/dex/dataflow_iterator-inl.h
index d1abf7f..89f2b5c 100644
--- a/compiler/dex/dataflow_iterator-inl.h
+++ b/compiler/dex/dataflow_iterator-inl.h
@@ -28,7 +28,7 @@ inline BasicBlock* DataflowIterator::ForwardSingleNext() {
// Are we not yet at the end?
if (idx_ < end_idx_) {
// Get the next index.
- BasicBlockId bb_id = block_id_list_->Get(idx_);
+ BasicBlockId bb_id = (*block_id_list_)[idx_];
res = mir_graph_->GetBasicBlock(bb_id);
idx_++;
}
@@ -51,7 +51,7 @@ inline BasicBlock* DataflowIterator::ForwardRepeatNext() {
// Are we not yet at the end?
if (idx_ < end_idx_) {
// Get the BasicBlockId.
- BasicBlockId bb_id = block_id_list_->Get(idx_);
+ BasicBlockId bb_id = (*block_id_list_)[idx_];
res = mir_graph_->GetBasicBlock(bb_id);
idx_++;
}
@@ -66,7 +66,7 @@ inline BasicBlock* DataflowIterator::ReverseSingleNext() {
// Are we not yet at the end?
if (idx_ >= 0) {
// Get the BasicBlockId.
- BasicBlockId bb_id = block_id_list_->Get(idx_);
+ BasicBlockId bb_id = (*block_id_list_)[idx_];
res = mir_graph_->GetBasicBlock(bb_id);
idx_--;
}
@@ -89,7 +89,7 @@ inline BasicBlock* DataflowIterator::ReverseRepeatNext() {
// Are we not yet done?
if (idx_ >= 0) {
// Get the BasicBlockId.
- BasicBlockId bb_id = block_id_list_->Get(idx_);
+ BasicBlockId bb_id = (*block_id_list_)[idx_];
res = mir_graph_->GetBasicBlock(bb_id);
idx_--;
}
@@ -97,34 +97,28 @@ inline BasicBlock* DataflowIterator::ReverseRepeatNext() {
return res;
}
-// AllNodes uses the existing GrowableArray iterator, and should be considered unordered.
+// AllNodes uses the existing block list, and should be considered unordered.
inline BasicBlock* AllNodesIterator::Next(bool had_change) {
- BasicBlock* res = NULL;
-
- // Suppose we want to keep looking.
- bool keep_looking = true;
-
- // Find the next BasicBlock.
- while (keep_looking == true) {
- // Get next BasicBlock.
- res = all_nodes_iterator_.Next();
+ // Update changed: if had_changed is true, we remember it for the whole iteration.
+ changed_ |= had_change;
- // Are we done or is the BasicBlock not hidden?
- if ((res == NULL) || (res->hidden == false)) {
- keep_looking = false;
+ BasicBlock* res = nullptr;
+ while (idx_ != end_idx_) {
+ BasicBlock* bb = mir_graph_->GetBlockList()[idx_++];
+ DCHECK(bb != nullptr);
+ if (!bb->hidden) {
+ res = bb;
+ break;
}
}
- // Update changed: if had_changed is true, we remember it for the whole iteration.
- changed_ |= had_change;
-
return res;
}
inline BasicBlock* LoopRepeatingTopologicalSortIterator::Next(bool had_change) {
if (idx_ != 0) {
// Mark last processed block visited.
- BasicBlock* bb = mir_graph_->GetBasicBlock(block_id_list_->Get(idx_ - 1));
+ BasicBlock* bb = mir_graph_->GetBasicBlock((*block_id_list_)[idx_ - 1]);
bb->visited = true;
if (had_change) {
// If we had a change we need to revisit the children.
@@ -138,16 +132,17 @@ inline BasicBlock* LoopRepeatingTopologicalSortIterator::Next(bool had_change) {
while (true) {
// Pop loops we have left and check if we need to recalculate one of them.
// NOTE: We need to do this even if idx_ == end_idx_.
- while (loop_head_stack_->Size() != 0u &&
- loop_ends_->Get(loop_head_stack_->Peek().first) == idx_) {
- auto top = loop_head_stack_->Peek();
+ while (loop_head_stack_->size() != 0u &&
+ (*loop_ends_)[loop_head_stack_->back().first] == idx_) {
+ auto top = loop_head_stack_->back();
uint16_t loop_head_idx = top.first;
bool recalculated = top.second;
- loop_head_stack_->Pop();
- BasicBlock* loop_head = mir_graph_->GetBasicBlock(block_id_list_->Get(loop_head_idx));
+ loop_head_stack_->pop_back();
+ BasicBlock* loop_head = mir_graph_->GetBasicBlock((*block_id_list_)[loop_head_idx]);
DCHECK(loop_head != nullptr);
if (!recalculated || !loop_head->visited) {
- loop_head_stack_->Insert(std::make_pair(loop_head_idx, true)); // Recalculating this loop.
+ // Recalculating this loop.
+ loop_head_stack_->push_back(std::make_pair(loop_head_idx, true));
idx_ = loop_head_idx + 1;
return loop_head;
}
@@ -160,11 +155,11 @@ inline BasicBlock* LoopRepeatingTopologicalSortIterator::Next(bool had_change) {
// Get next block and return it if unvisited.
BasicBlockId idx = idx_;
idx_ += 1;
- BasicBlock* bb = mir_graph_->GetBasicBlock(block_id_list_->Get(idx));
+ BasicBlock* bb = mir_graph_->GetBasicBlock((*block_id_list_)[idx]);
DCHECK(bb != nullptr);
if (!bb->visited) {
- if (loop_ends_->Get(idx) != 0u) {
- loop_head_stack_->Insert(std::make_pair(idx, false)); // Not recalculating.
+ if ((*loop_ends_)[idx] != 0u) {
+ loop_head_stack_->push_back(std::make_pair(idx, false)); // Not recalculating.
}
return bb;
}
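
The AllNodesIterator::Next() rewrite above swaps the stored GrowableArray iterator for a plain index into the block list, and the loop-repeating iterator turns the loop-head GrowableArray into a vector used as a stack. A self-contained sketch of both idioms, with toy types in place of the real MIRGraph API:

    #include <cstddef>
    #include <cstdint>
    #include <utility>
    #include <vector>

    struct Block { bool hidden = false; };

    // Toy AllNodesIterator::Next(): advance idx past hidden blocks and return
    // the next visible block, or nullptr once the list is exhausted.
    Block* NextVisible(const std::vector<Block*>& blocks, size_t& idx) {
      while (idx != blocks.size()) {
        Block* bb = blocks[idx++];
        if (!bb->hidden) {
          return bb;
        }
      }
      return nullptr;
    }

    // The loop-head stack keeps its old protocol under new names:
    void TouchLoopHead(std::vector<std::pair<uint16_t, bool>>& stack) {
      stack.push_back(std::make_pair(uint16_t{3}, false));  // was: Insert(...)
      std::pair<uint16_t, bool> top = stack.back();         // was: Peek()
      stack.pop_back();                                     // was: Pop()
      static_cast<void>(top);
    }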
diff --git a/compiler/dex/dataflow_iterator.h b/compiler/dex/dataflow_iterator.h
index 06d6832..188f1d9 100644
--- a/compiler/dex/dataflow_iterator.h
+++ b/compiler/dex/dataflow_iterator.h
@@ -104,7 +104,7 @@ namespace art {
MIRGraph* const mir_graph_; /**< @brief the MIRGraph */
const int32_t start_idx_; /**< @brief the start index for the iteration */
const int32_t end_idx_; /**< @brief the last index for the iteration */
- GrowableArray<BasicBlockId>* block_id_list_; /**< @brief the list of BasicBlocks we want to iterate on */
+ const ArenaVector<BasicBlockId>* block_id_list_; /**< @brief the list of BasicBlocks we want to iterate on */
int32_t idx_; /**< @brief Current index for the iterator */
int32_t repeats_; /**< @brief Number of repeats over the iteration */
bool changed_; /**< @brief Has something changed during the current iteration? */
@@ -124,7 +124,7 @@ namespace art {
: DataflowIterator(mir_graph, 0, mir_graph->GetNumReachableBlocks()) {
// Extra setup for the PreOrderDfsIterator.
idx_ = start_idx_;
- block_id_list_ = mir_graph->GetDfsOrder();
+ block_id_list_ = &mir_graph->GetDfsOrder();
}
/**
@@ -155,7 +155,7 @@ namespace art {
: DataflowIterator(mir_graph, 0, mir_graph->GetNumReachableBlocks()) {
// Extra setup for the RepeatingPreOrderDfsIterator.
idx_ = start_idx_;
- block_id_list_ = mir_graph->GetDfsOrder();
+ block_id_list_ = &mir_graph->GetDfsOrder();
}
/**
@@ -186,7 +186,7 @@ namespace art {
: DataflowIterator(mir_graph, 0, mir_graph->GetNumReachableBlocks()) {
// Extra setup for the RepeatingPostOrderDfsIterator.
idx_ = start_idx_;
- block_id_list_ = mir_graph->GetDfsPostOrder();
+ block_id_list_ = &mir_graph->GetDfsPostOrder();
}
/**
@@ -216,7 +216,7 @@ namespace art {
: DataflowIterator(mir_graph, mir_graph->GetNumReachableBlocks() -1, 0) {
// Extra setup for the ReversePostOrderDfsIterator.
idx_ = start_idx_;
- block_id_list_ = mir_graph->GetDfsPostOrder();
+ block_id_list_ = &mir_graph->GetDfsPostOrder();
}
/**
@@ -247,7 +247,7 @@ namespace art {
: DataflowIterator(mir_graph, mir_graph->GetNumReachableBlocks() -1, 0) {
// Extra setup for the RepeatingReversePostOrderDfsIterator
idx_ = start_idx_;
- block_id_list_ = mir_graph->GetDfsPostOrder();
+ block_id_list_ = &mir_graph->GetDfsPostOrder();
}
/**
@@ -277,7 +277,7 @@ namespace art {
: DataflowIterator(mir_graph, 0, mir_graph->GetNumReachableBlocks()) {
// Extra setup for thePostOrderDOMIterator.
idx_ = start_idx_;
- block_id_list_ = mir_graph->GetDomPostOrder();
+ block_id_list_ = &mir_graph->GetDomPostOrder();
}
/**
@@ -304,15 +304,14 @@ namespace art {
* @param mir_graph The MIRGraph considered.
*/
explicit AllNodesIterator(MIRGraph* mir_graph)
- : DataflowIterator(mir_graph, 0, 0),
- all_nodes_iterator_(mir_graph->GetBlockList()) {
+ : DataflowIterator(mir_graph, 0, mir_graph->GetBlockList().size()) {
}
/**
* @brief Resetting the iterator.
*/
void Reset() {
- all_nodes_iterator_.Reset();
+ idx_ = 0;
}
/**
@@ -321,9 +320,6 @@ namespace art {
* @return the next BasicBlock following the iteration order, 0 if finished.
*/
virtual BasicBlock* Next(bool had_change = false) ALWAYS_INLINE;
-
- private:
- GrowableArray<BasicBlock*>::Iterator all_nodes_iterator_; /**< @brief The list of all the nodes */
};
/**
@@ -337,10 +333,10 @@ namespace art {
* @param mir_graph The MIRGraph considered.
*/
explicit TopologicalSortIterator(MIRGraph* mir_graph)
- : DataflowIterator(mir_graph, 0, mir_graph->GetTopologicalSortOrder()->Size()) {
+ : DataflowIterator(mir_graph, 0, mir_graph->GetTopologicalSortOrder().size()) {
// Extra setup for TopologicalSortIterator.
idx_ = start_idx_;
- block_id_list_ = mir_graph->GetTopologicalSortOrder();
+ block_id_list_ = &mir_graph->GetTopologicalSortOrder();
}
/**
@@ -369,10 +365,10 @@ namespace art {
* @param mir_graph The MIRGraph considered.
*/
explicit RepeatingTopologicalSortIterator(MIRGraph* mir_graph)
- : DataflowIterator(mir_graph, 0, mir_graph->GetTopologicalSortOrder()->Size()) {
+ : DataflowIterator(mir_graph, 0, mir_graph->GetTopologicalSortOrder().size()) {
// Extra setup for RepeatingTopologicalSortIterator.
idx_ = start_idx_;
- block_id_list_ = mir_graph->GetTopologicalSortOrder();
+ block_id_list_ = &mir_graph->GetTopologicalSortOrder();
}
/**
@@ -408,19 +404,19 @@ namespace art {
* @param mir_graph The MIRGraph considered.
*/
explicit LoopRepeatingTopologicalSortIterator(MIRGraph* mir_graph)
- : DataflowIterator(mir_graph, 0, mir_graph->GetTopologicalSortOrder()->Size()),
- loop_ends_(mir_graph->GetTopologicalSortOrderLoopEnds()),
+ : DataflowIterator(mir_graph, 0, mir_graph->GetTopologicalSortOrder().size()),
+ loop_ends_(&mir_graph->GetTopologicalSortOrderLoopEnds()),
loop_head_stack_(mir_graph_->GetTopologicalSortOrderLoopHeadStack()) {
// Extra setup for RepeatingTopologicalSortIterator.
idx_ = start_idx_;
- block_id_list_ = mir_graph->GetTopologicalSortOrder();
+ block_id_list_ = &mir_graph->GetTopologicalSortOrder();
// Clear visited flags and check that the loop head stack is empty.
mir_graph->ClearAllVisitedFlags();
- DCHECK_EQ(loop_head_stack_->Size(), 0u);
+ DCHECK_EQ(loop_head_stack_->size(), 0u);
}
~LoopRepeatingTopologicalSortIterator() {
- DCHECK_EQ(loop_head_stack_->Size(), 0u);
+ DCHECK_EQ(loop_head_stack_->size(), 0u);
}
/**
@@ -431,8 +427,8 @@ namespace art {
virtual BasicBlock* Next(bool had_change = false) OVERRIDE;
private:
- const GrowableArray<BasicBlockId>* const loop_ends_;
- GrowableArray<std::pair<uint16_t, bool>>* const loop_head_stack_;
+ const ArenaVector<BasicBlockId>* const loop_ends_;
+ ArenaVector<std::pair<uint16_t, bool>>* const loop_head_stack_;
};
} // namespace art
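
Storing &mir_graph->GetDfsOrder() in the constructors above implies the MIRGraph getters now return const ArenaVector<BasicBlockId>& rather than a GrowableArray pointer; the iterator keeps a pointer member because a reference member could not be reseated. A sketch of that shape with a hypothetical Graph type:

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    using BasicBlockId = uint16_t;

    struct Graph {
      std::vector<BasicBlockId> dfs_order_;
      // Const reference out: callers can index the order but not mutate it.
      const std::vector<BasicBlockId>& GetDfsOrder() const { return dfs_order_; }
    };

    struct Iterator {
      const std::vector<BasicBlockId>* block_id_list_;  // Pointer, so it can be rebound.
      explicit Iterator(const Graph& g) : block_id_list_(&g.GetDfsOrder()) {}
      BasicBlockId At(size_t idx) const { return (*block_id_list_)[idx]; }
    };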
diff --git a/compiler/dex/global_value_numbering.cc b/compiler/dex/global_value_numbering.cc
index 4d885fd..af57529 100644
--- a/compiler/dex/global_value_numbering.cc
+++ b/compiler/dex/global_value_numbering.cc
@@ -89,21 +89,20 @@ LocalValueNumbering* GlobalValueNumbering::PrepareBasicBlock(BasicBlock* bb,
// the loop head stack will also be empty and there will be nothing to merge anyway.
bool use_all_predecessors = true;
uint16_t loop_head_idx = 0u; // Used only if !use_all_predecessors.
- if (mir_graph_->GetTopologicalSortOrderLoopHeadStack()->Size() != 0) {
+ if (mir_graph_->GetTopologicalSortOrderLoopHeadStack()->size() != 0) {
// Full GVN inside a loop, see if we're at the loop head for the first time.
- auto top = mir_graph_->GetTopologicalSortOrderLoopHeadStack()->Peek();
+ auto top = mir_graph_->GetTopologicalSortOrderLoopHeadStack()->back();
loop_head_idx = top.first;
bool recalculating = top.second;
use_all_predecessors = recalculating ||
- loop_head_idx != mir_graph_->GetTopologicalSortOrderIndexes()->Get(bb->id);
+ loop_head_idx != mir_graph_->GetTopologicalSortOrderIndexes()[bb->id];
}
- GrowableArray<BasicBlockId>::Iterator iter(bb->predecessors);
- for (BasicBlock* pred_bb = mir_graph_->GetBasicBlock(iter.Next());
- pred_bb != nullptr; pred_bb = mir_graph_->GetBasicBlock(iter.Next())) {
- if (lvns_[pred_bb->id] != nullptr &&
+ for (BasicBlockId pred_id : bb->predecessors) {
+ DCHECK_NE(pred_id, NullBasicBlockId);
+ if (lvns_[pred_id] != nullptr &&
(use_all_predecessors ||
- mir_graph_->GetTopologicalSortOrderIndexes()->Get(pred_bb->id) < loop_head_idx)) {
- merge_lvns_.push_back(lvns_[pred_bb->id]);
+ mir_graph_->GetTopologicalSortOrderIndexes()[pred_id] < loop_head_idx)) {
+ merge_lvns_.push_back(lvns_[pred_id]);
}
}
// Determine merge type.
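
The predecessor walk above replaces the explicit GrowableArray iterator, which used a null block as its end marker, with a range-based for over BasicBlockIds that resolves a block only when needed. A minimal sketch of the pattern with toy types (kNullId standing in for NullBasicBlockId):

    #include <cassert>
    #include <cstdint>
    #include <vector>

    using BasicBlockId = uint16_t;
    constexpr BasicBlockId kNullId = 0;  // Stand-in for NullBasicBlockId.

    struct Block { std::vector<BasicBlockId> predecessors; };

    template <typename Fn>
    void ForEachPredecessor(const Block& bb, Fn&& fn) {
      for (BasicBlockId pred_id : bb.predecessors) {
        assert(pred_id != kNullId);  // Mirrors the DCHECK_NE in the hunk above.
        fn(pred_id);
      }
    }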
diff --git a/compiler/dex/global_value_numbering_test.cc b/compiler/dex/global_value_numbering_test.cc
index 1c0de37..c808234 100644
--- a/compiler/dex/global_value_numbering_test.cc
+++ b/compiler/dex/global_value_numbering_test.cc
@@ -129,8 +129,8 @@ class GlobalValueNumberingTest : public testing::Test {
{ bb, static_cast<Instruction::Code>(kMirOpPhi), 0, 0u, 2u, { src1, src2 }, 1, { reg } }
void DoPrepareIFields(const IFieldDef* defs, size_t count) {
- cu_.mir_graph->ifield_lowering_infos_.Reset();
- cu_.mir_graph->ifield_lowering_infos_.Resize(count);
+ cu_.mir_graph->ifield_lowering_infos_.clear();
+ cu_.mir_graph->ifield_lowering_infos_.reserve(count);
for (size_t i = 0u; i != count; ++i) {
const IFieldDef* def = &defs[i];
MirIFieldLoweringInfo field_info(def->field_idx);
@@ -140,7 +140,7 @@ class GlobalValueNumberingTest : public testing::Test {
field_info.flags_ = 0u | // Without kFlagIsStatic.
(def->is_volatile ? MirIFieldLoweringInfo::kFlagIsVolatile : 0u);
}
- cu_.mir_graph->ifield_lowering_infos_.Insert(field_info);
+ cu_.mir_graph->ifield_lowering_infos_.push_back(field_info);
}
}
@@ -150,8 +150,8 @@ class GlobalValueNumberingTest : public testing::Test {
}
void DoPrepareSFields(const SFieldDef* defs, size_t count) {
- cu_.mir_graph->sfield_lowering_infos_.Reset();
- cu_.mir_graph->sfield_lowering_infos_.Resize(count);
+ cu_.mir_graph->sfield_lowering_infos_.clear();
+ cu_.mir_graph->sfield_lowering_infos_.reserve(count);
for (size_t i = 0u; i != count; ++i) {
const SFieldDef* def = &defs[i];
MirSFieldLoweringInfo field_info(def->field_idx);
@@ -163,7 +163,7 @@ class GlobalValueNumberingTest : public testing::Test {
field_info.declaring_field_idx_ = def->declaring_field_idx;
field_info.flags_ |= (def->is_volatile ? MirSFieldLoweringInfo::kFlagIsVolatile : 0u);
}
- cu_.mir_graph->sfield_lowering_infos_.Insert(field_info);
+ cu_.mir_graph->sfield_lowering_infos_.push_back(field_info);
}
}
@@ -174,41 +174,33 @@ class GlobalValueNumberingTest : public testing::Test {
void DoPrepareBasicBlocks(const BBDef* defs, size_t count) {
cu_.mir_graph->block_id_map_.clear();
- cu_.mir_graph->block_list_.Reset();
+ cu_.mir_graph->block_list_.clear();
ASSERT_LT(3u, count); // null, entry, exit and at least one bytecode block.
ASSERT_EQ(kNullBlock, defs[0].type);
ASSERT_EQ(kEntryBlock, defs[1].type);
ASSERT_EQ(kExitBlock, defs[2].type);
for (size_t i = 0u; i != count; ++i) {
const BBDef* def = &defs[i];
- BasicBlock* bb = cu_.mir_graph->NewMemBB(def->type, i);
- cu_.mir_graph->block_list_.Insert(bb);
+ BasicBlock* bb = cu_.mir_graph->CreateNewBB(def->type);
if (def->num_successors <= 2) {
bb->successor_block_list_type = kNotUsed;
- bb->successor_blocks = nullptr;
bb->fall_through = (def->num_successors >= 1) ? def->successors[0] : 0u;
bb->taken = (def->num_successors >= 2) ? def->successors[1] : 0u;
} else {
bb->successor_block_list_type = kPackedSwitch;
bb->fall_through = 0u;
bb->taken = 0u;
- bb->successor_blocks = new (&cu_.arena) GrowableArray<SuccessorBlockInfo*>(
- &cu_.arena, def->num_successors, kGrowableArraySuccessorBlocks);
+ bb->successor_blocks.reserve(def->num_successors);
for (size_t j = 0u; j != def->num_successors; ++j) {
SuccessorBlockInfo* successor_block_info =
static_cast<SuccessorBlockInfo*>(cu_.arena.Alloc(sizeof(SuccessorBlockInfo),
kArenaAllocSuccessor));
successor_block_info->block = j;
successor_block_info->key = 0u; // Not used by class init check elimination.
- bb->successor_blocks->Insert(successor_block_info);
+ bb->successor_blocks.push_back(successor_block_info);
}
}
- bb->predecessors = new (&cu_.arena) GrowableArray<BasicBlockId>(
- &cu_.arena, def->num_predecessors, kGrowableArrayPredecessors);
- for (size_t j = 0u; j != def->num_predecessors; ++j) {
- ASSERT_NE(0u, def->predecessors[j]);
- bb->predecessors->Insert(def->predecessors[j]);
- }
+ bb->predecessors.assign(def->predecessors, def->predecessors + def->num_predecessors);
if (def->type == kDalvikByteCode || def->type == kEntryBlock || def->type == kExitBlock) {
bb->data_flow_info = static_cast<BasicBlockDataFlow*>(
cu_.arena.Alloc(sizeof(BasicBlockDataFlow), kArenaAllocDFInfo));
@@ -216,10 +208,10 @@ class GlobalValueNumberingTest : public testing::Test {
}
}
cu_.mir_graph->num_blocks_ = count;
- ASSERT_EQ(count, cu_.mir_graph->block_list_.Size());
- cu_.mir_graph->entry_block_ = cu_.mir_graph->block_list_.Get(1);
+ ASSERT_EQ(count, cu_.mir_graph->block_list_.size());
+ cu_.mir_graph->entry_block_ = cu_.mir_graph->block_list_[1];
ASSERT_EQ(kEntryBlock, cu_.mir_graph->entry_block_->block_type);
- cu_.mir_graph->exit_block_ = cu_.mir_graph->block_list_.Get(2);
+ cu_.mir_graph->exit_block_ = cu_.mir_graph->block_list_[2];
ASSERT_EQ(kExitBlock, cu_.mir_graph->exit_block_->block_type);
}
@@ -235,24 +227,23 @@ class GlobalValueNumberingTest : public testing::Test {
for (size_t i = 0u; i != count; ++i) {
const MIRDef* def = &defs[i];
MIR* mir = &mirs_[i];
- ASSERT_LT(def->bbid, cu_.mir_graph->block_list_.Size());
- BasicBlock* bb = cu_.mir_graph->block_list_.Get(def->bbid);
+ ASSERT_LT(def->bbid, cu_.mir_graph->block_list_.size());
+ BasicBlock* bb = cu_.mir_graph->block_list_[def->bbid];
bb->AppendMIR(mir);
mir->dalvikInsn.opcode = def->opcode;
mir->dalvikInsn.vB = static_cast<int32_t>(def->value);
mir->dalvikInsn.vB_wide = def->value;
if (def->opcode >= Instruction::IGET && def->opcode <= Instruction::IPUT_SHORT) {
- ASSERT_LT(def->field_info, cu_.mir_graph->ifield_lowering_infos_.Size());
+ ASSERT_LT(def->field_info, cu_.mir_graph->ifield_lowering_infos_.size());
mir->meta.ifield_lowering_info = def->field_info;
} else if (def->opcode >= Instruction::SGET && def->opcode <= Instruction::SPUT_SHORT) {
- ASSERT_LT(def->field_info, cu_.mir_graph->sfield_lowering_infos_.Size());
+ ASSERT_LT(def->field_info, cu_.mir_graph->sfield_lowering_infos_.size());
mir->meta.sfield_lowering_info = def->field_info;
} else if (def->opcode == static_cast<Instruction::Code>(kMirOpPhi)) {
mir->meta.phi_incoming = static_cast<BasicBlockId*>(
allocator_->Alloc(def->num_uses * sizeof(BasicBlockId), kArenaAllocDFInfo));
- for (size_t i = 0; i != def->num_uses; ++i) {
- mir->meta.phi_incoming[i] = bb->predecessors->Get(i);
- }
+ ASSERT_EQ(def->num_uses, bb->predecessors.size());
+ std::copy(bb->predecessors.begin(), bb->predecessors.end(), mir->meta.phi_incoming);
}
mir->ssa_rep = &ssa_reps_[i];
mir->ssa_rep->num_uses = def->num_uses;
@@ -345,11 +336,11 @@ class GlobalValueNumberingTest : public testing::Test {
allocator_.reset(ScopedArenaAllocator::Create(&cu_.arena_stack));
// Bind all possible sregs to live vregs for test purposes.
live_in_v_->SetInitialBits(kMaxSsaRegs);
- cu_.mir_graph->ssa_base_vregs_ = new (&cu_.arena) GrowableArray<int>(&cu_.arena, kMaxSsaRegs);
- cu_.mir_graph->ssa_subscripts_ = new (&cu_.arena) GrowableArray<int>(&cu_.arena, kMaxSsaRegs);
+ cu_.mir_graph->ssa_base_vregs_.reserve(kMaxSsaRegs);
+ cu_.mir_graph->ssa_subscripts_.reserve(kMaxSsaRegs);
for (unsigned int i = 0; i < kMaxSsaRegs; i++) {
- cu_.mir_graph->ssa_base_vregs_->Insert(i);
- cu_.mir_graph->ssa_subscripts_->Insert(0);
+ cu_.mir_graph->ssa_base_vregs_.push_back(i);
+ cu_.mir_graph->ssa_subscripts_.push_back(0);
}
}
@@ -438,12 +429,10 @@ GlobalValueNumberingTestCatch::GlobalValueNumberingTestCatch()
// Add successor block info to the check block.
BasicBlock* check_bb = cu_.mir_graph->GetBasicBlock(3u);
check_bb->successor_block_list_type = kCatch;
- check_bb->successor_blocks = new (&cu_.arena) GrowableArray<SuccessorBlockInfo*>(
- &cu_.arena, 2, kGrowableArraySuccessorBlocks);
SuccessorBlockInfo* successor_block_info = reinterpret_cast<SuccessorBlockInfo*>
(cu_.arena.Alloc(sizeof(SuccessorBlockInfo), kArenaAllocSuccessor));
successor_block_info->block = catch_handler->id;
- check_bb->successor_blocks->Insert(successor_block_info);
+ check_bb->successor_blocks.push_back(successor_block_info);
}
class GlobalValueNumberingTestTwoConsecutiveLoops : public GlobalValueNumberingTest {
@@ -2120,12 +2109,10 @@ TEST_F(GlobalValueNumberingTest, NormalPathToCatchEntry) {
// Add successor block info to the check block.
BasicBlock* check_bb = cu_.mir_graph->GetBasicBlock(3u);
check_bb->successor_block_list_type = kCatch;
- check_bb->successor_blocks = new (&cu_.arena) GrowableArray<SuccessorBlockInfo*>(
- &cu_.arena, 2, kGrowableArraySuccessorBlocks);
SuccessorBlockInfo* successor_block_info = reinterpret_cast<SuccessorBlockInfo*>
(cu_.arena.Alloc(sizeof(SuccessorBlockInfo), kArenaAllocSuccessor));
successor_block_info->block = catch_handler->id;
- check_bb->successor_blocks->Insert(successor_block_info);
+ check_bb->successor_blocks.push_back(successor_block_info);
BasicBlock* merge_block = cu_.mir_graph->GetBasicBlock(4u);
std::swap(merge_block->taken, merge_block->fall_through);
PrepareMIRs(mirs);
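
The fixture code above populates the lowering-info vectors with clear()/reserve()/push_back() and copies predecessor tables with assign(). The subtlety, sketched here with a toy FieldInfo type, is that reserve() only preallocates; the push_back() calls are what give the vector its size:

    #include <cstddef>
    #include <vector>

    struct FieldInfo { unsigned field_idx; };

    void PrepareFields(std::vector<FieldInfo>& infos,
                       const unsigned* defs, size_t count) {
      infos.clear();
      infos.reserve(count);                   // size() is still 0 here.
      for (size_t i = 0; i != count; ++i) {
        infos.push_back(FieldInfo{defs[i]});  // size() grows to count.
      }
    }

    // Predecessor lists collapse a manual Insert() loop into one call:
    void AssignPreds(std::vector<unsigned>& preds, const unsigned* p, size_t n) {
      preds.assign(p, p + n);
    }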
diff --git a/compiler/dex/local_value_numbering_test.cc b/compiler/dex/local_value_numbering_test.cc
index e4e944e..e53c640 100644
--- a/compiler/dex/local_value_numbering_test.cc
+++ b/compiler/dex/local_value_numbering_test.cc
@@ -86,8 +86,8 @@ class LocalValueNumberingTest : public testing::Test {
{ opcode, 0u, 0u, 0, { }, 1, { reg } } // CONST_CLASS, CONST_STRING, NEW_ARRAY, ...
void DoPrepareIFields(const IFieldDef* defs, size_t count) {
- cu_.mir_graph->ifield_lowering_infos_.Reset();
- cu_.mir_graph->ifield_lowering_infos_.Resize(count);
+ cu_.mir_graph->ifield_lowering_infos_.clear();
+ cu_.mir_graph->ifield_lowering_infos_.reserve(count);
for (size_t i = 0u; i != count; ++i) {
const IFieldDef* def = &defs[i];
MirIFieldLoweringInfo field_info(def->field_idx);
@@ -97,7 +97,7 @@ class LocalValueNumberingTest : public testing::Test {
field_info.flags_ = 0u | // Without kFlagIsStatic.
(def->is_volatile ? MirIFieldLoweringInfo::kFlagIsVolatile : 0u);
}
- cu_.mir_graph->ifield_lowering_infos_.Insert(field_info);
+ cu_.mir_graph->ifield_lowering_infos_.push_back(field_info);
}
}
@@ -107,8 +107,8 @@ class LocalValueNumberingTest : public testing::Test {
}
void DoPrepareSFields(const SFieldDef* defs, size_t count) {
- cu_.mir_graph->sfield_lowering_infos_.Reset();
- cu_.mir_graph->sfield_lowering_infos_.Resize(count);
+ cu_.mir_graph->sfield_lowering_infos_.clear();
+ cu_.mir_graph->sfield_lowering_infos_.reserve(count);
for (size_t i = 0u; i != count; ++i) {
const SFieldDef* def = &defs[i];
MirSFieldLoweringInfo field_info(def->field_idx);
@@ -120,7 +120,7 @@ class LocalValueNumberingTest : public testing::Test {
field_info.declaring_field_idx_ = def->declaring_field_idx;
field_info.flags_ |= (def->is_volatile ? MirSFieldLoweringInfo::kFlagIsVolatile : 0u);
}
- cu_.mir_graph->sfield_lowering_infos_.Insert(field_info);
+ cu_.mir_graph->sfield_lowering_infos_.push_back(field_info);
}
}
@@ -140,10 +140,10 @@ class LocalValueNumberingTest : public testing::Test {
mir->dalvikInsn.vB = static_cast<int32_t>(def->value);
mir->dalvikInsn.vB_wide = def->value;
if (def->opcode >= Instruction::IGET && def->opcode <= Instruction::IPUT_SHORT) {
- ASSERT_LT(def->field_info, cu_.mir_graph->ifield_lowering_infos_.Size());
+ ASSERT_LT(def->field_info, cu_.mir_graph->ifield_lowering_infos_.size());
mir->meta.ifield_lowering_info = def->field_info;
} else if (def->opcode >= Instruction::SGET && def->opcode <= Instruction::SPUT_SHORT) {
- ASSERT_LT(def->field_info, cu_.mir_graph->sfield_lowering_infos_.Size());
+ ASSERT_LT(def->field_info, cu_.mir_graph->sfield_lowering_infos_.size());
mir->meta.sfield_lowering_info = def->field_info;
}
mir->ssa_rep = &ssa_reps_[i];
@@ -170,8 +170,8 @@ class LocalValueNumberingTest : public testing::Test {
}
void MakeSFieldUninitialized(uint32_t sfield_index) {
- CHECK_LT(sfield_index, cu_.mir_graph->sfield_lowering_infos_.Size());
- cu_.mir_graph->sfield_lowering_infos_.GetRawStorage()[sfield_index].flags_ &=
+ CHECK_LT(sfield_index, cu_.mir_graph->sfield_lowering_infos_.size());
+ cu_.mir_graph->sfield_lowering_infos_[sfield_index].flags_ &=
~MirSFieldLoweringInfo::kFlagIsInitialized;
}
diff --git a/compiler/dex/mir_analysis.cc b/compiler/dex/mir_analysis.cc
index 9fa5fac..1111e4c 100644
--- a/compiler/dex/mir_analysis.cc
+++ b/compiler/dex/mir_analysis.cc
@@ -1218,25 +1218,25 @@ void MIRGraph::DoCacheFieldLoweringInfo() {
if (ifield_pos != 0u) {
// Resolve instance field infos.
- DCHECK_EQ(ifield_lowering_infos_.Size(), 0u);
- ifield_lowering_infos_.Resize(ifield_pos);
+ DCHECK_EQ(ifield_lowering_infos_.size(), 0u);
+ ifield_lowering_infos_.reserve(ifield_pos);
for (size_t pos = 0u; pos != ifield_pos; ++pos) {
- ifield_lowering_infos_.Insert(MirIFieldLoweringInfo(field_idxs[pos]));
+ ifield_lowering_infos_.push_back(MirIFieldLoweringInfo(field_idxs[pos]));
}
MirIFieldLoweringInfo::Resolve(cu_->compiler_driver, GetCurrentDexCompilationUnit(),
- ifield_lowering_infos_.GetRawStorage(), ifield_pos);
+ ifield_lowering_infos_.data(), ifield_pos);
}
if (sfield_pos != max_refs) {
// Resolve static field infos.
- DCHECK_EQ(sfield_lowering_infos_.Size(), 0u);
- sfield_lowering_infos_.Resize(max_refs - sfield_pos);
+ DCHECK_EQ(sfield_lowering_infos_.size(), 0u);
+ sfield_lowering_infos_.reserve(max_refs - sfield_pos);
for (size_t pos = max_refs; pos != sfield_pos;) {
--pos;
- sfield_lowering_infos_.Insert(MirSFieldLoweringInfo(field_idxs[pos]));
+ sfield_lowering_infos_.push_back(MirSFieldLoweringInfo(field_idxs[pos]));
}
MirSFieldLoweringInfo::Resolve(cu_->compiler_driver, GetCurrentDexCompilationUnit(),
- sfield_lowering_infos_.GetRawStorage(), max_refs - sfield_pos);
+ sfield_lowering_infos_.data(), max_refs - sfield_pos);
}
}
@@ -1338,9 +1338,9 @@ void MIRGraph::DoCacheMethodLoweringInfo() {
}
// Prepare unique method infos, set method info indexes for their MIRs.
- DCHECK_EQ(method_lowering_infos_.Size(), 0u);
+ DCHECK_EQ(method_lowering_infos_.size(), 0u);
const size_t count = invoke_map.size();
- method_lowering_infos_.Resize(count);
+ method_lowering_infos_.reserve(count);
for (size_t pos = 0u; pos != count; ++pos) {
const MapEntry* entry = sequential_entries[pos];
MirMethodLoweringInfo method_info(entry->target_method_idx,
@@ -1348,10 +1348,10 @@ void MIRGraph::DoCacheMethodLoweringInfo() {
if (entry->devirt_target != nullptr) {
method_info.SetDevirtualizationTarget(*entry->devirt_target);
}
- method_lowering_infos_.Insert(method_info);
+ method_lowering_infos_.push_back(method_info);
}
MirMethodLoweringInfo::Resolve(cu_->compiler_driver, GetCurrentDexCompilationUnit(),
- method_lowering_infos_.GetRawStorage(), count);
+ method_lowering_infos_.data(), count);
}
bool MIRGraph::SkipCompilationByName(const std::string& methodname) {
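
The Resolve() calls above pass vec.data() where GetRawStorage() used to expose the backing array; this is safe because each vector is fully push_back()ed before the pointer is taken. A sketch of the contract, with Resolve() as a hypothetical stand-in for the MirIFieldLoweringInfo/MirSFieldLoweringInfo resolvers:

    #include <cstddef>
    #include <vector>

    struct Info { unsigned idx; };

    // Hypothetical resolver: mutates count entries of a raw array in place.
    void Resolve(Info* infos, size_t count) {
      for (size_t i = 0; i != count; ++i) {
        infos[i].idx += 1u;
      }
    }

    void CacheInfos(std::vector<Info>& infos, size_t count) {
      infos.reserve(count);
      for (size_t i = 0; i != count; ++i) {
        infos.push_back(Info{static_cast<unsigned>(i)});
      }
      // data() points at exactly count contiguous elements only because the
      // push_back() loop ran to completion first; reserve() alone would not do.
      Resolve(infos.data(), count);
    }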
diff --git a/compiler/dex/mir_dataflow.cc b/compiler/dex/mir_dataflow.cc
index d9531fb..fd8546e 100644
--- a/compiler/dex/mir_dataflow.cc
+++ b/compiler/dex/mir_dataflow.cc
@@ -901,7 +901,7 @@ const uint64_t MIRGraph::oat_data_flow_attributes_[kMirOpLast] = {
/* Return the base virtual register for a SSA name */
int MIRGraph::SRegToVReg(int ssa_reg) const {
- return ssa_base_vregs_->Get(ssa_reg);
+ return ssa_base_vregs_[ssa_reg];
}
/* Any register that is used before being defined is considered live-in */
@@ -1025,14 +1025,14 @@ int MIRGraph::AddNewSReg(int v_reg) {
int subscript = ++ssa_last_defs_[v_reg];
uint32_t ssa_reg = GetNumSSARegs();
SetNumSSARegs(ssa_reg + 1);
- ssa_base_vregs_->Insert(v_reg);
- ssa_subscripts_->Insert(subscript);
- DCHECK_EQ(ssa_base_vregs_->Size(), ssa_subscripts_->Size());
+ ssa_base_vregs_.push_back(v_reg);
+ ssa_subscripts_.push_back(subscript);
+ DCHECK_EQ(ssa_base_vregs_.size(), ssa_subscripts_.size());
// If we are expanding very late, update use counts too.
- if (ssa_reg > 0 && use_counts_.Size() == ssa_reg) {
+ if (ssa_reg > 0 && use_counts_.size() == ssa_reg) {
// Need to expand the counts.
- use_counts_.Insert(0);
- raw_use_counts_.Insert(0);
+ use_counts_.push_back(0);
+ raw_use_counts_.push_back(0);
}
return ssa_reg;
}
@@ -1276,10 +1276,11 @@ bool MIRGraph::DoSSAConversion(BasicBlock* bb) {
void MIRGraph::CompilerInitializeSSAConversion() {
size_t num_reg = GetNumOfCodeAndTempVRs();
- ssa_base_vregs_ = new (arena_) GrowableArray<int>(arena_, num_reg + GetDefCount() + 128,
- kGrowableArraySSAtoDalvikMap);
- ssa_subscripts_ = new (arena_) GrowableArray<int>(arena_, num_reg + GetDefCount() + 128,
- kGrowableArraySSAtoDalvikMap);
+ ssa_base_vregs_.clear();
+ ssa_base_vregs_.reserve(num_reg + GetDefCount() + 128);
+ ssa_subscripts_.clear();
+ ssa_subscripts_.reserve(num_reg + GetDefCount() + 128);
+
/*
* Initial number of SSA registers is equal to the number of Dalvik
* registers.
@@ -1292,8 +1293,8 @@ void MIRGraph::CompilerInitializeSSAConversion() {
* into "(0 << 16) | i"
*/
for (unsigned int i = 0; i < num_reg; i++) {
- ssa_base_vregs_->Insert(i);
- ssa_subscripts_->Insert(0);
+ ssa_base_vregs_.push_back(i);
+ ssa_subscripts_.push_back(0);
}
/*
@@ -1321,15 +1322,11 @@ void MIRGraph::CompilerInitializeSSAConversion() {
/*
* Allocate the BasicBlockDataFlow structure for the entry and code blocks
*/
- GrowableArray<BasicBlock*>::Iterator iterator(&block_list_);
-
- while (true) {
- BasicBlock* bb = iterator.Next();
- if (bb == NULL) break;
+ for (BasicBlock* bb : block_list_) {
if (bb->hidden == true) continue;
if (bb->block_type == kDalvikByteCode ||
- bb->block_type == kEntryBlock ||
- bb->block_type == kExitBlock) {
+ bb->block_type == kEntryBlock ||
+ bb->block_type == kExitBlock) {
bb->data_flow_info =
static_cast<BasicBlockDataFlow*>(arena_->Alloc(sizeof(BasicBlockDataFlow),
kArenaAllocDFInfo));
@@ -1405,8 +1402,8 @@ void MIRGraph::CountUses(struct BasicBlock* bb) {
}
for (int i = 0; i < mir->ssa_rep->num_uses; i++) {
int s_reg = mir->ssa_rep->uses[i];
- raw_use_counts_.Increment(s_reg);
- use_counts_.Put(s_reg, use_counts_.Get(s_reg) + weight);
+ raw_use_counts_[s_reg] += 1u;
+ use_counts_[s_reg] += weight;
}
if (!(cu_->disable_opt & (1 << kPromoteCompilerTemps))) {
uint64_t df_attributes = GetDataFlowAttributes(mir);
@@ -1421,8 +1418,8 @@ void MIRGraph::CountUses(struct BasicBlock* bb) {
* and save results for both here and GenInvoke. For now, go ahead
* and assume all invokes use method*.
*/
- raw_use_counts_.Increment(method_sreg_);
- use_counts_.Put(method_sreg_, use_counts_.Get(method_sreg_) + weight);
+ raw_use_counts_[method_sreg_] += 1u;
+ use_counts_[method_sreg_] += weight;
}
}
}
@@ -1430,21 +1427,16 @@ void MIRGraph::CountUses(struct BasicBlock* bb) {
/* Verify if all the successor is connected with all the claimed predecessors */
bool MIRGraph::VerifyPredInfo(BasicBlock* bb) {
- GrowableArray<BasicBlockId>::Iterator iter(bb->predecessors);
-
- while (true) {
- BasicBlock* pred_bb = GetBasicBlock(iter.Next());
- if (!pred_bb) break;
+ for (BasicBlockId pred_id : bb->predecessors) {
+ BasicBlock* pred_bb = GetBasicBlock(pred_id);
+ DCHECK(pred_bb != nullptr);
bool found = false;
if (pred_bb->taken == bb->id) {
found = true;
} else if (pred_bb->fall_through == bb->id) {
found = true;
} else if (pred_bb->successor_block_list_type != kNotUsed) {
- GrowableArray<SuccessorBlockInfo*>::Iterator iterator(pred_bb->successor_blocks);
- while (true) {
- SuccessorBlockInfo *successor_block_info = iterator.Next();
- if (successor_block_info == NULL) break;
+ for (SuccessorBlockInfo* successor_block_info : pred_bb->successor_blocks) {
BasicBlockId succ_bb = successor_block_info->block;
if (succ_bb == bb->id) {
found = true;
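
AddNewSReg() above keeps ssa_base_vregs_ and ssa_subscripts_ as parallel vectors that must grow in lock step, and lazily extends the use-count vectors when an SSA register is created late. A compact sketch of that invariant with toy names:

    #include <cassert>
    #include <cstdint>
    #include <vector>

    struct SsaTable {
      std::vector<int32_t> base_vregs;
      std::vector<int32_t> subscripts;
      std::vector<uint32_t> use_counts;

      // Returns the new SSA register number.
      uint32_t AddNewSReg(int32_t v_reg, int32_t subscript) {
        uint32_t ssa_reg = static_cast<uint32_t>(base_vregs.size());
        base_vregs.push_back(v_reg);
        subscripts.push_back(subscript);
        assert(base_vregs.size() == subscripts.size());  // Mirrors the DCHECK_EQ.
        if (use_counts.size() == ssa_reg) {
          use_counts.push_back(0u);  // Expand counts when defining very late.
        }
        return ssa_reg;
      }
    };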
diff --git a/compiler/dex/mir_graph.cc b/compiler/dex/mir_graph.cc
index ce56255..7c0a996 100644
--- a/compiler/dex/mir_graph.cc
+++ b/compiler/dex/mir_graph.cc
@@ -76,23 +76,23 @@ MIRGraph::MIRGraph(CompilationUnit* cu, ArenaAllocator* arena)
: reg_location_(NULL),
block_id_map_(std::less<unsigned int>(), arena->Adapter()),
cu_(cu),
- ssa_base_vregs_(NULL),
- ssa_subscripts_(NULL),
+ ssa_base_vregs_(arena->Adapter(kArenaAllocSSAToDalvikMap)),
+ ssa_subscripts_(arena->Adapter(kArenaAllocSSAToDalvikMap)),
vreg_to_ssa_map_(NULL),
ssa_last_defs_(NULL),
is_constant_v_(NULL),
constant_values_(NULL),
- use_counts_(arena, 256, kGrowableArrayMisc),
- raw_use_counts_(arena, 256, kGrowableArrayMisc),
+ use_counts_(arena->Adapter()),
+ raw_use_counts_(arena->Adapter()),
num_reachable_blocks_(0),
max_num_reachable_blocks_(0),
- dfs_order_(NULL),
- dfs_post_order_(NULL),
- dom_post_order_traversal_(NULL),
- topological_order_(nullptr),
- topological_order_loop_ends_(nullptr),
- topological_order_indexes_(nullptr),
- topological_order_loop_head_stack_(nullptr),
+ dfs_order_(arena->Adapter(kArenaAllocDfsPreOrder)),
+ dfs_post_order_(arena->Adapter(kArenaAllocDfsPostOrder)),
+ dom_post_order_traversal_(arena->Adapter(kArenaAllocDomPostOrder)),
+ topological_order_(arena->Adapter(kArenaAllocTopologicalSortOrder)),
+ topological_order_loop_ends_(arena->Adapter(kArenaAllocTopologicalSortOrder)),
+ topological_order_indexes_(arena->Adapter(kArenaAllocTopologicalSortOrder)),
+ topological_order_loop_head_stack_(arena->Adapter(kArenaAllocTopologicalSortOrder)),
i_dom_list_(NULL),
def_block_matrix_(NULL),
temp_scoped_alloc_(),
@@ -100,13 +100,13 @@ MIRGraph::MIRGraph(CompilationUnit* cu, ArenaAllocator* arena)
temp_bit_vector_size_(0u),
temp_bit_vector_(nullptr),
temp_gvn_(),
- block_list_(arena, 100, kGrowableArrayBlockList),
+ block_list_(arena->Adapter(kArenaAllocBBList)),
try_block_addr_(NULL),
entry_block_(NULL),
exit_block_(NULL),
num_blocks_(0),
current_code_item_(NULL),
- dex_pc_to_block_map_(arena, 0, kGrowableArrayMisc),
+ dex_pc_to_block_map_(arena->Adapter()),
m_units_(arena->Adapter()),
method_stack_(arena->Adapter()),
current_method_(kInvalidEntry),
@@ -127,10 +127,13 @@ MIRGraph::MIRGraph(CompilationUnit* cu, ArenaAllocator* arena)
compiler_temps_committed_(false),
punt_to_interpreter_(false),
merged_df_flags_(0u),
- ifield_lowering_infos_(arena, 0u),
- sfield_lowering_infos_(arena, 0u),
- method_lowering_infos_(arena, 0u),
- gen_suspend_test_list_(arena, 0u) {
+ ifield_lowering_infos_(arena->Adapter(kArenaAllocLoweringInfo)),
+ sfield_lowering_infos_(arena->Adapter(kArenaAllocLoweringInfo)),
+ method_lowering_infos_(arena->Adapter(kArenaAllocLoweringInfo)),
+ gen_suspend_test_list_(arena->Adapter()) {
+ use_counts_.reserve(256);
+ raw_use_counts_.reserve(256);
+ block_list_.reserve(100);
try_block_addr_ = new (arena_) ArenaBitVector(arena_, 0, true /* expandable */);
@@ -149,6 +152,7 @@ MIRGraph::MIRGraph(CompilationUnit* cu, ArenaAllocator* arena)
}
MIRGraph::~MIRGraph() {
+ STLDeleteElements(&block_list_);
STLDeleteElements(&m_units_);
}
@@ -183,8 +187,7 @@ BasicBlock* MIRGraph::SplitBlock(DexOffset code_offset,
if (insn == NULL) {
LOG(FATAL) << "Break split failed";
}
- BasicBlock* bottom_block = NewMemBB(kDalvikByteCode, num_blocks_++);
- block_list_.Insert(bottom_block);
+ BasicBlock* bottom_block = CreateNewBB(kDalvikByteCode);
bottom_block->start_offset = code_offset;
bottom_block->first_mir_insn = insn;
@@ -207,34 +210,31 @@ BasicBlock* MIRGraph::SplitBlock(DexOffset code_offset,
if (bottom_block->taken != NullBasicBlockId) {
orig_block->taken = NullBasicBlockId;
BasicBlock* bb_taken = GetBasicBlock(bottom_block->taken);
- bb_taken->predecessors->Delete(orig_block->id);
- bb_taken->predecessors->Insert(bottom_block->id);
+ bb_taken->ErasePredecessor(orig_block->id);
+ bb_taken->predecessors.push_back(bottom_block->id);
}
/* Handle the fallthrough path */
bottom_block->fall_through = orig_block->fall_through;
orig_block->fall_through = bottom_block->id;
- bottom_block->predecessors->Insert(orig_block->id);
+ bottom_block->predecessors.push_back(orig_block->id);
if (bottom_block->fall_through != NullBasicBlockId) {
BasicBlock* bb_fall_through = GetBasicBlock(bottom_block->fall_through);
- bb_fall_through->predecessors->Delete(orig_block->id);
- bb_fall_through->predecessors->Insert(bottom_block->id);
+ bb_fall_through->ErasePredecessor(orig_block->id);
+ bb_fall_through->predecessors.push_back(bottom_block->id);
}
/* Handle the successor list */
if (orig_block->successor_block_list_type != kNotUsed) {
bottom_block->successor_block_list_type = orig_block->successor_block_list_type;
- bottom_block->successor_blocks = orig_block->successor_blocks;
+ bottom_block->successor_blocks.swap(orig_block->successor_blocks);
orig_block->successor_block_list_type = kNotUsed;
- orig_block->successor_blocks = nullptr;
- GrowableArray<SuccessorBlockInfo*>::Iterator iterator(bottom_block->successor_blocks);
- while (true) {
- SuccessorBlockInfo* successor_block_info = iterator.Next();
- if (successor_block_info == nullptr) break;
+ DCHECK(orig_block->successor_blocks.empty()); // Empty after the swap() above.
+ for (SuccessorBlockInfo* successor_block_info : bottom_block->successor_blocks) {
BasicBlock* bb = GetBasicBlock(successor_block_info->block);
if (bb != nullptr) {
- bb->predecessors->Delete(orig_block->id);
- bb->predecessors->Insert(bottom_block->id);
+ bb->ErasePredecessor(orig_block->id);
+ bb->predecessors.push_back(bottom_block->id);
}
}
}
@@ -258,9 +258,9 @@ BasicBlock* MIRGraph::SplitBlock(DexOffset code_offset,
DCHECK_EQ(insn->offset, bottom_block->start_offset);
DCHECK(static_cast<int>(insn->dalvikInsn.opcode) == kMirOpCheck ||
!MIR::DecodedInstruction::IsPseudoMirOp(insn->dalvikInsn.opcode));
- DCHECK_EQ(dex_pc_to_block_map_.Get(insn->offset), orig_block->id);
+ DCHECK_EQ(dex_pc_to_block_map_[insn->offset], orig_block->id);
MIR* p = insn;
- dex_pc_to_block_map_.Put(p->offset, bottom_block->id);
+ dex_pc_to_block_map_[p->offset] = bottom_block->id;
while (p != bottom_block->last_mir_insn) {
p = p->next;
DCHECK(p != nullptr);
@@ -273,8 +273,8 @@ BasicBlock* MIRGraph::SplitBlock(DexOffset code_offset,
* the first in a BasicBlock, we can't hit it here.
*/
if ((opcode == kMirOpCheck) || !MIR::DecodedInstruction::IsPseudoMirOp(opcode)) {
- DCHECK_EQ(dex_pc_to_block_map_.Get(p->offset), orig_block->id);
- dex_pc_to_block_map_.Put(p->offset, bottom_block->id);
+ DCHECK_EQ(dex_pc_to_block_map_[p->offset], orig_block->id);
+ dex_pc_to_block_map_[p->offset] = bottom_block->id;
}
}
@@ -295,8 +295,8 @@ BasicBlock* MIRGraph::FindBlock(DexOffset code_offset, bool split, bool create,
return NULL;
}
- int block_id = dex_pc_to_block_map_.Get(code_offset);
- BasicBlock* bb = (block_id == 0) ? NULL : block_list_.Get(block_id);
+ int block_id = dex_pc_to_block_map_[code_offset];
+ BasicBlock* bb = GetBasicBlock(block_id);
if ((bb != NULL) && (bb->start_offset == code_offset)) {
// Does this containing block start with the desired instruction?
@@ -314,10 +314,9 @@ BasicBlock* MIRGraph::FindBlock(DexOffset code_offset, bool split, bool create,
}
// Create a new block.
- bb = NewMemBB(kDalvikByteCode, num_blocks_++);
- block_list_.Insert(bb);
+ bb = CreateNewBB(kDalvikByteCode);
bb->start_offset = code_offset;
- dex_pc_to_block_map_.Put(bb->start_offset, bb->id);
+ dex_pc_to_block_map_[bb->start_offset] = bb->id;
return bb;
}
@@ -457,7 +456,7 @@ BasicBlock* MIRGraph::ProcessCanBranch(BasicBlock* cur_block, MIR* insn, DexOffs
BasicBlock* taken_block = FindBlock(target, /* split */ true, /* create */ true,
/* immed_pred_block_p */ &cur_block);
cur_block->taken = taken_block->id;
- taken_block->predecessors->Insert(cur_block->id);
+ taken_block->predecessors.push_back(cur_block->id);
/* Always terminate the current block for conditional branches */
if (flags & Instruction::kContinue) {
@@ -480,7 +479,7 @@ BasicBlock* MIRGraph::ProcessCanBranch(BasicBlock* cur_block, MIR* insn, DexOffs
/* immed_pred_block_p */
&cur_block);
cur_block->fall_through = fallthrough_block->id;
- fallthrough_block->predecessors->Insert(cur_block->id);
+ fallthrough_block->predecessors.push_back(cur_block->id);
} else if (code_ptr < code_end) {
FindBlock(cur_offset + width, /* split */ false, /* create */ true,
/* immed_pred_block_p */ NULL);
@@ -539,8 +538,7 @@ BasicBlock* MIRGraph::ProcessCanSwitch(BasicBlock* cur_block, MIR* insn, DexOffs
}
cur_block->successor_block_list_type =
(insn->dalvikInsn.opcode == Instruction::PACKED_SWITCH) ? kPackedSwitch : kSparseSwitch;
- cur_block->successor_blocks =
- new (arena_) GrowableArray<SuccessorBlockInfo*>(arena_, size, kGrowableArraySuccessorBlocks);
+ cur_block->successor_blocks.reserve(size);
for (i = 0; i < size; i++) {
BasicBlock* case_block = FindBlock(cur_offset + target_table[i], /* split */ true,
@@ -552,15 +550,15 @@ BasicBlock* MIRGraph::ProcessCanSwitch(BasicBlock* cur_block, MIR* insn, DexOffs
successor_block_info->key =
(insn->dalvikInsn.opcode == Instruction::PACKED_SWITCH) ?
first_key + i : keyTable[i];
- cur_block->successor_blocks->Insert(successor_block_info);
- case_block->predecessors->Insert(cur_block->id);
+ cur_block->successor_blocks.push_back(successor_block_info);
+ case_block->predecessors.push_back(cur_block->id);
}
/* Fall-through case */
BasicBlock* fallthrough_block = FindBlock(cur_offset + width, /* split */ false,
/* create */ true, /* immed_pred_block_p */ NULL);
cur_block->fall_through = fallthrough_block->id;
- fallthrough_block->predecessors->Insert(cur_block->id);
+ fallthrough_block->predecessors.push_back(cur_block->id);
return cur_block;
}
@@ -593,8 +591,6 @@ BasicBlock* MIRGraph::ProcessCanThrow(BasicBlock* cur_block, MIR* insn, DexOffse
}
if (cur_block->successor_block_list_type == kNotUsed) {
cur_block->successor_block_list_type = kCatch;
- cur_block->successor_blocks = new (arena_) GrowableArray<SuccessorBlockInfo*>(
- arena_, 2, kGrowableArraySuccessorBlocks);
}
catch_block->catch_entry = true;
if (kIsDebugBuild) {
@@ -604,17 +600,16 @@ BasicBlock* MIRGraph::ProcessCanThrow(BasicBlock* cur_block, MIR* insn, DexOffse
(arena_->Alloc(sizeof(SuccessorBlockInfo), kArenaAllocSuccessor));
successor_block_info->block = catch_block->id;
successor_block_info->key = iterator.GetHandlerTypeIndex();
- cur_block->successor_blocks->Insert(successor_block_info);
- catch_block->predecessors->Insert(cur_block->id);
+ cur_block->successor_blocks.push_back(successor_block_info);
+ catch_block->predecessors.push_back(cur_block->id);
}
in_try_block = (cur_block->successor_block_list_type != kNotUsed);
}
if (!in_try_block && build_all_edges) {
- BasicBlock* eh_block = NewMemBB(kExceptionHandling, num_blocks_++);
+ BasicBlock* eh_block = CreateNewBB(kExceptionHandling);
cur_block->taken = eh_block->id;
- block_list_.Insert(eh_block);
eh_block->start_offset = cur_offset;
- eh_block->predecessors->Insert(cur_block->id);
+ eh_block->predecessors.push_back(cur_block->id);
}
if (is_throw) {
@@ -657,11 +652,10 @@ BasicBlock* MIRGraph::ProcessCanThrow(BasicBlock* cur_block, MIR* insn, DexOffse
* Note also that the dex_pc_to_block_map_ entry for the potentially
* throwing instruction will refer to the original basic block.
*/
- BasicBlock* new_block = NewMemBB(kDalvikByteCode, num_blocks_++);
- block_list_.Insert(new_block);
+ BasicBlock* new_block = CreateNewBB(kDalvikByteCode);
new_block->start_offset = insn->offset;
cur_block->fall_through = new_block->id;
- new_block->predecessors->Insert(cur_block->id);
+ new_block->predecessors.push_back(cur_block->id);
MIR* new_insn = NewMIR();
*new_insn = *insn;
insn->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpCheck);
@@ -689,8 +683,8 @@ void MIRGraph::InlineMethod(const DexFile::CodeItem* code_item, uint32_t access_
// TODO: need to rework expansion of block list & try_block_addr when inlining activated.
// TUNING: use better estimate of basic blocks for following resize.
- block_list_.Resize(block_list_.Size() + current_code_item_->insns_size_in_code_units_);
- dex_pc_to_block_map_.SetSize(dex_pc_to_block_map_.Size() + current_code_item_->insns_size_in_code_units_);
+ block_list_.reserve(block_list_.size() + current_code_item_->insns_size_in_code_units_);
+ dex_pc_to_block_map_.resize(dex_pc_to_block_map_.size() + current_code_item_->insns_size_in_code_units_);
// TODO: replace with explicit resize routine. Using automatic extension side effect for now.
try_block_addr_->SetBit(current_code_item_->insns_size_in_code_units_);
@@ -702,14 +696,11 @@ void MIRGraph::InlineMethod(const DexFile::CodeItem* code_item, uint32_t access_
DCHECK(exit_block_ == NULL);
DCHECK_EQ(num_blocks_, 0U);
// Use id 0 to represent a null block.
- BasicBlock* null_block = NewMemBB(kNullBlock, num_blocks_++);
+ BasicBlock* null_block = CreateNewBB(kNullBlock);
DCHECK_EQ(null_block->id, NullBasicBlockId);
null_block->hidden = true;
- block_list_.Insert(null_block);
- entry_block_ = NewMemBB(kEntryBlock, num_blocks_++);
- block_list_.Insert(entry_block_);
- exit_block_ = NewMemBB(kExitBlock, num_blocks_++);
- block_list_.Insert(exit_block_);
+ entry_block_ = CreateNewBB(kEntryBlock);
+ exit_block_ = CreateNewBB(kExitBlock);
// TODO: deprecate all "cu->" fields; move what's left to wherever CompilationUnit is allocated.
cu_->dex_file = &dex_file;
cu_->class_def_idx = class_def_idx;
@@ -727,13 +718,12 @@ void MIRGraph::InlineMethod(const DexFile::CodeItem* code_item, uint32_t access_
}
/* Current block to record parsed instructions */
- BasicBlock* cur_block = NewMemBB(kDalvikByteCode, num_blocks_++);
+ BasicBlock* cur_block = CreateNewBB(kDalvikByteCode);
DCHECK_EQ(current_offset_, 0U);
cur_block->start_offset = current_offset_;
- block_list_.Insert(cur_block);
// TODO: for inlining support, insert at the insert point rather than entry block.
entry_block_->fall_through = cur_block->id;
- cur_block->predecessors->Insert(entry_block_->id);
+ cur_block->predecessors.push_back(entry_block_->id);
/* Identify code range in try blocks and set up the empty catch blocks */
ProcessTryCatchBlocks();
@@ -791,7 +781,7 @@ void MIRGraph::InlineMethod(const DexFile::CodeItem* code_item, uint32_t access_
}
// Associate the starting dex_pc for this opcode with its containing basic block.
- dex_pc_to_block_map_.Put(insn->offset, cur_block->id);
+ dex_pc_to_block_map_[insn->offset] = cur_block->id;
code_ptr += width;
@@ -801,7 +791,7 @@ void MIRGraph::InlineMethod(const DexFile::CodeItem* code_item, uint32_t access_
} else if (flags & Instruction::kReturn) {
cur_block->terminated_by_return = true;
cur_block->fall_through = exit_block_->id;
- exit_block_->predecessors->Insert(cur_block->id);
+ exit_block_->predecessors.push_back(cur_block->id);
/*
* Terminate the current block if there are instructions
* afterwards.
@@ -850,7 +840,7 @@ void MIRGraph::InlineMethod(const DexFile::CodeItem* code_item, uint32_t access_
if ((cur_block->fall_through == NullBasicBlockId) && (flags & Instruction::kContinue)) {
cur_block->fall_through = next_block->id;
- next_block->predecessors->Insert(cur_block->id);
+ next_block->predecessors.push_back(cur_block->id);
}
cur_block = next_block;
}
@@ -915,7 +905,7 @@ void MIRGraph::DumpCFG(const char* dir_prefix, bool all_blocks, const char *suff
int idx;
for (idx = 0; idx < num_blocks; idx++) {
- int block_idx = all_blocks ? idx : dfs_order_->Get(idx);
+ int block_idx = all_blocks ? idx : dfs_order_[idx];
BasicBlock* bb = GetBasicBlock(block_idx);
if (bb == NULL) continue;
if (bb->block_type == kDead) continue;
@@ -971,23 +961,17 @@ void MIRGraph::DumpCFG(const char* dir_prefix, bool all_blocks, const char *suff
fprintf(file, " succ%04x_%d [shape=%s,label = \"{ \\\n",
bb->start_offset, bb->id,
(bb->successor_block_list_type == kCatch) ? "Mrecord" : "record");
- GrowableArray<SuccessorBlockInfo*>::Iterator iterator(bb->successor_blocks);
- SuccessorBlockInfo* successor_block_info = iterator.Next();
+ int last_succ_id = static_cast<int>(bb->successor_blocks.size() - 1u);
int succ_id = 0;
- while (true) {
- if (successor_block_info == NULL) break;
-
+ for (SuccessorBlockInfo* successor_block_info : bb->successor_blocks) {
BasicBlock* dest_block = GetBasicBlock(successor_block_info->block);
- SuccessorBlockInfo *next_successor_block_info = iterator.Next();
-
fprintf(file, " {<f%d> %04x: %04x\\l}%s\\\n",
- succ_id++,
+ succ_id,
successor_block_info->key,
dest_block->start_offset,
- (next_successor_block_info != NULL) ? " | " : " ");
-
- successor_block_info = next_successor_block_info;
+ (succ_id != last_succ_id) ? " | " : " ");
+ ++succ_id;
}
fprintf(file, " }\"];\n\n");
@@ -996,13 +980,8 @@ void MIRGraph::DumpCFG(const char* dir_prefix, bool all_blocks, const char *suff
block_name1, bb->start_offset, bb->id);
// Link the successor pseudo-block with all of its potential targets.
- GrowableArray<SuccessorBlockInfo*>::Iterator iter(bb->successor_blocks);
-
succ_id = 0;
- while (true) {
- SuccessorBlockInfo* successor_block_info = iter.Next();
- if (successor_block_info == NULL) break;
-
+ for (SuccessorBlockInfo* successor_block_info : bb->successor_blocks) {
BasicBlock* dest_block = GetBasicBlock(successor_block_info->block);
GetBlockName(dest_block, block_name2);
@@ -1603,7 +1582,6 @@ const char* MIRGraph::GetShortyFromTargetIdx(int target_idx) {
/* Debug Utility - dump a compilation unit */
void MIRGraph::DumpMIRGraph() {
- BasicBlock* bb;
const char* block_type_names[] = {
"Null Block",
"Entry Block",
@@ -1616,11 +1594,8 @@ void MIRGraph::DumpMIRGraph() {
LOG(INFO) << "Compiling " << PrettyMethod(cu_->method_idx, *cu_->dex_file);
LOG(INFO) << GetInsns(0) << " insns";
LOG(INFO) << GetNumBlocks() << " blocks in total";
- GrowableArray<BasicBlock*>::Iterator iterator(&block_list_);
- while (true) {
- bb = iterator.Next();
- if (bb == NULL) break;
+ for (BasicBlock* bb : block_list_) {
LOG(INFO) << StringPrintf("Block %d (%s) (insn %04x - %04x%s)",
bb->id,
block_type_names[bb->block_type],
@@ -1678,15 +1653,10 @@ MIR* MIRGraph::NewMIR() {
// Allocate a new basic block.
BasicBlock* MIRGraph::NewMemBB(BBType block_type, int block_id) {
- BasicBlock* bb = new (arena_) BasicBlock();
+ BasicBlock* bb = new (arena_) BasicBlock(block_id, block_type, arena_);
- bb->block_type = block_type;
- bb->id = block_id;
// TUNING: better estimate of the exit block predecessors?
- bb->predecessors = new (arena_) GrowableArray<BasicBlockId>(arena_,
- (block_type == kExitBlock) ? 2048 : 2,
- kGrowableArrayPredecessors);
- bb->successor_block_list_type = kNotUsed;
+ bb->predecessors.reserve((block_type == kExitBlock) ? 2048 : 2);
block_id_map_.Put(block_id, block_id);
return bb;
}
@@ -1699,16 +1669,12 @@ void MIRGraph::InitializeConstantPropagation() {
void MIRGraph::InitializeMethodUses() {
// The gate starts by initializing the use counts.
int num_ssa_regs = GetNumSSARegs();
- use_counts_.Resize(num_ssa_regs + 32);
- raw_use_counts_.Resize(num_ssa_regs + 32);
- // Resize does not actually reset the number of used, so reset before initialization.
- use_counts_.Reset();
- raw_use_counts_.Reset();
- // Initialize list.
- for (int i = 0; i < num_ssa_regs; i++) {
- use_counts_.Insert(0);
- raw_use_counts_.Insert(0);
- }
+ use_counts_.clear();
+ use_counts_.reserve(num_ssa_regs + 32);
+ use_counts_.resize(num_ssa_regs, 0u);
+ raw_use_counts_.clear();
+ raw_use_counts_.reserve(num_ssa_regs + 32);
+ raw_use_counts_.resize(num_ssa_regs, 0u);
}
void MIRGraph::SSATransformationStart() {
@@ -1800,9 +1766,9 @@ static void ComputeUnvisitedReachableFrom(MIRGraph* mir_graph, BasicBlockId bb_i
tmp_stack->pop_back();
BasicBlock* current_bb = mir_graph->GetBasicBlock(current_id);
DCHECK(current_bb != nullptr);
- GrowableArray<BasicBlockId>::Iterator iter(current_bb->predecessors);
- BasicBlock* pred_bb = mir_graph->GetBasicBlock(iter.Next());
- for ( ; pred_bb != nullptr; pred_bb = mir_graph->GetBasicBlock(iter.Next())) {
+ for (BasicBlockId pred_id : current_bb->predecessors) {
+ BasicBlock* pred_bb = mir_graph->GetBasicBlock(pred_id);
+ DCHECK(pred_bb != nullptr);
if (!pred_bb->visited && !reachable->IsBitSet(pred_bb->id)) {
reachable->SetBit(pred_bb->id);
tmp_stack->push_back(pred_bb->id);
@@ -1823,36 +1789,27 @@ void MIRGraph::ComputeTopologicalSortOrder() {
loop_exit_blocks.ClearAllBits();
// Count the number of blocks to process and add the entry block(s).
- GrowableArray<BasicBlock*>::Iterator iterator(&block_list_);
unsigned int num_blocks_to_process = 0u;
- for (BasicBlock* bb = iterator.Next(); bb != nullptr; bb = iterator.Next()) {
+ for (BasicBlock* bb : block_list_) {
if (bb->hidden == true) {
continue;
}
num_blocks_to_process += 1u;
- if (bb->predecessors->Size() == 0u) {
+ if (bb->predecessors.size() == 0u) {
// Add entry block to the queue.
q.push(bb);
}
}
- // Create the topological order if need be.
- if (topological_order_ == nullptr) {
- topological_order_ = new (arena_) GrowableArray<BasicBlockId>(arena_, num_blocks);
- topological_order_loop_ends_ = new (arena_) GrowableArray<uint16_t>(arena_, num_blocks);
- topological_order_indexes_ = new (arena_) GrowableArray<uint16_t>(arena_, num_blocks);
- }
- topological_order_->Reset();
- topological_order_loop_ends_->Reset();
- topological_order_indexes_->Reset();
- topological_order_loop_ends_->Resize(num_blocks);
- topological_order_indexes_->Resize(num_blocks);
- for (BasicBlockId i = 0; i != num_blocks; ++i) {
- topological_order_loop_ends_->Insert(0u);
- topological_order_indexes_->Insert(static_cast<uint16_t>(-1));
- }
+ // Clear the topological order arrays.
+ topological_order_.clear();
+ topological_order_.reserve(num_blocks);
+ topological_order_loop_ends_.clear();
+ topological_order_loop_ends_.resize(num_blocks, 0u);
+ topological_order_indexes_.clear();
+ topological_order_indexes_.resize(num_blocks, static_cast<uint16_t>(-1));
// Mark all blocks as unvisited.
ClearAllVisitedFlags();
@@ -1875,8 +1832,8 @@ void MIRGraph::ComputeTopologicalSortOrder() {
if (bb->visited) {
// Loop head: it was already processed, mark end and copy exit blocks to the queue.
DCHECK(q.empty()) << PrettyMethod(cu_->method_idx, *cu_->dex_file);
- uint16_t idx = static_cast<uint16_t>(topological_order_->Size());
- topological_order_loop_ends_->Put(topological_order_indexes_->Get(bb->id), idx);
+ uint16_t idx = static_cast<uint16_t>(topological_order_.size());
+ topological_order_loop_ends_[topological_order_indexes_[bb->id]] = idx;
DCHECK_EQ(loop_head_stack.back(), bb->id);
loop_head_stack.pop_back();
ArenaBitVector* reachable =
@@ -1919,15 +1876,16 @@ void MIRGraph::ComputeTopologicalSortOrder() {
continue;
}
- GrowableArray<BasicBlockId>::Iterator pred_iter(candidate->predecessors);
- BasicBlock* pred_bb = GetBasicBlock(pred_iter.Next());
- for ( ; pred_bb != nullptr; pred_bb = GetBasicBlock(pred_iter.Next())) {
+ for (BasicBlockId pred_id : candidate->predecessors) {
+ BasicBlock* pred_bb = GetBasicBlock(pred_id);
+ DCHECK(pred_bb != nullptr);
if (pred_bb != candidate && !pred_bb->visited &&
!pred_bb->dominators->IsBitSet(candidate->id)) {
- break; // Keep non-null pred_bb to indicate failure.
+ candidate = nullptr; // Set candidate to null to indicate failure.
+ break;
}
}
- if (pred_bb == nullptr) {
+ if (candidate != nullptr) {
bb = candidate;
break;
}
@@ -1947,9 +1905,9 @@ void MIRGraph::ComputeTopologicalSortOrder() {
bb->visited = true;
// Now add the basic block.
- uint16_t idx = static_cast<uint16_t>(topological_order_->Size());
- topological_order_indexes_->Put(bb->id, idx);
- topological_order_->Insert(bb->id);
+ uint16_t idx = static_cast<uint16_t>(topological_order_.size());
+ topological_order_indexes_[bb->id] = idx;
+ topological_order_.push_back(bb->id);
// Update visited_cnt_values for children.
ChildBlockIterator succIter(bb, this);
@@ -1961,7 +1919,7 @@ void MIRGraph::ComputeTopologicalSortOrder() {
// One more predecessor was visited.
visited_cnt_values[successor->id] += 1u;
- if (visited_cnt_values[successor->id] == successor->predecessors->Size()) {
+ if (visited_cnt_values[successor->id] == successor->predecessors.size()) {
if (loop_head_stack.empty() ||
loop_head_reachable_from[loop_head_stack.back()]->IsBitSet(successor->id)) {
q.push(successor);
@@ -1974,8 +1932,8 @@ void MIRGraph::ComputeTopologicalSortOrder() {
}
// Prepare the loop head stack for iteration.
- topological_order_loop_head_stack_ =
- new (arena_) GrowableArray<std::pair<uint16_t, bool>>(arena_, max_nested_loops);
+ topological_order_loop_head_stack_.clear();
+ topological_order_loop_head_stack_.reserve(max_nested_loops);
}
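
Aside on the hunk above: reserve() and resize() replace two different GrowableArray operations. reserve() is only a capacity hint for the later push_back() calls, while resize(n, value) sets both the size and the default contents that the old Resize()-plus-Insert() loop produced. A minimal sketch with std::vector standing in for ArenaVector and an illustrative block count:

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    int main() {
      const std::size_t num_blocks = 8u;  // Illustrative.
      std::vector<uint16_t> order;
      std::vector<uint16_t> loop_ends;
      std::vector<uint16_t> indexes;
      order.reserve(num_blocks);  // Capacity only; size() stays 0 until push_back().
      loop_ends.resize(num_blocks, 0u);  // 0 marks "not a loop head".
      indexes.resize(num_blocks, static_cast<uint16_t>(-1));  // 0xffff marks "absent".
      order.push_back(1u);  // Filled in during the topological walk.
      indexes[1] = 0u;
      return static_cast<int>(order.size());  // 1
    }

This also removes the old lazy "create if needed" branch: an empty vector is a valid cleared state, which the DCHECK(!topological_order_.empty()) accessors later rely on.
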
bool BasicBlock::IsExceptionBlock() const {
@@ -1992,8 +1950,8 @@ bool MIRGraph::HasSuspendTestBetween(BasicBlock* source, BasicBlockId target_id)
return false;
int idx;
- for (idx = gen_suspend_test_list_.Size() - 1; idx >= 0; idx--) {
- BasicBlock* bb = gen_suspend_test_list_.Get(idx);
+  for (idx = static_cast<int>(gen_suspend_test_list_.size()) - 1; idx >= 0; idx--) {
+ BasicBlock* bb = gen_suspend_test_list_[idx];
if (bb == source)
return true; // The block has been inserted by a suspend check before.
if (source->dominators->IsBitSet(bb->id) && bb->dominators->IsBitSet(target_id))
@@ -2009,7 +1967,7 @@ ChildBlockIterator::ChildBlockIterator(BasicBlock* bb, MIRGraph* mir_graph)
// Check if we actually do have successors.
if (basic_block_ != 0 && basic_block_->successor_block_list_type != kNotUsed) {
have_successors_ = true;
- successor_iter_.Reset(basic_block_->successor_blocks);
+ successor_iter_ = basic_block_->successor_blocks.cbegin();
}
}
@@ -2042,9 +2000,10 @@ BasicBlock* ChildBlockIterator::Next() {
// We visited both taken and fallthrough. Now check if we have successors we need to visit.
if (have_successors_ == true) {
// Get information about next successor block.
- for (SuccessorBlockInfo* successor_block_info = successor_iter_.Next();
- successor_block_info != nullptr;
- successor_block_info = successor_iter_.Next()) {
+ auto end = basic_block_->successor_blocks.cend();
+ while (successor_iter_ != end) {
+ SuccessorBlockInfo* successor_block_info = *successor_iter_;
+ ++successor_iter_;
// If block was replaced by zero block, take next one.
if (successor_block_info->block != NullBasicBlockId) {
return mir_graph_->GetBasicBlock(successor_block_info->block);
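
ChildBlockIterator now keeps an ArenaVector const_iterator across calls instead of a GrowableArray iterator whose Next() returned a sentinel. A self-contained sketch of the same shape; SuccessorInfo, the 0 sentinel, and the cursor class are illustrative stand-ins rather than the ART types:

    #include <cstdint>
    #include <vector>

    struct SuccessorInfo { uint16_t block; };  // 0 stands in for NullBasicBlockId.

    class SuccessorCursor {
     public:
      explicit SuccessorCursor(const std::vector<SuccessorInfo*>& list)
          : list_(list), it_(list.cbegin()) {}
      // Returns the next successor id that is not the null sentinel, or -1.
      int Next() {
        auto end = list_.cend();
        while (it_ != end) {
          SuccessorInfo* info = *it_;
          ++it_;  // Advance before filtering, as the rewritten loop does.
          if (info->block != 0u) {
            return info->block;
          }
        }
        return -1;
      }
     private:
      const std::vector<SuccessorInfo*>& list_;
      std::vector<SuccessorInfo*>::const_iterator it_;
    };
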
@@ -2075,17 +2034,12 @@ BasicBlock* BasicBlock::Copy(MIRGraph* mir_graph) {
result_bb->successor_block_list_type = successor_block_list_type;
if (result_bb->successor_block_list_type != kNotUsed) {
- size_t size = successor_blocks->Size();
- result_bb->successor_blocks = new (arena) GrowableArray<SuccessorBlockInfo*>(arena, size, kGrowableArraySuccessorBlocks);
- GrowableArray<SuccessorBlockInfo*>::Iterator iterator(successor_blocks);
- while (true) {
- SuccessorBlockInfo* sbi_old = iterator.Next();
- if (sbi_old == nullptr) {
- break;
- }
- SuccessorBlockInfo* sbi_new = static_cast<SuccessorBlockInfo*>(arena->Alloc(sizeof(SuccessorBlockInfo), kArenaAllocSuccessor));
+ result_bb->successor_blocks.reserve(successor_blocks.size());
+ for (SuccessorBlockInfo* sbi_old : successor_blocks) {
+ SuccessorBlockInfo* sbi_new = static_cast<SuccessorBlockInfo*>(
+ arena->Alloc(sizeof(SuccessorBlockInfo), kArenaAllocSuccessor));
memcpy(sbi_new, sbi_old, sizeof(SuccessorBlockInfo));
- result_bb->successor_blocks->Insert(sbi_new);
+ result_bb->successor_blocks.push_back(sbi_new);
}
}
@@ -2244,14 +2198,10 @@ void BasicBlock::Hide(CompilationUnit* c_unit) {
first_mir_insn = nullptr;
last_mir_insn = nullptr;
- GrowableArray<BasicBlockId>::Iterator iterator(predecessors);
-
MIRGraph* mir_graph = c_unit->mir_graph.get();
- while (true) {
- BasicBlock* pred_bb = mir_graph->GetBasicBlock(iterator.Next());
- if (pred_bb == nullptr) {
- break;
- }
+ for (BasicBlockId pred_id : predecessors) {
+ BasicBlock* pred_bb = mir_graph->GetBasicBlock(pred_id);
+ DCHECK(pred_bb != nullptr);
// Sadly we have to go through the children by hand here.
pred_bb->ReplaceChild(id, NullBasicBlockId);
@@ -2261,8 +2211,8 @@ void BasicBlock::Hide(CompilationUnit* c_unit) {
ChildBlockIterator successorChildIter(this, mir_graph);
for (BasicBlock* childPtr = successorChildIter.Next(); childPtr != 0; childPtr = successorChildIter.Next()) {
- // Replace child with null child.
- childPtr->predecessors->Delete(id);
+ // Erase this predecessor from child.
+ childPtr->ErasePredecessor(id);
}
// Remove link to children.
@@ -2328,12 +2278,7 @@ bool BasicBlock::ReplaceChild(BasicBlockId old_bb, BasicBlockId new_bb) {
}
if (successor_block_list_type != kNotUsed) {
- GrowableArray<SuccessorBlockInfo*>::Iterator iterator(successor_blocks);
- while (true) {
- SuccessorBlockInfo* successor_block_info = iterator.Next();
- if (successor_block_info == nullptr) {
- break;
- }
+ for (SuccessorBlockInfo* successor_block_info : successor_blocks) {
if (successor_block_info->block == old_bb) {
successor_block_info->block = new_bb;
found = true;
@@ -2344,28 +2289,20 @@ bool BasicBlock::ReplaceChild(BasicBlockId old_bb, BasicBlockId new_bb) {
return found;
}
-void BasicBlock::UpdatePredecessor(BasicBlockId old_parent, BasicBlockId new_parent) {
- GrowableArray<BasicBlockId>::Iterator iterator(predecessors);
- bool found = false;
-
- while (true) {
- BasicBlockId pred_bb_id = iterator.Next();
-
- if (pred_bb_id == NullBasicBlockId) {
- break;
- }
-
- if (pred_bb_id == old_parent) {
- size_t idx = iterator.GetIndex() - 1;
- predecessors->Put(idx, new_parent);
- found = true;
- break;
- }
- }
+void BasicBlock::ErasePredecessor(BasicBlockId old_pred) {
+ auto pos = std::find(predecessors.begin(), predecessors.end(), old_pred);
+ DCHECK(pos != predecessors.end());
+ predecessors.erase(pos);
+}
- // If not found, add it.
- if (found == false) {
- predecessors->Insert(new_parent);
+void BasicBlock::UpdatePredecessor(BasicBlockId old_pred, BasicBlockId new_pred) {
+ DCHECK_NE(new_pred, NullBasicBlockId);
+ auto pos = std::find(predecessors.begin(), predecessors.end(), old_pred);
+ if (pos != predecessors.end()) {
+ *pos = new_pred;
+ } else {
+ // If not found, add it.
+ predecessors.push_back(new_pred);
}
}
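
The replacement pair above reduces to two standard-library idioms: find-and-erase, and find-replace-or-append. A sketch with std::vector<uint16_t> in place of ArenaVector<BasicBlockId>; note the real ErasePredecessor() DCHECKs that the element is present instead of tolerating absence:

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    void ErasePredecessor(std::vector<uint16_t>& preds, uint16_t old_pred) {
      auto pos = std::find(preds.begin(), preds.end(), old_pred);
      if (pos != preds.end()) {  // The original DCHECKs this instead.
        preds.erase(pos);  // Removes one occurrence and shifts the tail left.
      }
    }

    void UpdatePredecessor(std::vector<uint16_t>& preds,
                           uint16_t old_pred, uint16_t new_pred) {
      auto pos = std::find(preds.begin(), preds.end(), old_pred);
      if (pos != preds.end()) {
        *pos = new_pred;  // Replace in place.
      } else {
        preds.push_back(new_pred);  // If not found, add it.
      }
    }
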
@@ -2373,7 +2310,7 @@ void BasicBlock::UpdatePredecessor(BasicBlockId old_parent, BasicBlockId new_par
// post-incremented.
BasicBlock* MIRGraph::CreateNewBB(BBType block_type) {
BasicBlock* res = NewMemBB(block_type, num_blocks_++);
- block_list_.Insert(res);
+ block_list_.push_back(res);
return res;
}
@@ -2383,7 +2320,7 @@ void MIRGraph::CalculateBasicBlockInformation() {
}
void MIRGraph::InitializeBasicBlockData() {
- num_blocks_ = block_list_.Size();
+ num_blocks_ = block_list_.size();
}
int MIR::DecodedInstruction::FlagsOf() const {
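
Summary of the container migration this file (and the rest of the commit) applies mechanically: Size() becomes size(), Get(i) becomes operator[], Put(i, v) becomes indexed assignment, Insert(v) becomes push_back(v), Reset() becomes clear(), and explicit Iterator loops become range-based for. A tiny before/after sketch, assuming ArenaVector behaves like std::vector over an arena allocator, which the arena_->Adapter() construction suggests:

    #include <cstdint>
    #include <vector>

    // Before (GrowableArray, shown as comments since the class is going away):
    //   GrowableArray<BasicBlockId>::Iterator iter(&list);
    //   for (BasicBlockId id = iter.Next(); /* sentinel check */; id = iter.Next()) ...
    // After (ArenaVector / std::vector):
    uint32_t SumIds(const std::vector<uint16_t>& list) {
      uint32_t sum = 0u;
      for (uint16_t id : list) {  // Range-for replaces the explicit iterator.
        sum += id;
      }
      return sum;
    }
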
diff --git a/compiler/dex/mir_graph.h b/compiler/dex/mir_graph.h
index 078970d..ea2ca39 100644
--- a/compiler/dex/mir_graph.h
+++ b/compiler/dex/mir_graph.h
@@ -27,7 +27,6 @@
#include "mir_field_info.h"
#include "mir_method_info.h"
#include "utils/arena_bit_vector.h"
-#include "utils/growable_array.h"
#include "utils/arena_containers.h"
#include "utils/scoped_arena_containers.h"
#include "reg_location.h"
@@ -394,6 +393,17 @@ struct MIR {
struct SuccessorBlockInfo;
struct BasicBlock {
+ BasicBlock(BasicBlockId block_id, BBType type, ArenaAllocator* allocator)
+ : id(block_id),
+ dfs_id(), start_offset(), fall_through(), taken(), i_dom(), nesting_depth(),
+ block_type(type),
+ successor_block_list_type(kNotUsed),
+ visited(), hidden(), catch_entry(), explicit_throw(), conditional_branch(),
+ terminated_by_return(), dominates_return(), use_lvn(), first_mir_insn(),
+ last_mir_insn(), data_flow_info(), dominators(), i_dominated(), dom_frontier(),
+ predecessors(allocator->Adapter(kArenaAllocBBPredecessors)),
+ successor_blocks(allocator->Adapter(kArenaAllocSuccessor)) {
+ }
BasicBlockId id;
BasicBlockId dfs_id;
NarrowDexOffset start_offset; // Offset in code units.
@@ -417,8 +427,8 @@ struct BasicBlock {
ArenaBitVector* dominators;
ArenaBitVector* i_dominated; // Set nodes being immediately dominated.
ArenaBitVector* dom_frontier; // Dominance frontier.
- GrowableArray<BasicBlockId>* predecessors;
- GrowableArray<SuccessorBlockInfo*>* successor_blocks;
+ ArenaVector<BasicBlockId> predecessors;
+ ArenaVector<SuccessorBlockInfo*> successor_blocks;
void AppendMIR(MIR* mir);
void AppendMIRList(MIR* first_list_mir, MIR* last_list_mir);
@@ -446,7 +456,7 @@ struct BasicBlock {
* @brief Hide the BasicBlock.
* @details Set it to kDalvikByteCode, set hidden to true, remove all MIRs,
* remove itself from any predecessor edges, remove itself from any
- * child's predecessor growable array.
+ * child's predecessor array.
*/
void Hide(CompilationUnit* c_unit);
@@ -461,7 +471,12 @@ struct BasicBlock {
bool ReplaceChild(BasicBlockId old_bb, BasicBlockId new_bb);
/**
- * @brief Update the predecessor growable array from old_pred to new_pred.
+ * @brief Erase the predecessor old_pred.
+ */
+ void ErasePredecessor(BasicBlockId old_pred);
+
+ /**
+ * @brief Update the predecessor array from old_pred to new_pred.
*/
void UpdatePredecessor(BasicBlockId old_pred, BasicBlockId new_pred);
@@ -512,7 +527,7 @@ class ChildBlockIterator {
bool visited_fallthrough_;
bool visited_taken_;
bool have_successors_;
- GrowableArray<SuccessorBlockInfo*>::Iterator successor_iter_;
+ ArenaVector<SuccessorBlockInfo*>::const_iterator successor_iter_;
};
/*
@@ -615,26 +630,27 @@ class MIRGraph {
}
BasicBlock* GetBasicBlock(unsigned int block_id) const {
- return (block_id == NullBasicBlockId) ? NULL : block_list_.Get(block_id);
+ DCHECK_LT(block_id, block_list_.size()); // NOTE: NullBasicBlockId is 0.
+    return (block_id == NullBasicBlockId) ? nullptr : block_list_[block_id];
}
size_t GetBasicBlockListCount() const {
- return block_list_.Size();
+ return block_list_.size();
}
- GrowableArray<BasicBlock*>* GetBlockList() {
- return &block_list_;
+ const ArenaVector<BasicBlock*>& GetBlockList() {
+ return block_list_;
}
- GrowableArray<BasicBlockId>* GetDfsOrder() {
+ const ArenaVector<BasicBlockId>& GetDfsOrder() {
return dfs_order_;
}
- GrowableArray<BasicBlockId>* GetDfsPostOrder() {
+ const ArenaVector<BasicBlockId>& GetDfsPostOrder() {
return dfs_post_order_;
}
- GrowableArray<BasicBlockId>* GetDomPostOrder() {
+ const ArenaVector<BasicBlockId>& GetDomPostOrder() {
return dom_post_order_traversal_;
}
@@ -681,20 +697,20 @@ class MIRGraph {
void DoCacheFieldLoweringInfo();
const MirIFieldLoweringInfo& GetIFieldLoweringInfo(MIR* mir) const {
- DCHECK_LT(mir->meta.ifield_lowering_info, ifield_lowering_infos_.Size());
- return ifield_lowering_infos_.GetRawStorage()[mir->meta.ifield_lowering_info];
+ DCHECK_LT(mir->meta.ifield_lowering_info, ifield_lowering_infos_.size());
+ return ifield_lowering_infos_[mir->meta.ifield_lowering_info];
}
const MirSFieldLoweringInfo& GetSFieldLoweringInfo(MIR* mir) const {
- DCHECK_LT(mir->meta.sfield_lowering_info, sfield_lowering_infos_.Size());
- return sfield_lowering_infos_.GetRawStorage()[mir->meta.sfield_lowering_info];
+ DCHECK_LT(mir->meta.sfield_lowering_info, sfield_lowering_infos_.size());
+ return sfield_lowering_infos_[mir->meta.sfield_lowering_info];
}
void DoCacheMethodLoweringInfo();
const MirMethodLoweringInfo& GetMethodLoweringInfo(MIR* mir) {
- DCHECK_LT(mir->meta.method_lowering_info, method_lowering_infos_.Size());
- return method_lowering_infos_.GetRawStorage()[mir->meta.method_lowering_info];
+ DCHECK_LT(mir->meta.method_lowering_info, method_lowering_infos_.size());
+ return method_lowering_infos_[mir->meta.method_lowering_info];
}
void ComputeInlineIFieldLoweringInfo(uint16_t field_idx, MIR* invoke, MIR* iget_or_iput);
@@ -707,24 +723,24 @@ class MIRGraph {
void BasicBlockOptimization();
- GrowableArray<BasicBlockId>* GetTopologicalSortOrder() {
- DCHECK(topological_order_ != nullptr);
+ const ArenaVector<BasicBlockId>& GetTopologicalSortOrder() {
+ DCHECK(!topological_order_.empty());
return topological_order_;
}
- GrowableArray<BasicBlockId>* GetTopologicalSortOrderLoopEnds() {
- DCHECK(topological_order_loop_ends_ != nullptr);
+ const ArenaVector<BasicBlockId>& GetTopologicalSortOrderLoopEnds() {
+ DCHECK(!topological_order_loop_ends_.empty());
return topological_order_loop_ends_;
}
- GrowableArray<BasicBlockId>* GetTopologicalSortOrderIndexes() {
- DCHECK(topological_order_indexes_ != nullptr);
+ const ArenaVector<BasicBlockId>& GetTopologicalSortOrderIndexes() {
+ DCHECK(!topological_order_indexes_.empty());
return topological_order_indexes_;
}
- GrowableArray<std::pair<uint16_t, bool>>* GetTopologicalSortOrderLoopHeadStack() {
- DCHECK(topological_order_loop_head_stack_ != nullptr);
- return topological_order_loop_head_stack_;
+ ArenaVector<std::pair<uint16_t, bool>>* GetTopologicalSortOrderLoopHeadStack() {
+ DCHECK(!topological_order_.empty()); // Checking the main array, not the stack.
+ return &topological_order_loop_head_stack_;
}
bool IsConst(int32_t s_reg) const {
@@ -802,16 +818,19 @@ class MIRGraph {
return num_reachable_blocks_;
}
- int GetUseCount(int sreg) const {
- return use_counts_.Get(sreg);
+ uint32_t GetUseCount(int sreg) const {
+ DCHECK_LT(static_cast<size_t>(sreg), use_counts_.size());
+ return use_counts_[sreg];
}
- int GetRawUseCount(int sreg) const {
- return raw_use_counts_.Get(sreg);
+ uint32_t GetRawUseCount(int sreg) const {
+ DCHECK_LT(static_cast<size_t>(sreg), raw_use_counts_.size());
+ return raw_use_counts_[sreg];
}
int GetSSASubscript(int ssa_reg) const {
- return ssa_subscripts_->Get(ssa_reg);
+ DCHECK_LT(static_cast<size_t>(ssa_reg), ssa_subscripts_.size());
+ return ssa_subscripts_[ssa_reg];
}
RegLocation GetRawSrc(MIR* mir, int num) {
@@ -1180,9 +1199,9 @@ class MIRGraph {
// Used for removing redundant suspend tests
void AppendGenSuspendTestList(BasicBlock* bb) {
- if (gen_suspend_test_list_.Size() == 0 ||
- gen_suspend_test_list_.Get(gen_suspend_test_list_.Size() - 1) != bb) {
- gen_suspend_test_list_.Insert(bb);
+ if (gen_suspend_test_list_.size() == 0 ||
+ gen_suspend_test_list_.back() != bb) {
+ gen_suspend_test_list_.push_back(bb);
}
}
@@ -1248,30 +1267,30 @@ class MIRGraph {
std::string* skip_message);
CompilationUnit* const cu_;
- GrowableArray<int>* ssa_base_vregs_;
- GrowableArray<int>* ssa_subscripts_;
+ ArenaVector<int> ssa_base_vregs_;
+ ArenaVector<int> ssa_subscripts_;
// Map original Dalvik virtual reg i to the current SSA name.
int* vreg_to_ssa_map_; // length == method->registers_size
int* ssa_last_defs_; // length == method->registers_size
ArenaBitVector* is_constant_v_; // length == num_ssa_reg
int* constant_values_; // length == num_ssa_reg
// Use counts of ssa names.
- GrowableArray<uint32_t> use_counts_; // Weighted by nesting depth
- GrowableArray<uint32_t> raw_use_counts_; // Not weighted
+ ArenaVector<uint32_t> use_counts_; // Weighted by nesting depth
+ ArenaVector<uint32_t> raw_use_counts_; // Not weighted
unsigned int num_reachable_blocks_;
unsigned int max_num_reachable_blocks_;
- GrowableArray<BasicBlockId>* dfs_order_;
- GrowableArray<BasicBlockId>* dfs_post_order_;
- GrowableArray<BasicBlockId>* dom_post_order_traversal_;
- GrowableArray<BasicBlockId>* topological_order_;
+ ArenaVector<BasicBlockId> dfs_order_;
+ ArenaVector<BasicBlockId> dfs_post_order_;
+ ArenaVector<BasicBlockId> dom_post_order_traversal_;
+ ArenaVector<BasicBlockId> topological_order_;
// Indexes in topological_order_ need to be only as big as the BasicBlockId.
COMPILE_ASSERT(sizeof(BasicBlockId) == sizeof(uint16_t), assuming_16_bit_BasicBlockId);
// For each loop head, remember the past-the-end index of the end of the loop. 0 if not loop head.
- GrowableArray<uint16_t>* topological_order_loop_ends_;
+ ArenaVector<uint16_t> topological_order_loop_ends_;
// Map BB ids to topological_order_ indexes. 0xffff if not included (hidden or null block).
- GrowableArray<uint16_t>* topological_order_indexes_;
+ ArenaVector<uint16_t> topological_order_indexes_;
// Stack of the loop head indexes and recalculation flags for RepeatingTopologicalSortIterator.
- GrowableArray<std::pair<uint16_t, bool>>* topological_order_loop_head_stack_;
+ ArenaVector<std::pair<uint16_t, bool>> topological_order_loop_head_stack_;
int* i_dom_list_;
ArenaBitVector** def_block_matrix_; // original num registers x num_blocks.
std::unique_ptr<ScopedArenaAllocator> temp_scoped_alloc_;
@@ -1280,13 +1299,13 @@ class MIRGraph {
ArenaBitVector* temp_bit_vector_;
std::unique_ptr<GlobalValueNumbering> temp_gvn_;
static const int kInvalidEntry = -1;
- GrowableArray<BasicBlock*> block_list_;
+ ArenaVector<BasicBlock*> block_list_;
ArenaBitVector* try_block_addr_;
BasicBlock* entry_block_;
BasicBlock* exit_block_;
unsigned int num_blocks_;
const DexFile::CodeItem* current_code_item_;
- GrowableArray<uint16_t> dex_pc_to_block_map_; // FindBlock lookup cache.
+ ArenaVector<uint16_t> dex_pc_to_block_map_; // FindBlock lookup cache.
ArenaVector<DexCompilationUnit*> m_units_; // List of methods included in this graph
typedef std::pair<int, int> MIRLocation; // Insert point, (m_unit_ index, offset)
ArenaVector<MIRLocation> method_stack_; // Include stack
@@ -1310,11 +1329,11 @@ class MIRGraph {
bool compiler_temps_committed_; // Keeps track whether number of temps has been frozen (for example post frame size calculation).
bool punt_to_interpreter_; // Difficult or not worthwhile - just interpret.
uint64_t merged_df_flags_;
- GrowableArray<MirIFieldLoweringInfo> ifield_lowering_infos_;
- GrowableArray<MirSFieldLoweringInfo> sfield_lowering_infos_;
- GrowableArray<MirMethodLoweringInfo> method_lowering_infos_;
+ ArenaVector<MirIFieldLoweringInfo> ifield_lowering_infos_;
+ ArenaVector<MirSFieldLoweringInfo> sfield_lowering_infos_;
+ ArenaVector<MirMethodLoweringInfo> method_lowering_infos_;
static const uint64_t oat_data_flow_attributes_[kMirOpLast];
- GrowableArray<BasicBlock*> gen_suspend_test_list_; // List of blocks containing suspend tests
+ ArenaVector<BasicBlock*> gen_suspend_test_list_; // List of blocks containing suspend tests
friend class ClassInitCheckEliminationTest;
friend class GlobalValueNumberingTest;
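
The new BasicBlock constructor threads the arena into its two vectors via allocator->Adapter(kind), so every push_back() draws from arena memory that is released wholesale. Standard C++17 has the same shape in std::pmr, which stands in for the ART arena in this sketch; all names are illustrative:

    #include <cstdint>
    #include <memory_resource>

    struct Block {
      explicit Block(std::pmr::memory_resource* arena)
          : predecessors(arena), successors(arena) {}  // Like allocator->Adapter(...).
      std::pmr::vector<uint16_t> predecessors;
      std::pmr::vector<uint16_t> successors;
    };

    int main() {
      std::pmr::monotonic_buffer_resource arena;  // Freed wholesale, like an ArenaAllocator.
      Block bb(&arena);
      bb.predecessors.push_back(1u);
      return static_cast<int>(bb.predecessors.size());  // 1
    }
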
diff --git a/compiler/dex/mir_graph_test.cc b/compiler/dex/mir_graph_test.cc
index bdc05a9..e34759b 100644
--- a/compiler/dex/mir_graph_test.cc
+++ b/compiler/dex/mir_graph_test.cc
@@ -57,51 +57,43 @@ class TopologicalSortOrderTest : public testing::Test {
void DoPrepareBasicBlocks(const BBDef* defs, size_t count) {
cu_.mir_graph->block_id_map_.clear();
- cu_.mir_graph->block_list_.Reset();
+ cu_.mir_graph->block_list_.clear();
ASSERT_LT(3u, count); // null, entry, exit and at least one bytecode block.
ASSERT_EQ(kNullBlock, defs[0].type);
ASSERT_EQ(kEntryBlock, defs[1].type);
ASSERT_EQ(kExitBlock, defs[2].type);
for (size_t i = 0u; i != count; ++i) {
const BBDef* def = &defs[i];
- BasicBlock* bb = cu_.mir_graph->NewMemBB(def->type, i);
- cu_.mir_graph->block_list_.Insert(bb);
+ BasicBlock* bb = cu_.mir_graph->CreateNewBB(def->type);
if (def->num_successors <= 2) {
bb->successor_block_list_type = kNotUsed;
- bb->successor_blocks = nullptr;
bb->fall_through = (def->num_successors >= 1) ? def->successors[0] : 0u;
bb->taken = (def->num_successors >= 2) ? def->successors[1] : 0u;
} else {
bb->successor_block_list_type = kPackedSwitch;
bb->fall_through = 0u;
bb->taken = 0u;
- bb->successor_blocks = new (&cu_.arena) GrowableArray<SuccessorBlockInfo*>(
- &cu_.arena, def->num_successors, kGrowableArraySuccessorBlocks);
+ bb->successor_blocks.reserve(def->num_successors);
for (size_t j = 0u; j != def->num_successors; ++j) {
SuccessorBlockInfo* successor_block_info =
static_cast<SuccessorBlockInfo*>(cu_.arena.Alloc(sizeof(SuccessorBlockInfo),
kArenaAllocSuccessor));
successor_block_info->block = j;
successor_block_info->key = 0u; // Not used by class init check elimination.
- bb->successor_blocks->Insert(successor_block_info);
+ bb->successor_blocks.push_back(successor_block_info);
}
}
- bb->predecessors = new (&cu_.arena) GrowableArray<BasicBlockId>(
- &cu_.arena, def->num_predecessors, kGrowableArrayPredecessors);
- for (size_t j = 0u; j != def->num_predecessors; ++j) {
- ASSERT_NE(0u, def->predecessors[j]);
- bb->predecessors->Insert(def->predecessors[j]);
- }
+ bb->predecessors.assign(def->predecessors, def->predecessors + def->num_predecessors);
if (def->type == kDalvikByteCode || def->type == kEntryBlock || def->type == kExitBlock) {
bb->data_flow_info = static_cast<BasicBlockDataFlow*>(
cu_.arena.Alloc(sizeof(BasicBlockDataFlow), kArenaAllocDFInfo));
}
}
cu_.mir_graph->num_blocks_ = count;
- ASSERT_EQ(count, cu_.mir_graph->block_list_.Size());
- cu_.mir_graph->entry_block_ = cu_.mir_graph->block_list_.Get(1);
+ ASSERT_EQ(count, cu_.mir_graph->block_list_.size());
+ cu_.mir_graph->entry_block_ = cu_.mir_graph->block_list_[1];
ASSERT_EQ(kEntryBlock, cu_.mir_graph->entry_block_->block_type);
- cu_.mir_graph->exit_block_ = cu_.mir_graph->block_list_.Get(2);
+ cu_.mir_graph->exit_block_ = cu_.mir_graph->block_list_[2];
ASSERT_EQ(kExitBlock, cu_.mir_graph->exit_block_->block_type);
DexFile::CodeItem* code_item = static_cast<DexFile::CodeItem*>(cu_.arena.Alloc(sizeof(DexFile::CodeItem),
@@ -120,21 +112,21 @@ class TopologicalSortOrderTest : public testing::Test {
cu_.mir_graph->ComputeDominators();
cu_.mir_graph->ComputeTopologicalSortOrder();
cu_.mir_graph->SSATransformationEnd();
- ASSERT_NE(cu_.mir_graph->topological_order_, nullptr);
- ASSERT_NE(cu_.mir_graph->topological_order_loop_ends_, nullptr);
- ASSERT_NE(cu_.mir_graph->topological_order_indexes_, nullptr);
- ASSERT_EQ(cu_.mir_graph->GetNumBlocks(), cu_.mir_graph->topological_order_indexes_->Size());
- for (size_t i = 0, size = cu_.mir_graph->GetTopologicalSortOrder()->Size(); i != size; ++i) {
- ASSERT_LT(cu_.mir_graph->topological_order_->Get(i), cu_.mir_graph->GetNumBlocks());
- BasicBlockId id = cu_.mir_graph->topological_order_->Get(i);
- EXPECT_EQ(i, cu_.mir_graph->topological_order_indexes_->Get(id));
+ ASSERT_FALSE(cu_.mir_graph->topological_order_.empty());
+ ASSERT_FALSE(cu_.mir_graph->topological_order_loop_ends_.empty());
+ ASSERT_FALSE(cu_.mir_graph->topological_order_indexes_.empty());
+ ASSERT_EQ(cu_.mir_graph->GetNumBlocks(), cu_.mir_graph->topological_order_indexes_.size());
+ for (size_t i = 0, size = cu_.mir_graph->GetTopologicalSortOrder().size(); i != size; ++i) {
+ ASSERT_LT(cu_.mir_graph->topological_order_[i], cu_.mir_graph->GetNumBlocks());
+ BasicBlockId id = cu_.mir_graph->topological_order_[i];
+ EXPECT_EQ(i, cu_.mir_graph->topological_order_indexes_[id]);
}
}
void DoCheckOrder(const BasicBlockId* ids, size_t count) {
- ASSERT_EQ(count, cu_.mir_graph->GetTopologicalSortOrder()->Size());
+ ASSERT_EQ(count, cu_.mir_graph->GetTopologicalSortOrder().size());
for (size_t i = 0; i != count; ++i) {
- EXPECT_EQ(ids[i], cu_.mir_graph->GetTopologicalSortOrder()->Get(i)) << i;
+ EXPECT_EQ(ids[i], cu_.mir_graph->GetTopologicalSortOrder()[i]) << i;
}
}
@@ -145,8 +137,8 @@ class TopologicalSortOrderTest : public testing::Test {
void DoCheckLoopEnds(const uint16_t* ends, size_t count) {
for (size_t i = 0; i != count; ++i) {
- ASSERT_LT(i, cu_.mir_graph->GetTopologicalSortOrderLoopEnds()->Size());
- EXPECT_EQ(ends[i], cu_.mir_graph->GetTopologicalSortOrderLoopEnds()->Get(i)) << i;
+ ASSERT_LT(i, cu_.mir_graph->GetTopologicalSortOrderLoopEnds().size());
+ EXPECT_EQ(ends[i], cu_.mir_graph->GetTopologicalSortOrderLoopEnds()[i]) << i;
}
}
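
The test fixtures above replace a per-element Insert() loop with a single assign(first, last) over the raw predecessor array. A minimal sketch of that idiom:

    #include <cstdint>
    #include <vector>

    int main() {
      const uint16_t preds[] = {1u, 4u, 7u};  // Illustrative ids.
      std::vector<uint16_t> predecessors;
      predecessors.assign(preds, preds + 3);  // Replaces any old contents wholesale.
      return predecessors.size() == 3u ? 0 : 1;
    }

Note the old loop also ASSERTed each id was nonzero; assign() drops that per-element check.
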
diff --git a/compiler/dex/mir_optimization.cc b/compiler/dex/mir_optimization.cc
index fdabc3e..41f63c1 100644
--- a/compiler/dex/mir_optimization.cc
+++ b/compiler/dex/mir_optimization.cc
@@ -28,7 +28,7 @@
namespace art {
static unsigned int Predecessors(BasicBlock* bb) {
- return bb->predecessors->Size();
+ return bb->predecessors.size();
}
/* Setup a constant value for opcodes that have the DF_SETS_CONST attribute */
@@ -230,7 +230,8 @@ COMPILE_ASSERT(ConditionCodeForIfCcZ(Instruction::IF_GTZ) == kCondGt, check_if_g
COMPILE_ASSERT(ConditionCodeForIfCcZ(Instruction::IF_LEZ) == kCondLe, check_if_lez_ccode);
int MIRGraph::GetSSAUseCount(int s_reg) {
- return raw_use_counts_.Get(s_reg);
+  DCHECK_LT(static_cast<size_t>(s_reg), raw_use_counts_.size());
+ return raw_use_counts_[s_reg];
}
size_t MIRGraph::GetNumBytesForSpecialTemps() const {
@@ -697,7 +698,8 @@ bool MIRGraph::LayoutBlocks(BasicBlock* bb) {
if ((walker->block_type == kEntryBlock) || (Predecessors(walker) != 1)) {
break;
}
- BasicBlock* prev = GetBasicBlock(walker->predecessors->Get(0));
+ DCHECK(!walker->predecessors.empty());
+ BasicBlock* prev = GetBasicBlock(walker->predecessors[0]);
// If we visited the predecessor, we are done.
if (prev->visited) {
@@ -784,7 +786,7 @@ void MIRGraph::CombineBlocks(struct BasicBlock* bb) {
*bb->last_mir_insn = *throw_insn;
// Use the successor info from the next block
bb->successor_block_list_type = bb_next->successor_block_list_type;
- bb->successor_blocks = bb_next->successor_blocks;
+ bb->successor_blocks.swap(bb_next->successor_blocks); // Swap instead of copying.
// Use the ending block linkage from the next block
bb->fall_through = bb_next->fall_through;
GetBasicBlock(bb->taken)->block_type = kDead; // Kill the unused exception block
@@ -856,8 +858,8 @@ bool MIRGraph::EliminateNullChecksAndInferTypes(BasicBlock* bb) {
int this_reg = GetFirstInVR();
ssa_regs_to_check->ClearBit(this_reg);
}
- } else if (bb->predecessors->Size() == 1) {
- BasicBlock* pred_bb = GetBasicBlock(bb->predecessors->Get(0));
+ } else if (bb->predecessors.size() == 1) {
+ BasicBlock* pred_bb = GetBasicBlock(bb->predecessors[0]);
// pred_bb must have already been processed at least once.
DCHECK(pred_bb->data_flow_info->ending_check_v != nullptr);
ssa_regs_to_check->Copy(pred_bb->data_flow_info->ending_check_v);
@@ -883,25 +885,22 @@ bool MIRGraph::EliminateNullChecksAndInferTypes(BasicBlock* bb) {
}
} else {
// Starting state is union of all incoming arcs
- GrowableArray<BasicBlockId>::Iterator iter(bb->predecessors);
- BasicBlock* pred_bb = GetBasicBlock(iter.Next());
- CHECK(pred_bb != NULL);
- while (pred_bb->data_flow_info->ending_check_v == nullptr) {
- pred_bb = GetBasicBlock(iter.Next());
- // At least one predecessor must have been processed before this bb.
+ bool copied_first = false;
+ for (BasicBlockId pred_id : bb->predecessors) {
+ BasicBlock* pred_bb = GetBasicBlock(pred_id);
DCHECK(pred_bb != nullptr);
DCHECK(pred_bb->data_flow_info != nullptr);
- }
- ssa_regs_to_check->Copy(pred_bb->data_flow_info->ending_check_v);
- while (true) {
- pred_bb = GetBasicBlock(iter.Next());
- if (!pred_bb) break;
- DCHECK(pred_bb->data_flow_info != nullptr);
if (pred_bb->data_flow_info->ending_check_v == nullptr) {
continue;
}
- ssa_regs_to_check->Union(pred_bb->data_flow_info->ending_check_v);
+ if (!copied_first) {
+ copied_first = true;
+ ssa_regs_to_check->Copy(pred_bb->data_flow_info->ending_check_v);
+ } else {
+ ssa_regs_to_check->Union(pred_bb->data_flow_info->ending_check_v);
+ }
}
+ DCHECK(copied_first); // At least one predecessor must have been processed before this bb.
}
// At this point, ssa_regs_to_check shows which sregs have an object definition with
// no intervening uses.
@@ -1156,35 +1155,31 @@ bool MIRGraph::EliminateClassInitChecks(BasicBlock* bb) {
DCHECK(classes_to_check != nullptr);
if (bb->block_type == kEntryBlock) {
classes_to_check->SetInitialBits(temp_bit_vector_size_);
- } else if (bb->predecessors->Size() == 1) {
- BasicBlock* pred_bb = GetBasicBlock(bb->predecessors->Get(0));
+ } else if (bb->predecessors.size() == 1) {
+ BasicBlock* pred_bb = GetBasicBlock(bb->predecessors[0]);
// pred_bb must have already been processed at least once.
DCHECK(pred_bb != nullptr);
DCHECK(pred_bb->data_flow_info != nullptr);
DCHECK(pred_bb->data_flow_info->ending_check_v != nullptr);
classes_to_check->Copy(pred_bb->data_flow_info->ending_check_v);
} else {
- // Starting state is union of all incoming arcs
- GrowableArray<BasicBlockId>::Iterator iter(bb->predecessors);
- BasicBlock* pred_bb = GetBasicBlock(iter.Next());
- DCHECK(pred_bb != NULL);
- DCHECK(pred_bb->data_flow_info != NULL);
- while (pred_bb->data_flow_info->ending_check_v == nullptr) {
- pred_bb = GetBasicBlock(iter.Next());
- // At least one predecessor must have been processed before this bb.
+ // Starting state is union of all incoming arcs.
+ bool copied_first = false;
+ for (BasicBlockId pred_id : bb->predecessors) {
+ BasicBlock* pred_bb = GetBasicBlock(pred_id);
DCHECK(pred_bb != nullptr);
DCHECK(pred_bb->data_flow_info != nullptr);
- }
- classes_to_check->Copy(pred_bb->data_flow_info->ending_check_v);
- while (true) {
- pred_bb = GetBasicBlock(iter.Next());
- if (!pred_bb) break;
- DCHECK(pred_bb->data_flow_info != nullptr);
if (pred_bb->data_flow_info->ending_check_v == nullptr) {
continue;
}
- classes_to_check->Union(pred_bb->data_flow_info->ending_check_v);
+ if (!copied_first) {
+ copied_first = true;
+ classes_to_check->Copy(pred_bb->data_flow_info->ending_check_v);
+ } else {
+ classes_to_check->Union(pred_bb->data_flow_info->ending_check_v);
+ }
}
+ DCHECK(copied_first); // At least one predecessor must have been processed before this bb.
}
// At this point, classes_to_check shows which classes need clinit checks.
@@ -1312,8 +1307,8 @@ void MIRGraph::ComputeInlineIFieldLoweringInfo(uint16_t field_idx, MIR* invoke,
MirIFieldLoweringInfo::Resolve(cu_->compiler_driver, &inlined_unit, &inlined_field_info, 1u);
DCHECK(inlined_field_info.IsResolved());
- uint32_t field_info_index = ifield_lowering_infos_.Size();
- ifield_lowering_infos_.Insert(inlined_field_info);
+ uint32_t field_info_index = ifield_lowering_infos_.size();
+ ifield_lowering_infos_.push_back(inlined_field_info);
temp_bit_vector_->SetBit(method_index);
temp_insn_data_[method_index] = field_info_index;
iget_or_iput->meta.ifield_lowering_info = field_info_index;
@@ -1321,7 +1316,7 @@ void MIRGraph::ComputeInlineIFieldLoweringInfo(uint16_t field_idx, MIR* invoke,
bool MIRGraph::InlineSpecialMethodsGate() {
if ((cu_->disable_opt & (1 << kSuppressMethodInlining)) != 0 ||
- method_lowering_infos_.Size() == 0u) {
+ method_lowering_infos_.size() == 0u) {
return false;
}
if (cu_->compiler_driver->GetMethodInlinerMap() == nullptr) {
@@ -1337,7 +1332,7 @@ void MIRGraph::InlineSpecialMethodsStart() {
DCHECK(temp_scoped_alloc_.get() == nullptr);
temp_scoped_alloc_.reset(ScopedArenaAllocator::Create(&cu_->arena_stack));
- temp_bit_vector_size_ = method_lowering_infos_.Size();
+ temp_bit_vector_size_ = method_lowering_infos_.size();
temp_bit_vector_ = new (temp_scoped_alloc_.get()) ArenaBitVector(
temp_scoped_alloc_.get(), temp_bit_vector_size_, false, kBitMapMisc);
temp_bit_vector_->ClearAllBits();
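
Both dataflow passes above now merge predecessor state in one pass with a copied_first flag: the first processed predecessor is Copy()'d, later ones are Union()'d, and unprocessed ones (null ending_check_v) are skipped. A control-flow sketch with uint64_t bit masks standing in for ArenaBitVector and 0 playing the nullptr role:

    #include <cstdint>
    #include <vector>

    uint64_t MergePredecessorState(const std::vector<uint64_t>& pred_masks) {
      uint64_t merged = 0u;
      bool copied_first = false;
      for (uint64_t mask : pred_masks) {
        if (mask == 0u) {
          continue;  // Skip predecessors not yet processed, as the loops above do.
        }
        if (!copied_first) {
          copied_first = true;
          merged = mask;  // Copy() from the first processed predecessor.
        } else {
          merged |= mask;  // Union() in the rest.
        }
      }
      // The originals DCHECK(copied_first): at least one predecessor was processed.
      return merged;
    }
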
diff --git a/compiler/dex/mir_optimization_test.cc b/compiler/dex/mir_optimization_test.cc
index c510b52..6272332 100644
--- a/compiler/dex/mir_optimization_test.cc
+++ b/compiler/dex/mir_optimization_test.cc
@@ -76,8 +76,8 @@ class ClassInitCheckEliminationTest : public testing::Test {
{ opcode, bb, field_info }
void DoPrepareSFields(const SFieldDef* defs, size_t count) {
- cu_.mir_graph->sfield_lowering_infos_.Reset();
- cu_.mir_graph->sfield_lowering_infos_.Resize(count);
+ cu_.mir_graph->sfield_lowering_infos_.clear();
+ cu_.mir_graph->sfield_lowering_infos_.reserve(count);
for (size_t i = 0u; i != count; ++i) {
const SFieldDef* def = &defs[i];
MirSFieldLoweringInfo field_info(def->field_idx);
@@ -89,7 +89,7 @@ class ClassInitCheckEliminationTest : public testing::Test {
}
ASSERT_EQ(def->declaring_dex_file != 0u, field_info.IsResolved());
ASSERT_FALSE(field_info.IsInitialized());
- cu_.mir_graph->sfield_lowering_infos_.Insert(field_info);
+ cu_.mir_graph->sfield_lowering_infos_.push_back(field_info);
}
}
@@ -100,51 +100,43 @@ class ClassInitCheckEliminationTest : public testing::Test {
void DoPrepareBasicBlocks(const BBDef* defs, size_t count) {
cu_.mir_graph->block_id_map_.clear();
- cu_.mir_graph->block_list_.Reset();
+ cu_.mir_graph->block_list_.clear();
ASSERT_LT(3u, count); // null, entry, exit and at least one bytecode block.
ASSERT_EQ(kNullBlock, defs[0].type);
ASSERT_EQ(kEntryBlock, defs[1].type);
ASSERT_EQ(kExitBlock, defs[2].type);
for (size_t i = 0u; i != count; ++i) {
const BBDef* def = &defs[i];
- BasicBlock* bb = cu_.mir_graph->NewMemBB(def->type, i);
- cu_.mir_graph->block_list_.Insert(bb);
+ BasicBlock* bb = cu_.mir_graph->CreateNewBB(def->type);
if (def->num_successors <= 2) {
bb->successor_block_list_type = kNotUsed;
- bb->successor_blocks = nullptr;
bb->fall_through = (def->num_successors >= 1) ? def->successors[0] : 0u;
bb->taken = (def->num_successors >= 2) ? def->successors[1] : 0u;
} else {
bb->successor_block_list_type = kPackedSwitch;
bb->fall_through = 0u;
bb->taken = 0u;
- bb->successor_blocks = new (&cu_.arena) GrowableArray<SuccessorBlockInfo*>(
- &cu_.arena, def->num_successors, kGrowableArraySuccessorBlocks);
+ bb->successor_blocks.reserve(def->num_successors);
for (size_t j = 0u; j != def->num_successors; ++j) {
SuccessorBlockInfo* successor_block_info =
static_cast<SuccessorBlockInfo*>(cu_.arena.Alloc(sizeof(SuccessorBlockInfo),
kArenaAllocSuccessor));
successor_block_info->block = j;
successor_block_info->key = 0u; // Not used by class init check elimination.
- bb->successor_blocks->Insert(successor_block_info);
+ bb->successor_blocks.push_back(successor_block_info);
}
}
- bb->predecessors = new (&cu_.arena) GrowableArray<BasicBlockId>(
- &cu_.arena, def->num_predecessors, kGrowableArrayPredecessors);
- for (size_t j = 0u; j != def->num_predecessors; ++j) {
- ASSERT_NE(0u, def->predecessors[j]);
- bb->predecessors->Insert(def->predecessors[j]);
- }
+ bb->predecessors.assign(def->predecessors, def->predecessors + def->num_predecessors);
if (def->type == kDalvikByteCode || def->type == kEntryBlock || def->type == kExitBlock) {
bb->data_flow_info = static_cast<BasicBlockDataFlow*>(
cu_.arena.Alloc(sizeof(BasicBlockDataFlow), kArenaAllocDFInfo));
}
}
cu_.mir_graph->num_blocks_ = count;
- ASSERT_EQ(count, cu_.mir_graph->block_list_.Size());
- cu_.mir_graph->entry_block_ = cu_.mir_graph->block_list_.Get(1);
+ ASSERT_EQ(count, cu_.mir_graph->block_list_.size());
+ cu_.mir_graph->entry_block_ = cu_.mir_graph->block_list_[1];
ASSERT_EQ(kEntryBlock, cu_.mir_graph->entry_block_->block_type);
- cu_.mir_graph->exit_block_ = cu_.mir_graph->block_list_.Get(2);
+ cu_.mir_graph->exit_block_ = cu_.mir_graph->block_list_[2];
ASSERT_EQ(kExitBlock, cu_.mir_graph->exit_block_->block_type);
}
@@ -161,11 +153,11 @@ class ClassInitCheckEliminationTest : public testing::Test {
const MIRDef* def = &defs[i];
MIR* mir = &mirs_[i];
mir->dalvikInsn.opcode = def->opcode;
- ASSERT_LT(def->bbid, cu_.mir_graph->block_list_.Size());
- BasicBlock* bb = cu_.mir_graph->block_list_.Get(def->bbid);
+ ASSERT_LT(def->bbid, cu_.mir_graph->block_list_.size());
+ BasicBlock* bb = cu_.mir_graph->block_list_[def->bbid];
bb->AppendMIR(mir);
if (def->opcode >= Instruction::SGET && def->opcode <= Instruction::SPUT_SHORT) {
- ASSERT_LT(def->field_or_method_info, cu_.mir_graph->sfield_lowering_infos_.Size());
+ ASSERT_LT(def->field_or_method_info, cu_.mir_graph->sfield_lowering_infos_.size());
mir->meta.sfield_lowering_info = def->field_or_method_info;
}
mir->ssa_rep = nullptr;
@@ -408,12 +400,10 @@ TEST_F(ClassInitCheckEliminationTest, Catch) {
// Add successor block info to the check block.
BasicBlock* check_bb = cu_.mir_graph->GetBasicBlock(3u);
check_bb->successor_block_list_type = kCatch;
- check_bb->successor_blocks = new (&cu_.arena) GrowableArray<SuccessorBlockInfo*>(
- &cu_.arena, 2, kGrowableArraySuccessorBlocks);
SuccessorBlockInfo* successor_block_info = reinterpret_cast<SuccessorBlockInfo*>
(cu_.arena.Alloc(sizeof(SuccessorBlockInfo), kArenaAllocSuccessor));
successor_block_info->block = catch_handler->id;
- check_bb->successor_blocks->Insert(successor_block_info);
+ check_bb->successor_blocks.push_back(successor_block_info);
PrepareMIRs(mirs);
PerformClassInitCheckElimination();
ASSERT_EQ(arraysize(expected_ignore_clinit_check), mir_count_);
diff --git a/compiler/dex/portable/mir_to_gbc.cc b/compiler/dex/portable/mir_to_gbc.cc
index b0b0606..18155d1 100644
--- a/compiler/dex/portable/mir_to_gbc.cc
+++ b/compiler/dex/portable/mir_to_gbc.cc
@@ -64,7 +64,7 @@ LLVMInfo::~LLVMInfo() {
}
::llvm::Value* MirConverter::GetLLVMValue(int s_reg) {
- return llvm_values_.Get(s_reg);
+ return llvm_values_[s_reg];
}
void MirConverter::SetVregOnValue(::llvm::Value* val, int s_reg) {
@@ -87,7 +87,7 @@ void MirConverter::DefineValueOnly(::llvm::Value* val, int s_reg) {
}
placeholder->replaceAllUsesWith(val);
val->takeName(placeholder);
- llvm_values_.Put(s_reg, val);
+ llvm_values_[s_reg] = val;
::llvm::Instruction* inst = ::llvm::dyn_cast< ::llvm::Instruction>(placeholder);
DCHECK(inst != NULL);
inst->eraseFromParent();
@@ -1740,15 +1740,12 @@ bool MirConverter::BlockBitcodeConversion(BasicBlock* bb) {
art::llvm::IntrinsicHelper::CatchTargets);
::llvm::Value* switch_key =
irb_->CreateCall(intr, irb_->getInt32(mir->offset));
- GrowableArray<SuccessorBlockInfo*>::Iterator iter(bb->successor_blocks);
// New basic block to use for work half
::llvm::BasicBlock* work_bb =
::llvm::BasicBlock::Create(*context_, "", func_);
::llvm::SwitchInst* sw =
- irb_->CreateSwitch(switch_key, work_bb, bb->successor_blocks->Size());
- while (true) {
- SuccessorBlockInfo *successor_block_info = iter.Next();
- if (successor_block_info == NULL) break;
+ irb_->CreateSwitch(switch_key, work_bb, bb->successor_blocks.size());
+  for (SuccessorBlockInfo* successor_block_info : bb->successor_blocks) {
::llvm::BasicBlock *target =
GetLLVMBlock(successor_block_info->block);
int type_index = successor_block_info->key;
@@ -1908,18 +1905,18 @@ void MirConverter::MethodMIR2Bitcode() {
::llvm::Value* val;
RegLocation rl_temp = mir_graph_->reg_location_[i];
if ((mir_graph_->SRegToVReg(i) < 0) || rl_temp.high_word) {
- llvm_values_.Insert(0);
+ llvm_values_.push_back(0);
} else if ((i < mir_graph_->GetFirstInVR()) ||
(i >= (mir_graph_->GetFirstTempVR()))) {
::llvm::Constant* imm_value = mir_graph_->reg_location_[i].wide ?
irb_->getJLong(0) : irb_->getJInt(0);
val = EmitConst(imm_value, mir_graph_->reg_location_[i]);
val->setName(mir_graph_->GetSSAName(i));
- llvm_values_.Insert(val);
+ llvm_values_.push_back(val);
} else {
// Recover previously-created argument values
::llvm::Value* arg_val = arg_iter++;
- llvm_values_.Insert(arg_val);
+ llvm_values_.push_back(arg_val);
}
}
diff --git a/compiler/dex/portable/mir_to_gbc.h b/compiler/dex/portable/mir_to_gbc.h
index e6dee5d..94ae3f7 100644
--- a/compiler/dex/portable/mir_to_gbc.h
+++ b/compiler/dex/portable/mir_to_gbc.h
@@ -31,6 +31,7 @@
#include "llvm/intrinsic_helper.h"
#include "llvm/llvm_compilation_unit.h"
#include "safe_map.h"
+#include "utils/arena_containers.h"
namespace llvm {
class Module;
@@ -104,9 +105,10 @@ class MirConverter : public Backend {
placeholder_bb_(NULL),
entry_bb_(NULL),
entry_target_bb_(NULL),
- llvm_values_(arena, mir_graph->GetNumSSARegs()),
+ llvm_values_(arena->Adapter()),
temp_name_(0),
current_dalvik_offset_(0) {
+ llvm_values_.reserve(mir_graph->GetNumSSARegs());
if (kIsDebugBuild) {
cu->enable_debug |= (1 << kDebugVerifyBitcode);
}
@@ -228,7 +230,7 @@ class MirConverter : public Backend {
::llvm::BasicBlock* entry_bb_;
::llvm::BasicBlock* entry_target_bb_;
std::string bitcode_filename_;
- GrowableArray< ::llvm::Value*> llvm_values_;
+ ArenaVector< ::llvm::Value*> llvm_values_;
int32_t temp_name_;
SafeMap<int32_t, ::llvm::BasicBlock*> id_to_block_map_; // block id -> llvm bb.
int current_dalvik_offset_;
diff --git a/compiler/dex/post_opt_passes.cc b/compiler/dex/post_opt_passes.cc
index b3d5c8a..675dbcf 100644
--- a/compiler/dex/post_opt_passes.cc
+++ b/compiler/dex/post_opt_passes.cc
@@ -84,7 +84,7 @@ void CalculatePredecessors::Start(PassDataHolder* data) const {
// First clear all predecessors.
AllNodesIterator first(mir_graph);
for (BasicBlock* bb = first.Next(); bb != nullptr; bb = first.Next()) {
- bb->predecessors->Reset();
+ bb->predecessors.clear();
}
// Now calculate all predecessors.
@@ -100,7 +100,7 @@ void CalculatePredecessors::Start(PassDataHolder* data) const {
// Now iterate through the children to set the predecessor bits.
for (BasicBlock* child = child_iter.Next(); child != nullptr; child = child_iter.Next()) {
- child->predecessors->Insert(bb->id);
+ child->predecessors.push_back(bb->id);
}
}
}
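
CalculatePredecessors above is the canonical rebuild: clear every block's predecessor list, then append one entry per successor edge. A self-contained sketch where Block and the id-indexed block table are illustrative:

    #include <cstdint>
    #include <vector>

    struct Block {
      uint16_t id;
      std::vector<uint16_t> successors;    // Ids of child blocks.
      std::vector<uint16_t> predecessors;  // Rebuilt below.
    };

    void CalculatePredecessors(std::vector<Block>& blocks) {
      for (Block& bb : blocks) {
        bb.predecessors.clear();  // First clear all predecessors.
      }
      for (Block& bb : blocks) {
        for (uint16_t child_id : bb.successors) {
          blocks[child_id].predecessors.push_back(bb.id);  // One entry per edge.
        }
      }
    }
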
diff --git a/compiler/dex/quick/arm/call_arm.cc b/compiler/dex/quick/arm/call_arm.cc
index fc98d31..f6588fe 100644
--- a/compiler/dex/quick/arm/call_arm.cc
+++ b/compiler/dex/quick/arm/call_arm.cc
@@ -55,7 +55,7 @@ void ArmMir2Lir::GenLargeSparseSwitch(MIR* mir, uint32_t table_offset, RegLocati
tab_rec->vaddr = current_dalvik_offset_;
uint32_t size = table[1];
tab_rec->targets = static_cast<LIR**>(arena_->Alloc(size * sizeof(LIR*), kArenaAllocLIR));
- switch_tables_.Insert(tab_rec);
+ switch_tables_.push_back(tab_rec);
// Get the switch value
rl_src = LoadValue(rl_src, kCoreReg);
@@ -104,7 +104,7 @@ void ArmMir2Lir::GenLargePackedSwitch(MIR* mir, uint32_t table_offset, RegLocati
uint32_t size = table[1];
tab_rec->targets =
static_cast<LIR**>(arena_->Alloc(size * sizeof(LIR*), kArenaAllocLIR));
- switch_tables_.Insert(tab_rec);
+ switch_tables_.push_back(tab_rec);
// Get the switch value
rl_src = LoadValue(rl_src, kCoreReg);
@@ -158,7 +158,7 @@ void ArmMir2Lir::GenFillArrayData(MIR* mir, DexOffset table_offset, RegLocation
uint32_t size = tab_rec->table[2] | ((static_cast<uint32_t>(tab_rec->table[3])) << 16);
tab_rec->size = (size * width) + 8;
- fill_array_data_.Insert(tab_rec);
+ fill_array_data_.push_back(tab_rec);
// Making a call - use explicit registers
FlushAllRegs(); /* Everything to home location */
diff --git a/compiler/dex/quick/arm/target_arm.cc b/compiler/dex/quick/arm/target_arm.cc
index 0be478d..aaf4449 100644
--- a/compiler/dex/quick/arm/target_arm.cc
+++ b/compiler/dex/quick/arm/target_arm.cc
@@ -568,16 +568,16 @@ Mir2Lir* ArmCodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
}
void ArmMir2Lir::CompilerInitializeRegAlloc() {
- reg_pool_ = new (arena_) RegisterPool(this, arena_, core_regs, empty_pool /* core64 */, sp_regs,
- dp_regs, reserved_regs, empty_pool /* reserved64 */,
- core_temps, empty_pool /* core64_temps */, sp_temps,
- dp_temps);
+ reg_pool_.reset(new (arena_) RegisterPool(this, arena_, core_regs, empty_pool /* core64 */,
+ sp_regs, dp_regs,
+ reserved_regs, empty_pool /* reserved64 */,
+ core_temps, empty_pool /* core64_temps */,
+ sp_temps, dp_temps));
// Target-specific adjustments.
// Alias single precision floats to appropriate half of overlapping double.
- GrowableArray<RegisterInfo*>::Iterator it(&reg_pool_->sp_regs_);
- for (RegisterInfo* info = it.Next(); info != nullptr; info = it.Next()) {
+ for (RegisterInfo* info : reg_pool_->sp_regs_) {
int sp_reg_num = info->GetReg().GetRegNum();
int dp_reg_num = sp_reg_num >> 1;
RegStorage dp_reg = RegStorage::Solo64(RegStorage::kFloatingPoint | dp_reg_num);
@@ -784,8 +784,7 @@ RegStorage ArmMir2Lir::AllocPreservedDouble(int s_reg) {
* TODO: until runtime support is in, make sure we avoid promoting the same vreg to
* different underlying physical registers.
*/
- GrowableArray<RegisterInfo*>::Iterator it(&reg_pool_->dp_regs_);
- for (RegisterInfo* info = it.Next(); info != nullptr; info = it.Next()) {
+ for (RegisterInfo* info : reg_pool_->dp_regs_) {
if (!info->IsTemp() && !info->InUse()) {
res = info->GetReg();
info->MarkInUse();
@@ -809,8 +808,7 @@ RegStorage ArmMir2Lir::AllocPreservedDouble(int s_reg) {
// Reserve a callee-save sp single register.
RegStorage ArmMir2Lir::AllocPreservedSingle(int s_reg) {
RegStorage res;
- GrowableArray<RegisterInfo*>::Iterator it(&reg_pool_->sp_regs_);
- for (RegisterInfo* info = it.Next(); info != nullptr; info = it.Next()) {
+ for (RegisterInfo* info : reg_pool_->sp_regs_) {
if (!info->IsTemp() && !info->InUse()) {
res = info->GetReg();
int p_map_idx = SRegToPMap(s_reg);
diff --git a/compiler/dex/quick/arm64/call_arm64.cc b/compiler/dex/quick/arm64/call_arm64.cc
index b9c0990..6081f28 100644
--- a/compiler/dex/quick/arm64/call_arm64.cc
+++ b/compiler/dex/quick/arm64/call_arm64.cc
@@ -55,7 +55,7 @@ void Arm64Mir2Lir::GenLargeSparseSwitch(MIR* mir, uint32_t table_offset, RegLoca
tab_rec->vaddr = current_dalvik_offset_;
uint32_t size = table[1];
tab_rec->targets = static_cast<LIR**>(arena_->Alloc(size * sizeof(LIR*), kArenaAllocLIR));
- switch_tables_.Insert(tab_rec);
+ switch_tables_.push_back(tab_rec);
// Get the switch value
rl_src = LoadValue(rl_src, kCoreReg);
@@ -108,7 +108,7 @@ void Arm64Mir2Lir::GenLargePackedSwitch(MIR* mir, uint32_t table_offset, RegLoca
uint32_t size = table[1];
tab_rec->targets =
static_cast<LIR**>(arena_->Alloc(size * sizeof(LIR*), kArenaAllocLIR));
- switch_tables_.Insert(tab_rec);
+ switch_tables_.push_back(tab_rec);
// Get the switch value
rl_src = LoadValue(rl_src, kCoreReg);
@@ -167,7 +167,7 @@ void Arm64Mir2Lir::GenFillArrayData(MIR* mir, DexOffset table_offset, RegLocatio
uint32_t size = tab_rec->table[2] | ((static_cast<uint32_t>(tab_rec->table[3])) << 16);
tab_rec->size = (size * width) + 8;
- fill_array_data_.Insert(tab_rec);
+ fill_array_data_.push_back(tab_rec);
// Making a call - use explicit registers
FlushAllRegs(); /* Everything to home location */
diff --git a/compiler/dex/quick/arm64/target_arm64.cc b/compiler/dex/quick/arm64/target_arm64.cc
index d7d5651..fe0554c 100644
--- a/compiler/dex/quick/arm64/target_arm64.cc
+++ b/compiler/dex/quick/arm64/target_arm64.cc
@@ -602,14 +602,13 @@ Mir2Lir* Arm64CodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph
}
void Arm64Mir2Lir::CompilerInitializeRegAlloc() {
- reg_pool_ = new (arena_) RegisterPool(this, arena_, core_regs, core64_regs, sp_regs, dp_regs,
- reserved_regs, reserved64_regs, core_temps, core64_temps,
- sp_temps, dp_temps);
+ reg_pool_.reset(new (arena_) RegisterPool(this, arena_, core_regs, core64_regs, sp_regs, dp_regs,
+ reserved_regs, reserved64_regs,
+ core_temps, core64_temps, sp_temps, dp_temps));
// Target-specific adjustments.
// Alias single precision float registers to corresponding double registers.
- GrowableArray<RegisterInfo*>::Iterator fp_it(&reg_pool_->sp_regs_);
- for (RegisterInfo* info = fp_it.Next(); info != nullptr; info = fp_it.Next()) {
+ for (RegisterInfo* info : reg_pool_->sp_regs_) {
int fp_reg_num = info->GetReg().GetRegNum();
RegStorage dp_reg = RegStorage::FloatSolo64(fp_reg_num);
RegisterInfo* dp_reg_info = GetRegInfo(dp_reg);
@@ -622,8 +621,7 @@ void Arm64Mir2Lir::CompilerInitializeRegAlloc() {
}
// Alias 32bit W registers to corresponding 64bit X registers.
- GrowableArray<RegisterInfo*>::Iterator w_it(&reg_pool_->core_regs_);
- for (RegisterInfo* info = w_it.Next(); info != nullptr; info = w_it.Next()) {
+ for (RegisterInfo* info : reg_pool_->core_regs_) {
int x_reg_num = info->GetReg().GetRegNum();
RegStorage x_reg = RegStorage::Solo64(x_reg_num);
RegisterInfo* x_reg_info = GetRegInfo(x_reg);
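
The CompilerInitializeRegAlloc() rewrites above imply reg_pool_ is now a smart pointer (reset() over placement-new'd arena storage); the matching piece is the no-op class-level operator delete added to RegisterPool in mir_to_lir.h below, which lets the deleter run the destructor without freeing memory the arena owns. A standalone sketch under that assumption; Arena is a toy bump allocator, not the ART ArenaAllocator:

    #include <cstddef>
    #include <memory>

    struct Arena {  // Toy bump allocator; storage is reclaimed wholesale.
      alignas(alignof(std::max_align_t)) char buffer[1024];
      std::size_t used = 0;
      void* Alloc(std::size_t size) {
        void* p = buffer + used;
        used += size;
        return p;
      }
    };

    struct RegisterPool {
      static void* operator new(std::size_t size, Arena* arena) {
        return arena->Alloc(size);  // Mirrors new (arena_) RegisterPool(...).
      }
      static void operator delete(void*) {}  // No-op: the arena owns the memory.
      int next_core_reg = 0;
    };

    int main() {
      Arena arena;
      std::unique_ptr<RegisterPool> reg_pool;
      reg_pool.reset(new (&arena) RegisterPool());  // Destructor will run; memory stays put.
      return reg_pool->next_core_reg;
    }
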
diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc
index d90bce1..bd2a942 100644
--- a/compiler/dex/quick/codegen_util.cc
+++ b/compiler/dex/quick/codegen_util.cc
@@ -530,10 +530,7 @@ void Mir2Lir::InstallLiteralPools() {
/* Write the switch tables to the output stream */
void Mir2Lir::InstallSwitchTables() {
- GrowableArray<SwitchTable*>::Iterator iterator(&switch_tables_);
- while (true) {
- Mir2Lir::SwitchTable* tab_rec = iterator.Next();
- if (tab_rec == NULL) break;
+ for (Mir2Lir::SwitchTable* tab_rec : switch_tables_) {
AlignBuffer(code_buffer_, tab_rec->offset);
/*
* For Arm, our reference point is the address of the bx
@@ -590,10 +587,7 @@ void Mir2Lir::InstallSwitchTables() {
/* Write the fill array data to the output stream */
void Mir2Lir::InstallFillArrayData() {
- GrowableArray<FillArrayData*>::Iterator iterator(&fill_array_data_);
- while (true) {
- Mir2Lir::FillArrayData *tab_rec = iterator.Next();
- if (tab_rec == NULL) break;
+ for (Mir2Lir::FillArrayData* tab_rec : fill_array_data_) {
AlignBuffer(code_buffer_, tab_rec->offset);
for (int i = 0; i < (tab_rec->size + 1) / 2; i++) {
code_buffer_.push_back(tab_rec->table[i] & 0xFF);
@@ -801,10 +795,7 @@ int Mir2Lir::AssignLiteralOffset(CodeOffset offset) {
}
int Mir2Lir::AssignSwitchTablesOffset(CodeOffset offset) {
- GrowableArray<SwitchTable*>::Iterator iterator(&switch_tables_);
- while (true) {
- Mir2Lir::SwitchTable* tab_rec = iterator.Next();
- if (tab_rec == NULL) break;
+ for (Mir2Lir::SwitchTable* tab_rec : switch_tables_) {
tab_rec->offset = offset;
if (tab_rec->table[0] == Instruction::kSparseSwitchSignature) {
offset += tab_rec->table[1] * (sizeof(int) * 2);
@@ -818,15 +809,12 @@ int Mir2Lir::AssignSwitchTablesOffset(CodeOffset offset) {
}
int Mir2Lir::AssignFillArrayDataOffset(CodeOffset offset) {
- GrowableArray<FillArrayData*>::Iterator iterator(&fill_array_data_);
- while (true) {
- Mir2Lir::FillArrayData *tab_rec = iterator.Next();
- if (tab_rec == NULL) break;
+ for (Mir2Lir::FillArrayData* tab_rec : fill_array_data_) {
tab_rec->offset = offset;
offset += tab_rec->size;
// word align
offset = RoundUp(offset, 4);
- }
+ }
return offset;
}
@@ -878,10 +866,7 @@ void Mir2Lir::MarkSparseCaseLabels(Mir2Lir::SwitchTable* tab_rec) {
}
void Mir2Lir::ProcessSwitchTables() {
- GrowableArray<SwitchTable*>::Iterator iterator(&switch_tables_);
- while (true) {
- Mir2Lir::SwitchTable *tab_rec = iterator.Next();
- if (tab_rec == NULL) break;
+ for (Mir2Lir::SwitchTable* tab_rec : switch_tables_) {
if (tab_rec->table[0] == Instruction::kPackedSwitchSignature) {
MarkPackedCaseLabels(tab_rec);
} else if (tab_rec->table[0] == Instruction::kSparseSwitchSignature) {
@@ -1006,18 +991,18 @@ Mir2Lir::Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena
first_fixup_(NULL),
cu_(cu),
mir_graph_(mir_graph),
- switch_tables_(arena, 4, kGrowableArraySwitchTables),
- fill_array_data_(arena, 4, kGrowableArrayFillArrayData),
- tempreg_info_(arena, 20, kGrowableArrayMisc),
- reginfo_map_(arena, RegStorage::kMaxRegs, kGrowableArrayMisc),
- pointer_storage_(arena, 128, kGrowableArrayMisc),
+ switch_tables_(arena->Adapter(kArenaAllocSwitchTable)),
+ fill_array_data_(arena->Adapter(kArenaAllocFillArrayData)),
+ tempreg_info_(arena->Adapter()),
+ reginfo_map_(arena->Adapter()),
+ pointer_storage_(arena->Adapter()),
data_offset_(0),
total_size_(0),
block_label_list_(NULL),
promotion_map_(NULL),
current_dalvik_offset_(0),
estimated_native_code_size_(0),
- reg_pool_(NULL),
+ reg_pool_(nullptr),
live_sreg_(0),
core_vmap_table_(mir_graph->GetArena()->Adapter()),
fp_vmap_table_(mir_graph->GetArena()->Adapter()),
@@ -1028,9 +1013,15 @@ Mir2Lir::Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena
fp_spill_mask_(0),
first_lir_insn_(NULL),
last_lir_insn_(NULL),
- slow_paths_(arena, 32, kGrowableArraySlowPaths),
+ slow_paths_(arena->Adapter(kArenaAllocSlowPaths)),
mem_ref_type_(ResourceMask::kHeapRef),
mask_cache_(arena) {
+ switch_tables_.reserve(4);
+ fill_array_data_.reserve(4);
+ tempreg_info_.reserve(20);
+ reginfo_map_.reserve(RegStorage::kMaxRegs);
+ pointer_storage_.reserve(128);
+ slow_paths_.reserve(32);
// Reserve pointer id 0 for NULL.
size_t null_idx = WrapPointer(NULL);
DCHECK_EQ(null_idx, 0U);
@@ -1223,7 +1214,7 @@ LIR *Mir2Lir::OpCmpMemImmBranch(ConditionCode cond, RegStorage temp_reg, RegStor
}
void Mir2Lir::AddSlowPath(LIRSlowPath* slowpath) {
- slow_paths_.Insert(slowpath);
+ slow_paths_.push_back(slowpath);
ResetDefTracking();
}
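
In the Mir2Lir constructor above, the old GrowableArray(arena, initial_capacity, kind) arguments split in two: the allocator adapter moves into the member-initializer list and the capacity becomes a reserve() call in the body. A reduced sketch of the pattern, std::vector again standing in for ArenaVector, with the capacities taken from the hunk:

    #include <vector>

    class Codegen {  // Illustrative stand-in for Mir2Lir.
     public:
      Codegen() {
        // Capacity hints only; each size() remains 0 until elements are added.
        switch_tables_.reserve(4);
        fill_array_data_.reserve(4);
        tempreg_info_.reserve(20);
        pointer_storage_.reserve(128);
      }
     private:
      std::vector<void*> switch_tables_;
      std::vector<void*> fill_array_data_;
      std::vector<void*> tempreg_info_;
      std::vector<void*> pointer_storage_;
    };
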
diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc
index fbe710b..9f7a881 100644
--- a/compiler/dex/quick/gen_common.cc
+++ b/compiler/dex/quick/gen_common.cc
@@ -757,11 +757,10 @@ void Mir2Lir::GenSget(MIR* mir, RegLocation rl_dest, OpSize size, Primitive::Typ
void Mir2Lir::HandleSlowPaths() {
-  // We should check slow_paths_.Size() every time, because a new slow path
-  // may be created during slowpath->Compile().
-  for (size_t i = 0; i < slow_paths_.Size(); ++i) {
-    LIRSlowPath* slowpath = slow_paths_.Get(i);
+  // We should check slow_paths_.size() every time, because a new slow path
+  // may be created during slowpath->Compile(); a range-based for loop would
+  // cache end() and break when such an addition reallocates the vector.
+  for (size_t i = 0; i < slow_paths_.size(); ++i) {
+    LIRSlowPath* slowpath = slow_paths_[i];
slowpath->Compile();
}
- slow_paths_.Reset();
+ slow_paths_.clear();
}
void Mir2Lir::GenIGet(MIR* mir, int opt_flags, OpSize size, Primitive::Type type,
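
HandleSlowPaths() keeps an index loop on purpose: Compile() may append a new slow path, which can reallocate the vector, so the bound must be re-read every iteration and no iterator may be held across the call. A sketch of why the index form is the safe one; SlowPath is an illustrative stand-in for LIRSlowPath:

    #include <cstddef>
    #include <vector>

    struct SlowPath {
      std::vector<SlowPath*>* owner = nullptr;
      SlowPath* spawn = nullptr;  // A slow path created during Compile(), if any.
      void Compile() {
        if (spawn != nullptr) {
          owner->push_back(spawn);  // May reallocate; fine with the index loop.
        }
      }
    };

    void HandleSlowPaths(std::vector<SlowPath*>& slow_paths) {
      // size() is re-read every pass, so paths added during Compile() are
      // themselves compiled; a range-for would cache end() and break.
      for (std::size_t i = 0; i < slow_paths.size(); ++i) {
        slow_paths[i]->Compile();  // Pointer is loaded before Compile() runs.
      }
      slow_paths.clear();
    }
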
diff --git a/compiler/dex/quick/mips/call_mips.cc b/compiler/dex/quick/mips/call_mips.cc
index f3edd7e..6536c41 100644
--- a/compiler/dex/quick/mips/call_mips.cc
+++ b/compiler/dex/quick/mips/call_mips.cc
@@ -74,7 +74,7 @@ void MipsMir2Lir::GenLargeSparseSwitch(MIR* mir, DexOffset table_offset, RegLoca
int elements = table[1];
tab_rec->targets =
static_cast<LIR**>(arena_->Alloc(elements * sizeof(LIR*), kArenaAllocLIR));
- switch_tables_.Insert(tab_rec);
+ switch_tables_.push_back(tab_rec);
// The table is composed of 8-byte key/disp pairs
int byte_size = elements * 8;
@@ -151,7 +151,7 @@ void MipsMir2Lir::GenLargePackedSwitch(MIR* mir, DexOffset table_offset, RegLoca
int size = table[1];
tab_rec->targets = static_cast<LIR**>(arena_->Alloc(size * sizeof(LIR*),
kArenaAllocLIR));
- switch_tables_.Insert(tab_rec);
+ switch_tables_.push_back(tab_rec);
// Get the switch value
rl_src = LoadValue(rl_src, kCoreReg);
@@ -232,7 +232,7 @@ void MipsMir2Lir::GenFillArrayData(MIR* mir, DexOffset table_offset, RegLocation
uint32_t size = tab_rec->table[2] | ((static_cast<uint32_t>(tab_rec->table[3])) << 16);
tab_rec->size = (size * width) + 8;
- fill_array_data_.Insert(tab_rec);
+ fill_array_data_.push_back(tab_rec);
// Making a call - use explicit registers
FlushAllRegs(); /* Everything to home location */
diff --git a/compiler/dex/quick/mips/target_mips.cc b/compiler/dex/quick/mips/target_mips.cc
index 9c4426f..d3719ab 100644
--- a/compiler/dex/quick/mips/target_mips.cc
+++ b/compiler/dex/quick/mips/target_mips.cc
@@ -430,16 +430,16 @@ bool MipsMir2Lir::GenMemBarrier(MemBarrierKind barrier_kind) {
}
void MipsMir2Lir::CompilerInitializeRegAlloc() {
- reg_pool_ = new (arena_) RegisterPool(this, arena_, core_regs, empty_pool /* core64 */, sp_regs,
- dp_regs, reserved_regs, empty_pool /* reserved64 */,
- core_temps, empty_pool /* core64_temps */, sp_temps,
- dp_temps);
+ reg_pool_.reset(new (arena_) RegisterPool(this, arena_, core_regs, empty_pool /* core64 */,
+ sp_regs, dp_regs,
+ reserved_regs, empty_pool /* reserved64 */,
+ core_temps, empty_pool /* core64_temps */,
+ sp_temps, dp_temps));
// Target-specific adjustments.
// Alias single precision floats to appropriate half of overlapping double.
- GrowableArray<RegisterInfo*>::Iterator it(&reg_pool_->sp_regs_);
- for (RegisterInfo* info = it.Next(); info != nullptr; info = it.Next()) {
+ for (RegisterInfo* info : reg_pool_->sp_regs_) {
int sp_reg_num = info->GetReg().GetRegNum();
#if (FR_BIT == 0)
int dp_reg_num = sp_reg_num & ~1;
diff --git a/compiler/dex/quick/mir_to_lir-inl.h b/compiler/dex/quick/mir_to_lir-inl.h
index 22588f3..0aefc2d 100644
--- a/compiler/dex/quick/mir_to_lir-inl.h
+++ b/compiler/dex/quick/mir_to_lir-inl.h
@@ -142,8 +142,9 @@ inline LIR* Mir2Lir::NewLIR5(int opcode, int dest, int src1, int src2, int info1
*/
inline void Mir2Lir::SetupRegMask(ResourceMask* mask, int reg) {
DCHECK_EQ((reg & ~RegStorage::kRegValMask), 0);
- DCHECK(reginfo_map_.Get(reg) != nullptr) << "No info for 0x" << reg;
- *mask = mask->Union(reginfo_map_.Get(reg)->DefUseMask());
+ DCHECK_LT(static_cast<size_t>(reg), reginfo_map_.size());
+ DCHECK(reginfo_map_[reg] != nullptr) << "No info for 0x" << reg;
+ *mask = mask->Union(reginfo_map_[reg]->DefUseMask());
}
/*
@@ -151,8 +152,9 @@ inline void Mir2Lir::SetupRegMask(ResourceMask* mask, int reg) {
*/
inline void Mir2Lir::ClearRegMask(ResourceMask* mask, int reg) {
DCHECK_EQ((reg & ~RegStorage::kRegValMask), 0);
- DCHECK(reginfo_map_.Get(reg) != nullptr) << "No info for 0x" << reg;
- *mask = mask->ClearBits(reginfo_map_.Get(reg)->DefUseMask());
+ DCHECK_LT(static_cast<size_t>(reg), reginfo_map_.size());
+ DCHECK(reginfo_map_[reg] != nullptr) << "No info for 0x" << reg;
+ *mask = mask->ClearBits(reginfo_map_[reg]->DefUseMask());
}
/*
@@ -256,8 +258,7 @@ inline void Mir2Lir::SetupResourceMasks(LIR* lir) {
}
inline art::Mir2Lir::RegisterInfo* Mir2Lir::GetRegInfo(RegStorage reg) {
- RegisterInfo* res = reg.IsPair() ? reginfo_map_.Get(reg.GetLowReg()) :
- reginfo_map_.Get(reg.GetReg());
+ RegisterInfo* res = reg.IsPair() ? reginfo_map_[reg.GetLowReg()] : reginfo_map_[reg.GetReg()];
DCHECK(res != nullptr);
return res;
}
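
Note that operator[] on a vector is unchecked, so the rewrite adds an explicit DCHECK_LT before each access. A sketch of the dense-map idiom with plain assert standing in for DCHECK (kMaxRegs and the stub type are assumptions here, not ART's definitions):

#include <cassert>
#include <cstddef>
#include <vector>

struct RegisterInfo { unsigned def_use_mask; };  // stub
constexpr std::size_t kMaxRegs = 64;             // assumed bound

// Dense map keyed by register number, sized once so operator[] never needs to grow it.
std::vector<RegisterInfo*> reginfo_map(kMaxRegs, nullptr);

RegisterInfo* GetInfo(int reg) {
  assert(static_cast<std::size_t>(reg) < reginfo_map.size());  // mirrors the DCHECK_LT above
  assert(reginfo_map[reg] != nullptr);                         // entry must be populated
  return reginfo_map[reg];
}
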
diff --git a/compiler/dex/quick/mir_to_lir.cc b/compiler/dex/quick/mir_to_lir.cc
index 96f00e7..6942c0f 100644
--- a/compiler/dex/quick/mir_to_lir.cc
+++ b/compiler/dex/quick/mir_to_lir.cc
@@ -1268,13 +1268,12 @@ bool Mir2Lir::MethodBlockCodeGen(BasicBlock* bb) {
bool Mir2Lir::SpecialMIR2LIR(const InlineMethod& special) {
cu_->NewTimingSplit("SpecialMIR2LIR");
// Find the first DalvikByteCode block.
- int num_reachable_blocks = mir_graph_->GetNumReachableBlocks();
+ DCHECK_EQ(mir_graph_->GetNumReachableBlocks(), mir_graph_->GetDfsOrder().size());
BasicBlock* bb = NULL;
- for (int idx = 0; idx < num_reachable_blocks; idx++) {
- // TODO: no direct access of growable lists.
- int dfs_index = mir_graph_->GetDfsOrder()->Get(idx);
- bb = mir_graph_->GetBasicBlock(dfs_index);
- if (bb->block_type == kDalvikByteCode) {
+ for (BasicBlockId dfs_id : mir_graph_->GetDfsOrder()) {
+ BasicBlock* candidate = mir_graph_->GetBasicBlock(dfs_id);
+ if (candidate->block_type == kDalvikByteCode) {
+ bb = candidate;
break;
}
}
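
The rewritten SpecialMIR2LIR() loop is a find-first scan over the DFS order; the same shape can be written with std::find_if. A sketch under stub types (not ART's BasicBlock):

#include <algorithm>
#include <vector>

enum BBType { kDalvikByteCode, kOther };
struct BasicBlock { BBType block_type; };

// First DalvikByteCode block in DFS order, or nullptr when there is none.
BasicBlock* FindFirstDalvikBlock(const std::vector<BasicBlock*>& dfs_order) {
  auto it = std::find_if(dfs_order.begin(), dfs_order.end(),
                         [](const BasicBlock* bb) { return bb->block_type == kDalvikByteCode; });
  return (it == dfs_order.end()) ? nullptr : *it;
}
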
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
index ea722ab..01aa11d 100644
--- a/compiler/dex/quick/mir_to_lir.h
+++ b/compiler/dex/quick/mir_to_lir.h
@@ -33,7 +33,6 @@
#include "utils/array_ref.h"
#include "utils/arena_allocator.h"
#include "utils/arena_containers.h"
-#include "utils/growable_array.h"
#include "utils/stack_checks.h"
namespace art {
@@ -437,20 +436,21 @@ class Mir2Lir : public Backend {
static void* operator new(size_t size, ArenaAllocator* arena) {
return arena->Alloc(size, kArenaAllocRegAlloc);
}
+ static void operator delete(void* ptr) { UNUSED(ptr); }
void ResetNextTemp() {
next_core_reg_ = 0;
next_sp_reg_ = 0;
next_dp_reg_ = 0;
}
- GrowableArray<RegisterInfo*> core_regs_;
+ ArenaVector<RegisterInfo*> core_regs_;
int next_core_reg_;
- GrowableArray<RegisterInfo*> core64_regs_;
+ ArenaVector<RegisterInfo*> core64_regs_;
int next_core64_reg_;
- GrowableArray<RegisterInfo*> sp_regs_; // Single precision float.
+ ArenaVector<RegisterInfo*> sp_regs_; // Single precision float.
int next_sp_reg_;
- GrowableArray<RegisterInfo*> dp_regs_; // Double precision float.
+ ArenaVector<RegisterInfo*> dp_regs_; // Double precision float.
int next_dp_reg_;
- GrowableArray<RegisterInfo*>* ref_regs_; // Points to core_regs_ or core64_regs_
+ ArenaVector<RegisterInfo*>* ref_regs_; // Points to core_regs_ or core64_regs_
int* next_ref_reg_;
private:
@@ -597,13 +597,13 @@ class Mir2Lir : public Backend {
* may be worth conditionally-compiling a set of identity functions here.
*/
uint32_t WrapPointer(void* pointer) {
- uint32_t res = pointer_storage_.Size();
- pointer_storage_.Insert(pointer);
+ uint32_t res = pointer_storage_.size();
+ pointer_storage_.push_back(pointer);
return res;
}
void* UnwrapPointer(size_t index) {
- return pointer_storage_.Get(index);
+ return pointer_storage_[index];
}
// strdup(), but allocates from the arena.
@@ -713,7 +713,7 @@ class Mir2Lir : public Backend {
void SimpleRegAlloc();
void ResetRegPool();
void CompilerInitPool(RegisterInfo* info, RegStorage* regs, int num);
- void DumpRegPool(GrowableArray<RegisterInfo*>* regs);
+ void DumpRegPool(ArenaVector<RegisterInfo*>* regs);
void DumpCoreRegPool();
void DumpFpRegPool();
void DumpRegPools();
@@ -728,7 +728,7 @@ class Mir2Lir : public Backend {
RegStorage AllocPreservedFpReg(int s_reg);
virtual RegStorage AllocPreservedSingle(int s_reg);
virtual RegStorage AllocPreservedDouble(int s_reg);
- RegStorage AllocTempBody(GrowableArray<RegisterInfo*> &regs, int* next_temp, bool required);
+ RegStorage AllocTempBody(ArenaVector<RegisterInfo*>& regs, int* next_temp, bool required);
virtual RegStorage AllocTemp(bool required = true);
virtual RegStorage AllocTempWide(bool required = true);
virtual RegStorage AllocTempRef(bool required = true);
@@ -739,7 +739,7 @@ class Mir2Lir : public Backend {
void FlushReg(RegStorage reg);
void FlushRegWide(RegStorage reg);
RegStorage AllocLiveReg(int s_reg, int reg_class, bool wide);
- RegStorage FindLiveReg(GrowableArray<RegisterInfo*> &regs, int s_reg);
+ RegStorage FindLiveReg(ArenaVector<RegisterInfo*>& regs, int s_reg);
virtual void FreeTemp(RegStorage reg);
virtual void FreeRegLocTemps(RegLocation rl_keep, RegLocation rl_free);
virtual bool IsLive(RegStorage reg);
@@ -1676,11 +1676,11 @@ class Mir2Lir : public Backend {
protected:
CompilationUnit* const cu_;
MIRGraph* const mir_graph_;
- GrowableArray<SwitchTable*> switch_tables_;
- GrowableArray<FillArrayData*> fill_array_data_;
- GrowableArray<RegisterInfo*> tempreg_info_;
- GrowableArray<RegisterInfo*> reginfo_map_;
- GrowableArray<void*> pointer_storage_;
+ ArenaVector<SwitchTable*> switch_tables_;
+ ArenaVector<FillArrayData*> fill_array_data_;
+ ArenaVector<RegisterInfo*> tempreg_info_;
+ ArenaVector<RegisterInfo*> reginfo_map_;
+ ArenaVector<void*> pointer_storage_;
CodeOffset current_code_offset_; // Working byte offset of machine instructions.
CodeOffset data_offset_; // starting offset of literal pool.
size_t total_size_; // header + code size.
@@ -1697,7 +1697,7 @@ class Mir2Lir : public Backend {
*/
DexOffset current_dalvik_offset_;
size_t estimated_native_code_size_; // Just an estimate; used to reserve code_buffer_ size.
- RegisterPool* reg_pool_;
+ std::unique_ptr<RegisterPool> reg_pool_;
/*
* Sanity checking for the register temp tracking. The same ssa
* name should never be associated with one temp register per
@@ -1720,7 +1720,7 @@ class Mir2Lir : public Backend {
LIR* first_lir_insn_;
LIR* last_lir_insn_;
- GrowableArray<LIRSlowPath*> slow_paths_;
+ ArenaVector<LIRSlowPath*> slow_paths_;
// The memory reference type for new LIRs.
// NOTE: Passing this as an explicit parameter by all functions that directly or indirectly
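
reg_pool_ becoming std::unique_ptr<RegisterPool> only works together with the no-op operator delete added to RegisterPool above: unique_ptr's default deleter runs the destructor and then the class operator delete, which must not touch storage the arena still owns. A self-contained sketch of this ownership pattern, with a toy Arena in place of ART's ArenaAllocator:

#include <cstddef>
#include <memory>
#include <vector>

// Toy bump-style arena: memory is reclaimed wholesale when the arena dies.
class Arena {
 public:
  void* Alloc(std::size_t size) {
    blocks_.emplace_back(new char[size]);
    return blocks_.back().get();
  }
 private:
  std::vector<std::unique_ptr<char[]>> blocks_;
};

class RegisterPool {
 public:
  static void* operator new(std::size_t size, Arena* arena) { return arena->Alloc(size); }
  // No-op: unique_ptr calls this after ~RegisterPool(); the arena owns the bytes.
  static void operator delete(void* ptr) { static_cast<void>(ptr); }
};

int main() {
  Arena arena;
  std::unique_ptr<RegisterPool> reg_pool;
  reg_pool.reset(new (&arena) RegisterPool());  // as in CompilerInitializeRegAlloc()
  // reset()/destruction run the destructor but leave the arena storage alone.
}

The destructor still runs, so members such as the ArenaVector fields are torn down normally; only the deallocation is suppressed, which is exactly the arena's bargain.
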
diff --git a/compiler/dex/quick/ralloc_util.cc b/compiler/dex/quick/ralloc_util.cc
index 195da0d..b171c78 100644
--- a/compiler/dex/quick/ralloc_util.cc
+++ b/compiler/dex/quick/ralloc_util.cc
@@ -28,8 +28,7 @@ namespace art {
* live until it is either explicitly killed or reallocated.
*/
void Mir2Lir::ResetRegPool() {
- GrowableArray<RegisterInfo*>::Iterator iter(&tempreg_info_);
- for (RegisterInfo* info = iter.Next(); info != NULL; info = iter.Next()) {
+ for (RegisterInfo* info : tempreg_info_) {
info->MarkFree();
}
// Reset temp tracking sanity check.
@@ -66,41 +65,38 @@ Mir2Lir::RegisterPool::RegisterPool(Mir2Lir* m2l, ArenaAllocator* arena,
const ArrayRef<const RegStorage>& core64_temps,
const ArrayRef<const RegStorage>& sp_temps,
const ArrayRef<const RegStorage>& dp_temps) :
- core_regs_(arena, core_regs.size()), next_core_reg_(0),
- core64_regs_(arena, core64_regs.size()), next_core64_reg_(0),
- sp_regs_(arena, sp_regs.size()), next_sp_reg_(0),
- dp_regs_(arena, dp_regs.size()), next_dp_reg_(0), m2l_(m2l) {
+ core_regs_(arena->Adapter()), next_core_reg_(0),
+ core64_regs_(arena->Adapter()), next_core64_reg_(0),
+ sp_regs_(arena->Adapter()), next_sp_reg_(0),
+ dp_regs_(arena->Adapter()), next_dp_reg_(0), m2l_(m2l) {
// Initialize the fast lookup map.
- m2l_->reginfo_map_.Reset();
- if (kIsDebugBuild) {
- m2l_->reginfo_map_.Resize(RegStorage::kMaxRegs);
- for (unsigned i = 0; i < RegStorage::kMaxRegs; i++) {
- m2l_->reginfo_map_.Insert(nullptr);
- }
- } else {
- m2l_->reginfo_map_.SetSize(RegStorage::kMaxRegs);
- }
+ m2l_->reginfo_map_.clear();
+ m2l_->reginfo_map_.resize(RegStorage::kMaxRegs, nullptr);
// Construct the register pool.
+ core_regs_.reserve(core_regs.size());
for (const RegStorage& reg : core_regs) {
RegisterInfo* info = new (arena) RegisterInfo(reg, m2l_->GetRegMaskCommon(reg));
- m2l_->reginfo_map_.Put(reg.GetReg(), info);
- core_regs_.Insert(info);
+ m2l_->reginfo_map_[reg.GetReg()] = info;
+ core_regs_.push_back(info);
}
+ core64_regs_.reserve(core64_regs.size());
for (const RegStorage& reg : core64_regs) {
RegisterInfo* info = new (arena) RegisterInfo(reg, m2l_->GetRegMaskCommon(reg));
- m2l_->reginfo_map_.Put(reg.GetReg(), info);
- core64_regs_.Insert(info);
+ m2l_->reginfo_map_[reg.GetReg()] = info;
+ core64_regs_.push_back(info);
}
+ sp_regs_.reserve(sp_regs.size());
for (const RegStorage& reg : sp_regs) {
RegisterInfo* info = new (arena) RegisterInfo(reg, m2l_->GetRegMaskCommon(reg));
- m2l_->reginfo_map_.Put(reg.GetReg(), info);
- sp_regs_.Insert(info);
+ m2l_->reginfo_map_[reg.GetReg()] = info;
+ sp_regs_.push_back(info);
}
+ dp_regs_.reserve(dp_regs.size());
for (const RegStorage& reg : dp_regs) {
RegisterInfo* info = new (arena) RegisterInfo(reg, m2l_->GetRegMaskCommon(reg));
- m2l_->reginfo_map_.Put(reg.GetReg(), info);
- dp_regs_.Insert(info);
+ m2l_->reginfo_map_[reg.GetReg()] = info;
+ dp_regs_.push_back(info);
}
// Keep special registers from being allocated.
@@ -127,10 +123,10 @@ Mir2Lir::RegisterPool::RegisterPool(Mir2Lir* m2l, ArenaAllocator* arena,
// Add an entry for InvalidReg with zero'd mask.
RegisterInfo* invalid_reg = new (arena) RegisterInfo(RegStorage::InvalidReg(), kEncodeNone);
- m2l_->reginfo_map_.Put(RegStorage::InvalidReg().GetReg(), invalid_reg);
+ m2l_->reginfo_map_[RegStorage::InvalidReg().GetReg()] = invalid_reg;
// Existence of core64 registers implies wide references.
- if (core64_regs_.Size() != 0) {
+ if (core64_regs_.size() != 0) {
ref_regs_ = &core64_regs_;
next_ref_reg_ = &next_core64_reg_;
} else {
@@ -139,10 +135,9 @@ Mir2Lir::RegisterPool::RegisterPool(Mir2Lir* m2l, ArenaAllocator* arena,
}
}
-void Mir2Lir::DumpRegPool(GrowableArray<RegisterInfo*>* regs) {
+void Mir2Lir::DumpRegPool(ArenaVector<RegisterInfo*>* regs) {
LOG(INFO) << "================================================";
- GrowableArray<RegisterInfo*>::Iterator it(regs);
- for (RegisterInfo* info = it.Next(); info != nullptr; info = it.Next()) {
+ for (RegisterInfo* info : *regs) {
LOG(INFO) << StringPrintf(
"R[%d:%d:%c]: T:%d, U:%d, W:%d, p:%d, LV:%d, D:%d, SR:%d, DEF:%d",
info->GetReg().GetReg(), info->GetReg().GetRegNum(), info->GetReg().IsFloat() ? 'f' : 'c',
@@ -222,8 +217,7 @@ void Mir2Lir::ClobberSReg(int s_reg) {
if (kIsDebugBuild && s_reg == live_sreg_) {
live_sreg_ = INVALID_SREG;
}
- GrowableArray<RegisterInfo*>::Iterator iter(&tempreg_info_);
- for (RegisterInfo* info = iter.Next(); info != NULL; info = iter.Next()) {
+ for (RegisterInfo* info : tempreg_info_) {
if (info->SReg() == s_reg) {
if (info->GetReg().NotExactlyEquals(info->Partner())) {
// Dealing with a pair - clobber the other half.
@@ -278,8 +272,7 @@ RegStorage Mir2Lir::AllocPreservedCoreReg(int s_reg) {
* happens from the single or double pool. This entire section of code could stand
* a good refactoring.
*/
- GrowableArray<RegisterInfo*>::Iterator it(&reg_pool_->core_regs_);
- for (RegisterInfo* info = it.Next(); info != nullptr; info = it.Next()) {
+ for (RegisterInfo* info : reg_pool_->core_regs_) {
if (!info->IsTemp() && !info->InUse()) {
res = info->GetReg();
RecordCorePromotion(res, s_reg);
@@ -311,8 +304,7 @@ RegStorage Mir2Lir::AllocPreservedFpReg(int s_reg) {
*/
DCHECK_NE(cu_->instruction_set, kThumb2);
RegStorage res;
- GrowableArray<RegisterInfo*>::Iterator it(&reg_pool_->sp_regs_);
- for (RegisterInfo* info = it.Next(); info != nullptr; info = it.Next()) {
+ for (RegisterInfo* info : reg_pool_->sp_regs_) {
if (!info->IsTemp() && !info->InUse()) {
res = info->GetReg();
RecordFpPromotion(res, s_reg);
@@ -337,13 +329,14 @@ RegStorage Mir2Lir::AllocPreservedSingle(int s_reg) {
}
-RegStorage Mir2Lir::AllocTempBody(GrowableArray<RegisterInfo*> &regs, int* next_temp, bool required) {
- int num_regs = regs.Size();
+RegStorage Mir2Lir::AllocTempBody(ArenaVector<RegisterInfo*>& regs, int* next_temp, bool required) {
+ int num_regs = regs.size();
int next = *next_temp;
for (int i = 0; i < num_regs; i++) {
- if (next >= num_regs)
+ if (next >= num_regs) {
next = 0;
- RegisterInfo* info = regs.Get(next);
+ }
+ RegisterInfo* info = regs[next];
// Try to allocate a register that doesn't hold a live value.
if (info->IsTemp() && !info->InUse() && info->IsDead()) {
// If it's wide, split it up.
@@ -367,9 +360,10 @@ RegStorage Mir2Lir::AllocTempBody(GrowableArray<RegisterInfo*> &regs, int* next_
next = *next_temp;
// No free non-live regs. Anything we can kill?
for (int i = 0; i < num_regs; i++) {
- if (next >= num_regs)
+ if (next >= num_regs) {
next = 0;
- RegisterInfo* info = regs.Get(next);
+ }
+ RegisterInfo* info = regs[next];
if (info->IsTemp() && !info->InUse()) {
// Got one. Kill it.
ClobberSReg(info->SReg());
@@ -401,7 +395,7 @@ RegStorage Mir2Lir::AllocTemp(bool required) {
RegStorage Mir2Lir::AllocTempWide(bool required) {
RegStorage res;
- if (reg_pool_->core64_regs_.Size() != 0) {
+ if (reg_pool_->core64_regs_.size() != 0) {
res = AllocTempBody(reg_pool_->core64_regs_, &reg_pool_->next_core64_reg_, required);
} else {
RegStorage low_reg = AllocTemp();
@@ -458,10 +452,9 @@ RegStorage Mir2Lir::AllocTypedTemp(bool fp_hint, int reg_class, bool required) {
return AllocTemp(required);
}
-RegStorage Mir2Lir::FindLiveReg(GrowableArray<RegisterInfo*> &regs, int s_reg) {
+RegStorage Mir2Lir::FindLiveReg(ArenaVector<RegisterInfo*>& regs, int s_reg) {
RegStorage res;
- GrowableArray<RegisterInfo*>::Iterator it(&regs);
- for (RegisterInfo* info = it.Next(); info != nullptr; info = it.Next()) {
+ for (RegisterInfo* info : regs) {
if ((info->SReg() == s_reg) && info->IsLive()) {
res = info->GetReg();
break;
@@ -714,15 +707,13 @@ void Mir2Lir::ResetDefLocWide(RegLocation rl) {
}
void Mir2Lir::ResetDefTracking() {
- GrowableArray<RegisterInfo*>::Iterator iter(&tempreg_info_);
- for (RegisterInfo* info = iter.Next(); info != NULL; info = iter.Next()) {
+ for (RegisterInfo* info : tempreg_info_) {
info->ResetDefBody();
}
}
void Mir2Lir::ClobberAllTemps() {
- GrowableArray<RegisterInfo*>::Iterator iter(&tempreg_info_);
- for (RegisterInfo* info = iter.Next(); info != NULL; info = iter.Next()) {
+ for (RegisterInfo* info : tempreg_info_) {
ClobberBody(info);
}
}
@@ -780,8 +771,7 @@ void Mir2Lir::FlushSpecificReg(RegisterInfo* info) {
}
void Mir2Lir::FlushAllRegs() {
- GrowableArray<RegisterInfo*>::Iterator it(&tempreg_info_);
- for (RegisterInfo* info = it.Next(); info != nullptr; info = it.Next()) {
+ for (RegisterInfo* info : tempreg_info_) {
if (info->IsDirty() && info->IsLive()) {
FlushSpecificReg(info);
}
@@ -853,14 +843,16 @@ void Mir2Lir::MarkLive(RegLocation loc) {
void Mir2Lir::MarkTemp(RegStorage reg) {
DCHECK(!reg.IsPair());
RegisterInfo* info = GetRegInfo(reg);
- tempreg_info_.Insert(info);
+ tempreg_info_.push_back(info);
info->SetIsTemp(true);
}
void Mir2Lir::UnmarkTemp(RegStorage reg) {
DCHECK(!reg.IsPair());
RegisterInfo* info = GetRegInfo(reg);
- tempreg_info_.Delete(info);
+ auto pos = std::find(tempreg_info_.begin(), tempreg_info_.end(), info);
+ DCHECK(pos != tempreg_info_.end());
+ tempreg_info_.erase(pos);
info->SetIsTemp(false);
}
@@ -932,8 +924,7 @@ void Mir2Lir::MarkInUse(RegStorage reg) {
}
bool Mir2Lir::CheckCorePoolSanity() {
- GrowableArray<RegisterInfo*>::Iterator it(&tempreg_info_);
- for (RegisterInfo* info = it.Next(); info != nullptr; info = it.Next()) {
+ for (RegisterInfo* info : tempreg_info_) {
int my_sreg = info->SReg();
if (info->IsTemp() && info->IsLive() && info->IsWide() && my_sreg != INVALID_SREG) {
RegStorage my_reg = info->GetReg();
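
GrowableArray::Delete() becomes the standard find-then-erase sequence seen in UnmarkTemp() above. A generic sketch of the idiom (assert stands in for DCHECK, and the caller is assumed to guarantee membership):

#include <algorithm>
#include <cassert>
#include <vector>

// Order-preserving removal of a single, known-present element.
template <typename T>
void EraseValue(std::vector<T>& v, const T& value) {
  auto pos = std::find(v.begin(), v.end(), value);
  assert(pos != v.end());  // caller guarantees membership, as UnmarkTemp() does
  v.erase(pos);            // O(n): shifts the tail left but keeps relative order
}

Where order did not matter, swapping the found element with back() and calling pop_back() would avoid the shift; the erase form keeps the container's relative order.
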
diff --git a/compiler/dex/quick/x86/call_x86.cc b/compiler/dex/quick/x86/call_x86.cc
index 482c430..5b92512 100644
--- a/compiler/dex/quick/x86/call_x86.cc
+++ b/compiler/dex/quick/x86/call_x86.cc
@@ -73,7 +73,7 @@ void X86Mir2Lir::GenLargePackedSwitch(MIR* mir, DexOffset table_offset, RegLocat
int size = table[1];
tab_rec->targets = static_cast<LIR**>(arena_->Alloc(size * sizeof(LIR*),
kArenaAllocLIR));
- switch_tables_.Insert(tab_rec);
+ switch_tables_.push_back(tab_rec);
// Get the switch value
rl_src = LoadValue(rl_src, kCoreReg);
@@ -145,7 +145,7 @@ void X86Mir2Lir::GenFillArrayData(MIR* mir, DexOffset table_offset, RegLocation
uint32_t size = tab_rec->table[2] | ((static_cast<uint32_t>(tab_rec->table[3])) << 16);
tab_rec->size = (size * width) + 8;
- fill_array_data_.Insert(tab_rec);
+ fill_array_data_.push_back(tab_rec);
// Making a call - use explicit registers
FlushAllRegs(); /* Everything to home location */
diff --git a/compiler/dex/quick/x86/codegen_x86.h b/compiler/dex/quick/x86/codegen_x86.h
index 6020e70..80da962 100644
--- a/compiler/dex/quick/x86/codegen_x86.h
+++ b/compiler/dex/quick/x86/codegen_x86.h
@@ -947,13 +947,13 @@ class X86Mir2Lir : public Mir2Lir {
LIR* setup_method_address_[2];
// Instructions needing patching with Method* values.
- GrowableArray<LIR*> method_address_insns_;
+ ArenaVector<LIR*> method_address_insns_;
// Instructions needing patching with Class Type* values.
- GrowableArray<LIR*> class_type_address_insns_;
+ ArenaVector<LIR*> class_type_address_insns_;
// Instructions needing patching with PC relative code addresses.
- GrowableArray<LIR*> call_method_insns_;
+ ArenaVector<LIR*> call_method_insns_;
// Prologue decrement of stack pointer.
LIR* stack_decrement_;
diff --git a/compiler/dex/quick/x86/target_x86.cc b/compiler/dex/quick/x86/target_x86.cc
index de11996..d3eafc9 100755
--- a/compiler/dex/quick/x86/target_x86.cc
+++ b/compiler/dex/quick/x86/target_x86.cc
@@ -620,13 +620,15 @@ bool X86Mir2Lir::GenMemBarrier(MemBarrierKind barrier_kind) {
void X86Mir2Lir::CompilerInitializeRegAlloc() {
if (cu_->target64) {
- reg_pool_ = new (arena_) RegisterPool(this, arena_, core_regs_64, core_regs_64q, sp_regs_64,
- dp_regs_64, reserved_regs_64, reserved_regs_64q,
- core_temps_64, core_temps_64q, sp_temps_64, dp_temps_64);
+ reg_pool_.reset(new (arena_) RegisterPool(this, arena_, core_regs_64, core_regs_64q, sp_regs_64,
+ dp_regs_64, reserved_regs_64, reserved_regs_64q,
+ core_temps_64, core_temps_64q,
+ sp_temps_64, dp_temps_64));
} else {
- reg_pool_ = new (arena_) RegisterPool(this, arena_, core_regs_32, empty_pool, sp_regs_32,
- dp_regs_32, reserved_regs_32, empty_pool,
- core_temps_32, empty_pool, sp_temps_32, dp_temps_32);
+ reg_pool_.reset(new (arena_) RegisterPool(this, arena_, core_regs_32, empty_pool, sp_regs_32,
+ dp_regs_32, reserved_regs_32, empty_pool,
+ core_temps_32, empty_pool,
+ sp_temps_32, dp_temps_32));
}
// Target-specific adjustments.
@@ -635,7 +637,7 @@ void X86Mir2Lir::CompilerInitializeRegAlloc() {
const ArrayRef<const RegStorage> *xp_regs = cu_->target64 ? &xp_regs_64 : &xp_regs_32;
for (RegStorage reg : *xp_regs) {
RegisterInfo* info = new (arena_) RegisterInfo(reg, GetRegMaskCommon(reg));
- reginfo_map_.Put(reg.GetReg(), info);
+ reginfo_map_[reg.GetReg()] = info;
}
const ArrayRef<const RegStorage> *xp_temps = cu_->target64 ? &xp_temps_64 : &xp_temps_32;
for (RegStorage reg : *xp_temps) {
@@ -645,8 +647,7 @@ void X86Mir2Lir::CompilerInitializeRegAlloc() {
// Alias single precision xmm to double xmms.
// TODO: as needed, add larger vector sizes - alias all to the largest.
- GrowableArray<RegisterInfo*>::Iterator it(&reg_pool_->sp_regs_);
- for (RegisterInfo* info = it.Next(); info != nullptr; info = it.Next()) {
+ for (RegisterInfo* info : reg_pool_->sp_regs_) {
int sp_reg_num = info->GetReg().GetRegNum();
RegStorage xp_reg = RegStorage::Solo128(sp_reg_num);
RegisterInfo* xp_reg_info = GetRegInfo(xp_reg);
@@ -666,8 +667,7 @@ void X86Mir2Lir::CompilerInitializeRegAlloc() {
if (cu_->target64) {
// Alias 32bit W registers to corresponding 64bit X registers.
- GrowableArray<RegisterInfo*>::Iterator w_it(&reg_pool_->core_regs_);
- for (RegisterInfo* info = w_it.Next(); info != nullptr; info = w_it.Next()) {
+ for (RegisterInfo* info : reg_pool_->core_regs_) {
int x_reg_num = info->GetReg().GetRegNum();
RegStorage x_reg = RegStorage::Solo64(x_reg_num);
RegisterInfo* x_reg_info = GetRegInfo(x_reg);
@@ -785,11 +785,14 @@ RegisterClass X86Mir2Lir::RegClassForFieldLoadStore(OpSize size, bool is_volatil
X86Mir2Lir::X86Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena)
: Mir2Lir(cu, mir_graph, arena),
base_of_code_(nullptr), store_method_addr_(false), store_method_addr_used_(false),
- method_address_insns_(arena, 100, kGrowableArrayMisc),
- class_type_address_insns_(arena, 100, kGrowableArrayMisc),
- call_method_insns_(arena, 100, kGrowableArrayMisc),
+ method_address_insns_(arena->Adapter()),
+ class_type_address_insns_(arena->Adapter()),
+ call_method_insns_(arena->Adapter()),
stack_decrement_(nullptr), stack_increment_(nullptr),
const_vectors_(nullptr) {
+ method_address_insns_.reserve(100);
+ class_type_address_insns_.reserve(100);
+ call_method_insns_.reserve(100);
store_method_addr_used_ = false;
if (kIsDebugBuild) {
for (int i = 0; i < kX86Last; i++) {
@@ -977,7 +980,7 @@ void X86Mir2Lir::LoadMethodAddress(const MethodReference& target_method, InvokeT
static_cast<int>(target_method_id_ptr), target_method_idx,
WrapPointer(const_cast<DexFile*>(target_dex_file)), type);
AppendLIR(move);
- method_address_insns_.Insert(move);
+ method_address_insns_.push_back(move);
}
void X86Mir2Lir::LoadClassType(const DexFile& dex_file, uint32_t type_idx,
@@ -996,7 +999,7 @@ void X86Mir2Lir::LoadClassType(const DexFile& dex_file, uint32_t type_idx,
static_cast<int>(ptr), type_idx,
WrapPointer(const_cast<DexFile*>(&dex_file)));
AppendLIR(move);
- class_type_address_insns_.Insert(move);
+ class_type_address_insns_.push_back(move);
}
LIR *X86Mir2Lir::CallWithLinkerFixup(const MethodReference& target_method, InvokeType type) {
@@ -1014,7 +1017,7 @@ LIR *X86Mir2Lir::CallWithLinkerFixup(const MethodReference& target_method, Invok
LIR *call = RawLIR(current_dalvik_offset_, kX86CallI, static_cast<int>(target_method_id_ptr),
target_method_idx, WrapPointer(const_cast<DexFile*>(target_dex_file)), type);
AppendLIR(call);
- call_method_insns_.Insert(call);
+ call_method_insns_.push_back(call);
return call;
}
@@ -1045,8 +1048,7 @@ void X86Mir2Lir::InstallLiteralPools() {
}
// Handle the fixups for methods.
- for (uint32_t i = 0; i < method_address_insns_.Size(); i++) {
- LIR* p = method_address_insns_.Get(i);
+ for (LIR* p : method_address_insns_) {
DCHECK_EQ(p->opcode, kX86Mov32RI);
uint32_t target_method_idx = p->operands[2];
const DexFile* target_dex_file =
@@ -1062,8 +1064,7 @@ void X86Mir2Lir::InstallLiteralPools() {
}
// Handle the fixups for class types.
- for (uint32_t i = 0; i < class_type_address_insns_.Size(); i++) {
- LIR* p = class_type_address_insns_.Get(i);
+ for (LIR* p : class_type_address_insns_) {
DCHECK_EQ(p->opcode, kX86Mov32RI);
const DexFile* class_dex_file =
@@ -1078,8 +1079,7 @@ void X86Mir2Lir::InstallLiteralPools() {
}
// And now the PC-relative calls to methods.
- for (uint32_t i = 0; i < call_method_insns_.Size(); i++) {
- LIR* p = call_method_insns_.Get(i);
+ for (LIR* p : call_method_insns_) {
DCHECK_EQ(p->opcode, kX86CallI);
uint32_t target_method_idx = p->operands[1];
const DexFile* target_dex_file =
@@ -1577,11 +1577,11 @@ void X86Mir2Lir::ReserveVectorRegisters(MIR* mir) {
for (RegisterInfo *info = xp_reg_info->GetAliasChain();
info != nullptr;
info = info->GetAliasChain()) {
- if (info->GetReg().IsSingle()) {
- reg_pool_->sp_regs_.Delete(info);
- } else {
- reg_pool_->dp_regs_.Delete(info);
- }
+ ArenaVector<RegisterInfo*>* regs =
+ info->GetReg().IsSingle() ? &reg_pool_->sp_regs_ : &reg_pool_->dp_regs_;
+ auto it = std::find(regs->begin(), regs->end(), info);
+ DCHECK(it != regs->end());
+ regs->erase(it);
}
}
}
@@ -1595,9 +1595,9 @@ void X86Mir2Lir::ReturnVectorRegisters(MIR* mir) {
info != nullptr;
info = info->GetAliasChain()) {
if (info->GetReg().IsSingle()) {
- reg_pool_->sp_regs_.Insert(info);
+ reg_pool_->sp_regs_.push_back(info);
} else {
- reg_pool_->dp_regs_.Insert(info);
+ reg_pool_->dp_regs_.push_back(info);
}
}
}
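
The constructor changes above swap GrowableArray's (arena, initial-capacity, kind) constructor for arena->Adapter() plus explicit reserve(100) calls; ArenaVector<T> is, in essence, std::vector<T, arena-backed allocator>. A compilable approximation of that adapter, simplified from what ART actually has (the real one also tags allocations with an ArenaAllocKind for the statistics tables further down):

#include <cstddef>
#include <new>
#include <vector>

class Arena {  // toy bump allocator
 public:
  void* Alloc(std::size_t n) {
    n = (n + 7) & ~std::size_t{7};
    if (used_ + n > sizeof(storage_)) throw std::bad_alloc();
    void* p = storage_ + used_;
    used_ += n;
    return p;
  }
 private:
  alignas(8) char storage_[1 << 16];
  std::size_t used_ = 0;
};

template <typename T>
struct ArenaAdapter {
  using value_type = T;
  explicit ArenaAdapter(Arena* a) : arena(a) {}
  template <typename U> ArenaAdapter(const ArenaAdapter<U>& o) : arena(o.arena) {}
  T* allocate(std::size_t n) { return static_cast<T*>(arena->Alloc(n * sizeof(T))); }
  void deallocate(T*, std::size_t) {}  // individual frees are no-ops; the arena is bulk-freed
  Arena* arena;
};
template <typename T, typename U>
bool operator==(const ArenaAdapter<T>& a, const ArenaAdapter<U>& b) { return a.arena == b.arena; }
template <typename T, typename U>
bool operator!=(const ArenaAdapter<T>& a, const ArenaAdapter<U>& b) { return !(a == b); }

template <typename T> using ArenaVector = std::vector<T, ArenaAdapter<T>>;

int main() {
  Arena arena;
  ArenaAdapter<int> adapter(&arena);
  ArenaVector<int> insns(adapter);  // mirrors method_address_insns_(arena->Adapter())
  insns.reserve(100);               // the old initial capacity of 100, now explicit
  insns.push_back(7);
}
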
diff --git a/compiler/dex/ssa_transformation.cc b/compiler/dex/ssa_transformation.cc
index a2b9bb7..3cc573b 100644
--- a/compiler/dex/ssa_transformation.cc
+++ b/compiler/dex/ssa_transformation.cc
@@ -45,12 +45,7 @@ BasicBlock* MIRGraph::NextUnvisitedSuccessor(BasicBlock* bb) {
res = NeedsVisit(GetBasicBlock(bb->taken));
if (res == NULL) {
if (bb->successor_block_list_type != kNotUsed) {
- GrowableArray<SuccessorBlockInfo*>::Iterator iterator(bb->successor_blocks);
- while (true) {
- SuccessorBlockInfo *sbi = iterator.Next();
- if (sbi == NULL) {
- break;
- }
+ for (SuccessorBlockInfo* sbi : bb->successor_blocks) {
res = NeedsVisit(GetBasicBlock(sbi->block));
if (res != NULL) {
break;
@@ -66,7 +61,7 @@ void MIRGraph::MarkPreOrder(BasicBlock* block) {
block->visited = true;
/* Enqueue the pre_order block id */
if (block->id != NullBasicBlockId) {
- dfs_order_->Insert(block->id);
+ dfs_order_.push_back(block->id);
}
}
@@ -83,9 +78,9 @@ void MIRGraph::RecordDFSOrders(BasicBlock* block) {
succ.push_back(next_successor);
continue;
}
- curr->dfs_id = dfs_post_order_->Size();
+ curr->dfs_id = dfs_post_order_.size();
if (curr->id != NullBasicBlockId) {
- dfs_post_order_->Insert(curr->id);
+ dfs_post_order_.push_back(curr->id);
}
succ.pop_back();
}
@@ -93,23 +88,11 @@ void MIRGraph::RecordDFSOrders(BasicBlock* block) {
/* Sort the blocks by the Depth-First-Search */
void MIRGraph::ComputeDFSOrders() {
- /* Initialize or reset the DFS pre_order list */
- if (dfs_order_ == NULL) {
- dfs_order_ = new (arena_) GrowableArray<BasicBlockId>(arena_, GetNumBlocks(),
- kGrowableArrayDfsOrder);
- } else {
- /* Just reset the used length on the counter */
- dfs_order_->Reset();
- }
-
- /* Initialize or reset the DFS post_order list */
- if (dfs_post_order_ == NULL) {
- dfs_post_order_ = new (arena_) GrowableArray<BasicBlockId>(arena_, GetNumBlocks(),
- kGrowableArrayDfsPostOrder);
- } else {
- /* Just reset the used length on the counter */
- dfs_post_order_->Reset();
- }
+ /* Clear the DFS pre-order and post-order lists. */
+ dfs_order_.clear();
+ dfs_order_.reserve(GetNumBlocks());
+ dfs_post_order_.clear();
+ dfs_post_order_.reserve(GetNumBlocks());
// Reset visited flags from all nodes
ClearAllVisitedFlags();
@@ -117,7 +100,7 @@ void MIRGraph::ComputeDFSOrders() {
// Record dfs orders
RecordDFSOrders(GetEntryBlock());
- num_reachable_blocks_ = dfs_order_->Size();
+ num_reachable_blocks_ = dfs_order_.size();
if (num_reachable_blocks_ != num_blocks_) {
// Hide all unreachable blocks.
@@ -181,14 +164,10 @@ void MIRGraph::ComputeDefBlockMatrix() {
}
void MIRGraph::ComputeDomPostOrderTraversal(BasicBlock* bb) {
- if (dom_post_order_traversal_ == NULL || max_num_reachable_blocks_ < num_reachable_blocks_) {
- // First time or too small - create the array.
- dom_post_order_traversal_ =
- new (arena_) GrowableArray<BasicBlockId>(arena_, num_reachable_blocks_,
- kGrowableArrayDomPostOrderTraversal);
- } else {
- dom_post_order_traversal_->Reset();
- }
+ // Clear the dominator post-order list.
+ dom_post_order_traversal_.clear();
+ dom_post_order_traversal_.reserve(num_reachable_blocks_);
+
ClearAllVisitedFlags();
DCHECK(temp_scoped_alloc_.get() != nullptr);
ScopedArenaVector<std::pair<BasicBlock*, ArenaBitVector::IndexIterator>> work_stack(
@@ -211,7 +190,7 @@ void MIRGraph::ComputeDomPostOrderTraversal(BasicBlock* bb) {
} else {
// no successor/next
if (curr_bb->id != NullBasicBlockId) {
- dom_post_order_traversal_->Insert(curr_bb->id);
+ dom_post_order_traversal_.push_back(curr_bb->id);
}
work_stack.pop_back();
@@ -247,15 +226,10 @@ bool MIRGraph::ComputeDominanceFrontier(BasicBlock* bb) {
CheckForDominanceFrontier(bb, GetBasicBlock(bb->fall_through));
}
if (bb->successor_block_list_type != kNotUsed) {
- GrowableArray<SuccessorBlockInfo*>::Iterator iterator(bb->successor_blocks);
- while (true) {
- SuccessorBlockInfo *successor_block_info = iterator.Next();
- if (successor_block_info == NULL) {
- break;
- }
- BasicBlock* succ_bb = GetBasicBlock(successor_block_info->block);
- CheckForDominanceFrontier(bb, succ_bb);
- }
+ for (SuccessorBlockInfo* successor_block_info : bb->successor_blocks) {
+ BasicBlock* succ_bb = GetBasicBlock(successor_block_info->block);
+ CheckForDominanceFrontier(bb, succ_bb);
+ }
}
/* Calculate DF_up */
@@ -319,13 +293,14 @@ bool MIRGraph::ComputeblockIDom(BasicBlock* bb) {
}
/* Iterate through the predecessors */
- GrowableArray<BasicBlockId>::Iterator iter(bb->predecessors);
+ auto it = bb->predecessors.begin(), end = bb->predecessors.end();
/* Find the first processed predecessor */
int idom = -1;
- while (true) {
- BasicBlock* pred_bb = GetBasicBlock(iter.Next());
- CHECK(pred_bb != NULL);
+ for ( ; ; ++it) {
+ CHECK(it != end);
+ BasicBlock* pred_bb = GetBasicBlock(*it);
+ DCHECK(pred_bb != nullptr);
if (i_dom_list_[pred_bb->dfs_id] != NOTVISITED) {
idom = pred_bb->dfs_id;
break;
@@ -333,11 +308,9 @@ bool MIRGraph::ComputeblockIDom(BasicBlock* bb) {
}
/* Scan the rest of the predecessors */
- while (true) {
- BasicBlock* pred_bb = GetBasicBlock(iter.Next());
- if (!pred_bb) {
- break;
- }
+ for ( ; it != end; ++it) {
+ BasicBlock* pred_bb = GetBasicBlock(*it);
+ DCHECK(pred_bb != nullptr);
if (i_dom_list_[pred_bb->dfs_id] == NOTVISITED) {
continue;
} else {
@@ -370,7 +343,7 @@ bool MIRGraph::SetDominators(BasicBlock* bb) {
if (bb != GetEntryBlock()) {
int idom_dfs_idx = i_dom_list_[bb->dfs_id];
DCHECK_NE(idom_dfs_idx, NOTVISITED);
- int i_dom_idx = dfs_post_order_->Get(idom_dfs_idx);
+ int i_dom_idx = dfs_post_order_[idom_dfs_idx];
BasicBlock* i_dom = GetBasicBlock(i_dom_idx);
bb->i_dom = i_dom->id;
/* Add bb to the i_dominated set of the immediate dominator block */
@@ -474,12 +447,7 @@ bool MIRGraph::ComputeBlockLiveIns(BasicBlock* bb) {
ComputeSuccLineIn(temp_dalvik_register_v, bb_fall_through->data_flow_info->live_in_v,
bb->data_flow_info->def_v);
if (bb->successor_block_list_type != kNotUsed) {
- GrowableArray<SuccessorBlockInfo*>::Iterator iterator(bb->successor_blocks);
- while (true) {
- SuccessorBlockInfo *successor_block_info = iterator.Next();
- if (successor_block_info == NULL) {
- break;
- }
+ for (SuccessorBlockInfo* successor_block_info : bb->successor_blocks) {
BasicBlock* succ_bb = GetBasicBlock(successor_block_info->block);
if (succ_bb->data_flow_info) {
ComputeSuccLineIn(temp_dalvik_register_v, succ_bb->data_flow_info->live_in_v,
@@ -556,8 +524,7 @@ bool MIRGraph::InsertPhiNodeOperands(BasicBlock* bb) {
int v_reg = SRegToVReg(ssa_reg);
/* Iterate through the predecessors */
- GrowableArray<BasicBlockId>::Iterator iter(bb->predecessors);
- size_t num_uses = bb->predecessors->Size();
+ size_t num_uses = bb->predecessors.size();
AllocateSSAUseData(mir, num_uses);
int* uses = mir->ssa_rep->uses;
BasicBlockId* incoming =
@@ -565,14 +532,12 @@ bool MIRGraph::InsertPhiNodeOperands(BasicBlock* bb) {
kArenaAllocDFInfo));
mir->meta.phi_incoming = incoming;
int idx = 0;
- while (true) {
- BasicBlock* pred_bb = GetBasicBlock(iter.Next());
- if (!pred_bb) {
- break;
- }
+ for (BasicBlockId pred_id : bb->predecessors) {
+ BasicBlock* pred_bb = GetBasicBlock(pred_id);
+ DCHECK(pred_bb != nullptr);
int ssa_reg = pred_bb->data_flow_info->vreg_to_ssa_map_exit[v_reg];
uses[idx] = ssa_reg;
- incoming[idx] = pred_bb->id;
+ incoming[idx] = pred_id;
idx++;
}
}
@@ -607,12 +572,7 @@ void MIRGraph::DoDFSPreOrderSSARename(BasicBlock* block) {
memcpy(vreg_to_ssa_map_, saved_ssa_map, map_size);
}
if (block->successor_block_list_type != kNotUsed) {
- GrowableArray<SuccessorBlockInfo*>::Iterator iterator(block->successor_blocks);
- while (true) {
- SuccessorBlockInfo *successor_block_info = iterator.Next();
- if (successor_block_info == NULL) {
- break;
- }
+ for (SuccessorBlockInfo* successor_block_info : block->successor_blocks) {
BasicBlock* succ_bb = GetBasicBlock(successor_block_info->block);
DoDFSPreOrderSSARename(succ_bb);
/* Restore SSA map snapshot */
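
ComputeDFSOrders() above replaces the allocate-or-Reset dance with clear() followed by reserve(). That pairing matters with arena-backed storage: clear() keeps the existing capacity, and reserving up front avoids repeated regrowth, where each abandoned buffer would stay in the arena until teardown. A minimal sketch of the reset idiom (BasicBlockId's width is an assumption here):

#include <cstddef>
#include <cstdint>
#include <vector>

using BasicBlockId = std::uint16_t;  // assumed width, for illustration

// Reset-and-reuse: clear() keeps capacity; reserve() bounds growth to one step per pass.
void ResetOrder(std::vector<BasicBlockId>& order, std::size_t num_blocks) {
  order.clear();              // size -> 0, capacity untouched
  order.reserve(num_blocks);  // grow once, up front, if needed at all
}
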
diff --git a/compiler/utils/arena_allocator.cc b/compiler/utils/arena_allocator.cc
index da49524..516ac2b 100644
--- a/compiler/utils/arena_allocator.cc
+++ b/compiler/utils/arena_allocator.cc
@@ -35,12 +35,23 @@ template <bool kCount>
const char* const ArenaAllocatorStatsImpl<kCount>::kAllocNames[] = {
"Misc ",
"BasicBlock ",
+ "BBList "
+ "BBPreds ",
+ "DfsPreOrd ",
+ "DfsPostOrd ",
+ "DomPostOrd ",
+ "TopoOrd ",
+ "Lowering ",
"LIR ",
"LIR masks ",
+ "SwitchTbl ",
+ "FillArray ",
+ "SlowPaths ",
"MIR ",
"DataFlow ",
"GrowList ",
"GrowBitMap ",
+ "SSA2Dalvik ",
"Dalvik2SSA ",
"DebugInfo ",
"Successor ",
diff --git a/compiler/utils/arena_allocator.h b/compiler/utils/arena_allocator.h
index 7bfbb6f..b2f5ca9 100644
--- a/compiler/utils/arena_allocator.h
+++ b/compiler/utils/arena_allocator.h
@@ -44,12 +44,23 @@ static constexpr bool kArenaAllocatorCountAllocations = false;
enum ArenaAllocKind {
kArenaAllocMisc,
kArenaAllocBB,
+ kArenaAllocBBList,
+ kArenaAllocBBPredecessors,
+ kArenaAllocDfsPreOrder,
+ kArenaAllocDfsPostOrder,
+ kArenaAllocDomPostOrder,
+ kArenaAllocTopologicalSortOrder,
+ kArenaAllocLoweringInfo,
kArenaAllocLIR,
kArenaAllocLIRResourceMask,
+ kArenaAllocSwitchTable,
+ kArenaAllocFillArrayData,
+ kArenaAllocSlowPaths,
kArenaAllocMIR,
kArenaAllocDFInfo,
kArenaAllocGrowableArray,
kArenaAllocGrowableBitMap,
+ kArenaAllocSSAToDalvikMap,
kArenaAllocDalvikToSSAMap,
kArenaAllocDebugInfo,
kArenaAllocSuccessor,
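
Every kind added to this enum must line up index-for-index with an entry in the kAllocNames table from arena_allocator.cc; two adjacent string literals with a dropped comma silently concatenate and shift every later name by one. A compile-time size check catches that class of drift (a sketch; whether ART guards this elsewhere is not shown in this diff):

#include <cstddef>

enum ArenaAllocKind {
  kArenaAllocMisc,
  kArenaAllocBB,
  kNumArenaAllocKinds  // keep last
};

static const char* const kAllocNames[] = {
  "Misc       ",
  "BasicBlock ",  // "A" "B" with a missing comma would silently become one entry: "A B"
};

// Fails to compile if the name table and the enum fall out of sync.
static_assert(sizeof(kAllocNames) / sizeof(kAllocNames[0]) ==
                  static_cast<std::size_t>(kNumArenaAllocKinds),
              "kAllocNames must have one entry per ArenaAllocKind");
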
diff --git a/compiler/utils/growable_array.h b/compiler/utils/growable_array.h
index a1a3312..5b55b80 100644
--- a/compiler/utils/growable_array.h
+++ b/compiler/utils/growable_array.h
@@ -26,61 +26,14 @@ namespace art {
// Type of growable list for memory tuning.
enum OatListKind {
kGrowableArrayMisc = 0,
- kGrowableArrayBlockList,
- kGrowableArraySSAtoDalvikMap,
- kGrowableArrayDfsOrder,
- kGrowableArrayDfsPostOrder,
- kGrowableArrayDomPostOrderTraversal,
- kGrowableArraySwitchTables,
- kGrowableArrayFillArrayData,
- kGrowableArraySuccessorBlocks,
- kGrowableArrayPredecessors,
- kGrowableArraySlowPaths,
kGNumListKinds
};
+// Deprecated
+// TODO: Replace all uses with ArenaVector<T>.
template<typename T>
class GrowableArray {
public:
- class Iterator {
- public:
- explicit Iterator(GrowableArray* g_list)
- : idx_(0),
- g_list_(g_list) {}
-
- explicit Iterator()
- : idx_(0),
- g_list_(nullptr) {}
-
- // NOTE: returns 0/NULL when no next.
- // TODO: redo to make usage consistent with other iterators.
- T Next() {
- DCHECK(g_list_ != nullptr);
- if (idx_ >= g_list_->Size()) {
- return 0;
- } else {
- return g_list_->Get(idx_++);
- }
- }
-
- void Reset() {
- idx_ = 0;
- }
-
- void Reset(GrowableArray* g_list) {
- idx_ = 0;
- g_list_ = g_list;
- }
-
- size_t GetIndex() const {
- return idx_;
- }
-
- private:
- size_t idx_;
- GrowableArray* g_list_;
- };
-
GrowableArray(ArenaAllocator* arena, size_t init_length, OatListKind kind = kGrowableArrayMisc)
: arena_(arena),
num_allocated_(init_length),