author     Jean Christophe Beyler <jean.christophe.beyler@intel.com>  2014-05-01 15:36:22 -0700
committer  Jean Christophe Beyler <jean.christophe.beyler@intel.com>  2014-05-27 08:58:39 -0700
commit     4896d7b6fb75add25f2d6ba84346ac83d8ba9d51 (patch)
tree       b3e2da5fa23c984d4ed1f2f83429350edb67f929 /compiler
parent     1b1aea22c0cc5567b5967590cb3f949cc45c3a9a (diff)
ART: Better SSA Allocation when recreating SSA
The SSA calculation is not allocation friendly. This change makes SSARepresentation
remember how much it has allocated so that it does not reallocate when SSA has to be
recalculated. Also adds some allocation-friendly code to the dominance computation.

Change-Id: Icd5586b7e2fefae8e1535975ab400b1ca95b500f
Signed-off-by: Jean Christophe Beyler <jean.christophe.beyler@intel.com>
Diffstat (limited to 'compiler')
-rw-r--r--  compiler/dex/mir_dataflow.cc        | 56
-rw-r--r--  compiler/dex/mir_graph.cc           |  4
-rw-r--r--  compiler/dex/mir_graph.h            | 18
-rw-r--r--  compiler/dex/ssa_transformation.cc  | 20
4 files changed, 55 insertions(+), 43 deletions(-)
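The heart of the patch is a capacity-caching pattern: SSARepresentation keeps
num_uses_allocated/num_defs_allocated next to num_uses/num_defs, and the new
AllocateSSAUseData/AllocateSSADefData helpers only go back to the arena when the
requested count exceeds the remembered capacity. The standalone sketch below is not
the ART sources; SimpleArena and SsaRep are hypothetical stand-ins for ART's
ArenaAllocator and SSARepresentation, and it tracks the capacity field explicitly to
show why repeated SSA recalculation stops costing new arena memory.

    // Sketch of the capacity-caching allocation pattern (assumed names, not ART code).
    #include <cstddef>
    #include <cstdint>
    #include <iostream>
    #include <vector>

    // Minimal arena stand-in: memory is only ever handed out, never freed
    // individually, which is why skipping redundant allocations matters.
    class SimpleArena {
     public:
      void* Alloc(size_t bytes) {
        blocks_.emplace_back(bytes);
        total_ += bytes;
        return blocks_.back().data();
      }
      size_t TotalAllocated() const { return total_; }
     private:
      std::vector<std::vector<uint8_t>> blocks_;
      size_t total_ = 0;
    };

    struct SsaRep {
      int32_t* uses = nullptr;
      bool* fp_use = nullptr;
      int16_t num_uses_allocated = 0;  // Capacity remembered across SSA passes.
      int16_t num_uses = 0;            // Logical count for the current pass.
    };

    // Same shape as the patch's allocate-if-needed helper: reuse the existing
    // buffers when they are already large enough, otherwise grow via the arena.
    void AllocateUseData(SimpleArena* arena, SsaRep* rep, int num_uses) {
      rep->num_uses = static_cast<int16_t>(num_uses);
      if (rep->num_uses_allocated < num_uses) {
        rep->uses = static_cast<int32_t*>(arena->Alloc(sizeof(int32_t) * num_uses));
        rep->fp_use = static_cast<bool*>(arena->Alloc(sizeof(bool) * num_uses));
        rep->num_uses_allocated = static_cast<int16_t>(num_uses);
      }
    }

    int main() {
      SimpleArena arena;
      SsaRep rep;
      AllocateUseData(&arena, &rep, 4);           // First SSA pass: allocates.
      size_t after_first = arena.TotalAllocated();
      AllocateUseData(&arena, &rep, 3);           // Recalculation: fits, no new allocation.
      std::cout << (arena.TotalAllocated() == after_first) << "\n";  // Prints 1.
      return 0;
    }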
diff --git a/compiler/dex/mir_dataflow.cc b/compiler/dex/mir_dataflow.cc
index ed7e1f5..47b233b 100644
--- a/compiler/dex/mir_dataflow.cc
+++ b/compiler/dex/mir_dataflow.cc
@@ -953,18 +953,34 @@ void MIRGraph::HandleSSADef(int* defs, int dalvik_reg, int reg_index) {
defs[reg_index] = ssa_reg;
}
+void MIRGraph::AllocateSSAUseData(MIR *mir, int num_uses) {
+ mir->ssa_rep->num_uses = num_uses;
+
+ if (mir->ssa_rep->num_uses_allocated < num_uses) {
+ mir->ssa_rep->uses = static_cast<int*>(arena_->Alloc(sizeof(int) * num_uses, kArenaAllocDFInfo));
+ // NOTE: will be filled in during type & size inference pass
+ mir->ssa_rep->fp_use = static_cast<bool*>(arena_->Alloc(sizeof(bool) * num_uses, kArenaAllocDFInfo));
+ }
+}
+
+void MIRGraph::AllocateSSADefData(MIR *mir, int num_defs) {
+ mir->ssa_rep->num_defs = num_defs;
+
+ if (mir->ssa_rep->num_defs_allocated < num_defs) {
+ mir->ssa_rep->defs = static_cast<int*>(arena_->Alloc(sizeof(int) * num_defs,
+ kArenaAllocDFInfo));
+ mir->ssa_rep->fp_def = static_cast<bool*>(arena_->Alloc(sizeof(bool) * num_defs,
+ kArenaAllocDFInfo));
+ }
+}
+
/* Look up new SSA names for format_35c instructions */
void MIRGraph::DataFlowSSAFormat35C(MIR* mir) {
MIR::DecodedInstruction* d_insn = &mir->dalvikInsn;
int num_uses = d_insn->vA;
int i;
- mir->ssa_rep->num_uses = num_uses;
- mir->ssa_rep->uses = static_cast<int*>(arena_->Alloc(sizeof(int) * num_uses,
- kArenaAllocDFInfo));
- // NOTE: will be filled in during type & size inference pass
- mir->ssa_rep->fp_use = static_cast<bool*>(arena_->Alloc(sizeof(bool) * num_uses,
- kArenaAllocDFInfo));
+ AllocateSSAUseData(mir, num_uses);
for (i = 0; i < num_uses; i++) {
HandleSSAUse(mir->ssa_rep->uses, d_insn->arg[i], i);
@@ -977,12 +993,7 @@ void MIRGraph::DataFlowSSAFormat3RC(MIR* mir) {
int num_uses = d_insn->vA;
int i;
- mir->ssa_rep->num_uses = num_uses;
- mir->ssa_rep->uses = static_cast<int*>(arena_->Alloc(sizeof(int) * num_uses,
- kArenaAllocDFInfo));
- // NOTE: will be filled in during type & size inference pass
- mir->ssa_rep->fp_use = static_cast<bool*>(arena_->Alloc(sizeof(bool) * num_uses,
- kArenaAllocDFInfo));
+ AllocateSSAUseData(mir, num_uses);
for (i = 0; i < num_uses; i++) {
HandleSSAUse(mir->ssa_rep->uses, d_insn->vC+i, i);
@@ -999,6 +1010,7 @@ bool MIRGraph::DoSSAConversion(BasicBlock* bb) {
mir->ssa_rep =
static_cast<struct SSARepresentation *>(arena_->Alloc(sizeof(SSARepresentation),
kArenaAllocDFInfo));
+ memset(mir->ssa_rep, 0, sizeof(*mir->ssa_rep));
uint64_t df_attributes = GetDataFlowAttributes(mir);
@@ -1045,13 +1057,7 @@ bool MIRGraph::DoSSAConversion(BasicBlock* bb) {
}
}
- if (num_uses) {
- mir->ssa_rep->num_uses = num_uses;
- mir->ssa_rep->uses = static_cast<int*>(arena_->Alloc(sizeof(int) * num_uses,
- kArenaAllocDFInfo));
- mir->ssa_rep->fp_use = static_cast<bool*>(arena_->Alloc(sizeof(bool) * num_uses,
- kArenaAllocDFInfo));
- }
+ AllocateSSAUseData(mir, num_uses);
int num_defs = 0;
@@ -1062,13 +1068,7 @@ bool MIRGraph::DoSSAConversion(BasicBlock* bb) {
}
}
- if (num_defs) {
- mir->ssa_rep->num_defs = num_defs;
- mir->ssa_rep->defs = static_cast<int*>(arena_->Alloc(sizeof(int) * num_defs,
- kArenaAllocDFInfo));
- mir->ssa_rep->fp_def = static_cast<bool*>(arena_->Alloc(sizeof(bool) * num_defs,
- kArenaAllocDFInfo));
- }
+ AllocateSSADefData(mir, num_defs);
MIR::DecodedInstruction* d_insn = &mir->dalvikInsn;
@@ -1114,11 +1114,11 @@ bool MIRGraph::DoSSAConversion(BasicBlock* bb) {
* input to PHI nodes can be derived from the snapshot of all
* predecessor blocks.
*/
- bb->data_flow_info->vreg_to_ssa_map =
+ bb->data_flow_info->vreg_to_ssa_map_exit =
static_cast<int*>(arena_->Alloc(sizeof(int) * cu_->num_dalvik_registers,
kArenaAllocDFInfo));
- memcpy(bb->data_flow_info->vreg_to_ssa_map, vreg_to_ssa_map_,
+ memcpy(bb->data_flow_info->vreg_to_ssa_map_exit, vreg_to_ssa_map_,
sizeof(int) * cu_->num_dalvik_registers);
return true;
}
diff --git a/compiler/dex/mir_graph.cc b/compiler/dex/mir_graph.cc
index 24fea71..0fffa01 100644
--- a/compiler/dex/mir_graph.cc
+++ b/compiler/dex/mir_graph.cc
@@ -74,6 +74,7 @@ MIRGraph::MIRGraph(CompilationUnit* cu, ArenaAllocator* arena)
use_counts_(arena, 256, kGrowableArrayMisc),
raw_use_counts_(arena, 256, kGrowableArrayMisc),
num_reachable_blocks_(0),
+ max_num_reachable_blocks_(0),
dfs_order_(NULL),
dfs_post_order_(NULL),
dom_post_order_traversal_(NULL),
@@ -1435,6 +1436,9 @@ void MIRGraph::SSATransformationStart() {
/* Rename register names by local defs and phi nodes */
ClearAllVisitedFlags();
DoDFSPreOrderSSARename(GetEntryBlock());
+
+ // Update the maximum number of reachable blocks.
+ max_num_reachable_blocks_ = num_reachable_blocks_;
}
void MIRGraph::SSATransformationEnd() {
diff --git a/compiler/dex/mir_graph.h b/compiler/dex/mir_graph.h
index 53a997e..3655125 100644
--- a/compiler/dex/mir_graph.h
+++ b/compiler/dex/mir_graph.h
@@ -223,7 +223,7 @@ struct BasicBlockDataFlow {
ArenaBitVector* def_v;
ArenaBitVector* live_in_v;
ArenaBitVector* phi_v;
- int32_t* vreg_to_ssa_map;
+ int32_t* vreg_to_ssa_map_exit;
ArenaBitVector* ending_check_v; // For null check and class init check elimination.
};
@@ -234,14 +234,21 @@ struct BasicBlockDataFlow {
* Following SSA renaming, this is the primary struct used by code generators to locate
* operand and result registers. This is a somewhat confusing and unhelpful convention that
* we may want to revisit in the future.
+ *
+ * TODO:
+ * 1. Add accessors for uses/defs and make data private
+ * 2. Change fp_use/fp_def to a bit array (could help memory usage)
+ * 3. Combine array storage into internal array and handled via accessors from 1.
*/
struct SSARepresentation {
- int16_t num_uses;
- int16_t num_defs;
int32_t* uses;
bool* fp_use;
int32_t* defs;
bool* fp_def;
+ int16_t num_uses_allocated;
+ int16_t num_defs_allocated;
+ int16_t num_uses;
+ int16_t num_defs;
static uint32_t GetStartUseIndex(Instruction::Code opcode);
};
@@ -1020,6 +1027,10 @@ class MIRGraph {
void CombineBlocks(BasicBlock* bb);
void ClearAllVisitedFlags();
+
+ void AllocateSSAUseData(MIR *mir, int num_uses);
+ void AllocateSSADefData(MIR *mir, int num_defs);
+
/*
* IsDebugBuild sanity check: keep track of the Dex PCs for catch entries so that later on
* we can verify that all catch entries have native PC entries.
@@ -1105,6 +1116,7 @@ class MIRGraph {
GrowableArray<uint32_t> use_counts_; // Weighted by nesting depth
GrowableArray<uint32_t> raw_use_counts_; // Not weighted
unsigned int num_reachable_blocks_;
+ unsigned int max_num_reachable_blocks_;
GrowableArray<BasicBlockId>* dfs_order_;
GrowableArray<BasicBlockId>* dfs_post_order_;
GrowableArray<BasicBlockId>* dom_post_order_traversal_;
diff --git a/compiler/dex/ssa_transformation.cc b/compiler/dex/ssa_transformation.cc
index 6f47b8f..0c5a4ca 100644
--- a/compiler/dex/ssa_transformation.cc
+++ b/compiler/dex/ssa_transformation.cc
@@ -167,8 +167,8 @@ void MIRGraph::ComputeDefBlockMatrix() {
}
void MIRGraph::ComputeDomPostOrderTraversal(BasicBlock* bb) {
- if (dom_post_order_traversal_ == NULL) {
- // First time - create the array.
+ if (dom_post_order_traversal_ == NULL || max_num_reachable_blocks_ < num_reachable_blocks_) {
+ // First time or too small - create the array.
dom_post_order_traversal_ =
new (arena_) GrowableArray<BasicBlockId>(arena_, num_reachable_blocks_,
kGrowableArrayDomPostOrderTraversal);
@@ -373,8 +373,8 @@ void MIRGraph::ComputeDominators() {
InitializeDominationInfo(bb);
}
- /* Initalize & Clear i_dom_list */
- if (i_dom_list_ == NULL) {
+ /* Initialize & Clear i_dom_list */
+ if (max_num_reachable_blocks_ < num_reachable_blocks_) {
i_dom_list_ = static_cast<int*>(arena_->Alloc(sizeof(int) * num_reachable_blocks,
kArenaAllocDFInfo));
}
@@ -542,12 +542,8 @@ bool MIRGraph::InsertPhiNodeOperands(BasicBlock* bb) {
/* Iterate through the predecessors */
GrowableArray<BasicBlockId>::Iterator iter(bb->predecessors);
size_t num_uses = bb->predecessors->Size();
- mir->ssa_rep->num_uses = num_uses;
- int* uses = static_cast<int*>(arena_->Alloc(sizeof(int) * num_uses,
- kArenaAllocDFInfo));
- mir->ssa_rep->uses = uses;
- mir->ssa_rep->fp_use =
- static_cast<bool*>(arena_->Alloc(sizeof(bool) * num_uses, kArenaAllocDFInfo));
+ AllocateSSAUseData(mir, num_uses);
+ int* uses = mir->ssa_rep->uses;
BasicBlockId* incoming =
static_cast<BasicBlockId*>(arena_->Alloc(sizeof(BasicBlockId) * num_uses,
kArenaAllocDFInfo));
@@ -556,9 +552,9 @@ bool MIRGraph::InsertPhiNodeOperands(BasicBlock* bb) {
while (true) {
BasicBlock* pred_bb = GetBasicBlock(iter.Next());
if (!pred_bb) {
- break;
+ break;
}
- int ssa_reg = pred_bb->data_flow_info->vreg_to_ssa_map[v_reg];
+ int ssa_reg = pred_bb->data_flow_info->vreg_to_ssa_map_exit[v_reg];
uses[idx] = ssa_reg;
incoming[idx] = pred_bb->id;
idx++;