author     Nicolas Geoffray <ngeoffray@google.com>  2015-05-05 17:02:20 +0100
committer  Nicolas Geoffray <ngeoffray@google.com>  2015-05-07 09:54:09 +0100
commit     db216f4d49ea1561a74261c29f1264952232728a (patch)
tree       8b7914435ad1ba519a3d88b5cca7f0f6e842cd4f
parent     bc3b93eadd155342b6124d2d5ef3806ecec5dfd6 (diff)
Relax the only one back-edge restriction.
The rule gets in the way of better register allocation, as it creates an artificial join point between multiple paths.

Change-Id: Ia4392890f95bcea56d143138f28ddce6c572ad58
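For context: a loop has one back edge for every branch that jumps back to its header. Before this change, HGraph::SimplifyLoop rewrote any loop with several back edges so that they all funneled through one synthesized block. A minimal sketch (illustrative source, not part of the patch) of a loop whose CFG naturally has two back edges:

  // Each operand of the short-circuit condition branches back to the loop
  // header on its own, so the natural CFG has two back edges. SimplifyLoop
  // used to merge them through a synthesized block (block 8 in the tests
  // updated below); after this patch both edges are kept.
  int CountUp(bool (*a)(int), bool (*b)(int)) {
    int i = 0;
    do {
      ++i;
    } while (a(i) || b(i));
    return i;
  }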
-rw-r--r--  compiler/optimizing/bounds_check_elimination.cc   36
-rw-r--r--  compiler/optimizing/code_generator_arm.cc         18
-rw-r--r--  compiler/optimizing/code_generator_arm64.cc       19
-rw-r--r--  compiler/optimizing/code_generator_x86.cc         20
-rw-r--r--  compiler/optimizing/code_generator_x86_64.cc      20
-rw-r--r--  compiler/optimizing/find_loops_test.cc             9
-rw-r--r--  compiler/optimizing/graph_checker.cc              50
-rw-r--r--  compiler/optimizing/liveness_test.cc              58
-rw-r--r--  compiler/optimizing/nodes.cc                      89
-rw-r--r--  compiler/optimizing/nodes.h                       28
-rw-r--r--  compiler/optimizing/ssa_liveness_analysis.cc       9
-rw-r--r--  compiler/optimizing/ssa_liveness_analysis.h        6
-rw-r--r--  compiler/optimizing/ssa_test.cc                   18
13 files changed, 226 insertions(+), 154 deletions(-)
diff --git a/compiler/optimizing/bounds_check_elimination.cc b/compiler/optimizing/bounds_check_elimination.cc
index 92fa6db..b2b5496 100644
--- a/compiler/optimizing/bounds_check_elimination.cc
+++ b/compiler/optimizing/bounds_check_elimination.cc
@@ -281,15 +281,22 @@ class ArrayAccessInsideLoopFinder : public ValueObject {
return false;
}
+ static bool DominatesAllBackEdges(HBasicBlock* block, HLoopInformation* loop_info) {
+ for (size_t i = 0, e = loop_info->GetBackEdges().Size(); i < e; ++i) {
+ HBasicBlock* back_edge = loop_info->GetBackEdges().Get(i);
+ if (!block->Dominates(back_edge)) {
+ return false;
+ }
+ }
+ return true;
+ }
+
void Run() {
HLoopInformation* loop_info = induction_variable_->GetBlock()->GetLoopInformation();
- // Must be simplified loop.
- DCHECK_EQ(loop_info->GetBackEdges().Size(), 1U);
for (HBlocksInLoopIterator it_loop(*loop_info); !it_loop.Done(); it_loop.Advance()) {
HBasicBlock* block = it_loop.Current();
DCHECK(block->IsInLoop());
- HBasicBlock* back_edge = loop_info->GetBackEdges().Get(0);
- if (!block->Dominates(back_edge)) {
+ if (!DominatesAllBackEdges(block, loop_info)) {
// In order not to trigger deoptimization unnecessarily, make sure
// that all array accesses collected are really executed in the loop.
// For array accesses in a branch inside the loop, don't collect the
@@ -1151,9 +1158,26 @@ class BCEVisitor : public HGraphVisitor {
bounds_check->GetBlock()->RemoveInstruction(bounds_check);
}
+ static bool HasSameInputAtBackEdges(HPhi* phi) {
+ DCHECK(phi->IsLoopHeaderPhi());
+ // Start with input 1. Input 0 is from the incoming block.
+ HInstruction* input1 = phi->InputAt(1);
+ DCHECK(phi->GetBlock()->GetLoopInformation()->IsBackEdge(
+ *phi->GetBlock()->GetPredecessors().Get(1)));
+ for (size_t i = 2, e = phi->InputCount(); i < e; ++i) {
+ DCHECK(phi->GetBlock()->GetLoopInformation()->IsBackEdge(
+ *phi->GetBlock()->GetPredecessors().Get(i)));
+ if (input1 != phi->InputAt(i)) {
+ return false;
+ }
+ }
+ return true;
+ }
+
void VisitPhi(HPhi* phi) {
- if (phi->IsLoopHeaderPhi() && phi->GetType() == Primitive::kPrimInt) {
- DCHECK_EQ(phi->InputCount(), 2U);
+ if (phi->IsLoopHeaderPhi()
+ && (phi->GetType() == Primitive::kPrimInt)
+ && HasSameInputAtBackEdges(phi)) {
HInstruction* instruction = phi->InputAt(1);
HInstruction *left;
int32_t increment;
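With a single back edge, a loop-header phi always had exactly two inputs; with several, it can have more, and BCE's induction-variable pattern is only meaningful when every back edge feeds the phi the same value. A hedged illustration (hypothetical source, not from the patch) of a loop where HasSameInputAtBackEdges() returns false, so the phi is now skipped:

  while (true) {
    a[i] = 0;                  // the bounds check BCE would like to remove
    if (p) {
      i += 1;
      if (i < n) continue;     // back edge 1: phi input is i + 1
      break;
    }
    i += 2;
    if (i < n) continue;       // back edge 2: phi input is i + 2
    break;
  }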
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index e4c37de..f56e446 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -112,6 +112,10 @@ class SuspendCheckSlowPathARM : public SlowPathCodeARM {
return &return_label_;
}
+ HBasicBlock* GetSuccessor() const {
+ return successor_;
+ }
+
private:
HSuspendCheck* const instruction_;
// If not null, the block to branch to after the suspend check.
@@ -3539,8 +3543,18 @@ void InstructionCodeGeneratorARM::VisitSuspendCheck(HSuspendCheck* instruction)
void InstructionCodeGeneratorARM::GenerateSuspendCheck(HSuspendCheck* instruction,
HBasicBlock* successor) {
SuspendCheckSlowPathARM* slow_path =
- new (GetGraph()->GetArena()) SuspendCheckSlowPathARM(instruction, successor);
- codegen_->AddSlowPath(slow_path);
+ down_cast<SuspendCheckSlowPathARM*>(instruction->GetSlowPath());
+ if (slow_path == nullptr) {
+ slow_path = new (GetGraph()->GetArena()) SuspendCheckSlowPathARM(instruction, successor);
+ instruction->SetSlowPath(slow_path);
+ codegen_->AddSlowPath(slow_path);
+ if (successor != nullptr) {
+ DCHECK(successor->IsLoopHeader());
+ codegen_->ClearSpillSlotsFromLoopPhisInStackMap(instruction);
+ }
+ } else {
+ DCHECK_EQ(slow_path->GetSuccessor(), successor);
+ }
__ LoadFromOffset(
kLoadUnsignedHalfword, IP, TR, Thread::ThreadFlagsOffset<kArmWordSize>().Int32Value());
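The same lazy-creation pattern recurs in each backend below (ARM64, x86 and x86-64): the first back edge of a loop allocates the slow path, records it on the HSuspendCheck, and clears the loop-phi spill slots from the stack map; subsequent back edges of the same loop reuse the recorded slow path. A condensed, architecture-neutral sketch of that shared logic (hypothetical SuspendCheckSlowPath class; the patch inlines the equivalent code per backend):

  SuspendCheckSlowPath* slow_path =
      down_cast<SuspendCheckSlowPath*>(instruction->GetSlowPath());
  if (slow_path == nullptr) {
    // First back edge of this loop: create and register the slow path.
    slow_path = new (GetGraph()->GetArena()) SuspendCheckSlowPath(instruction, successor);
    instruction->SetSlowPath(slow_path);
    codegen_->AddSlowPath(slow_path);
    if (successor != nullptr) {
      DCHECK(successor->IsLoopHeader());
      codegen_->ClearSpillSlotsFromLoopPhisInStackMap(instruction);
    }
  } else {
    // Later back edges of the same loop reuse the slow path created above,
    // so they must agree on the successor.
    DCHECK_EQ(slow_path->GetSuccessor(), successor);
  }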
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 9e02a1d..0222f93 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -285,6 +285,10 @@ class SuspendCheckSlowPathARM64 : public SlowPathCodeARM64 {
return &return_label_;
}
+ HBasicBlock* GetSuccessor() const {
+ return successor_;
+ }
+
private:
HSuspendCheck* const instruction_;
// If not null, the block to branch to after the suspend check.
@@ -1034,8 +1038,19 @@ void InstructionCodeGeneratorARM64::GenerateMemoryBarrier(MemBarrierKind kind) {
void InstructionCodeGeneratorARM64::GenerateSuspendCheck(HSuspendCheck* instruction,
HBasicBlock* successor) {
SuspendCheckSlowPathARM64* slow_path =
- new (GetGraph()->GetArena()) SuspendCheckSlowPathARM64(instruction, successor);
- codegen_->AddSlowPath(slow_path);
+ down_cast<SuspendCheckSlowPathARM64*>(instruction->GetSlowPath());
+ if (slow_path == nullptr) {
+ slow_path = new (GetGraph()->GetArena()) SuspendCheckSlowPathARM64(instruction, successor);
+ instruction->SetSlowPath(slow_path);
+ codegen_->AddSlowPath(slow_path);
+ if (successor != nullptr) {
+ DCHECK(successor->IsLoopHeader());
+ codegen_->ClearSpillSlotsFromLoopPhisInStackMap(instruction);
+ }
+ } else {
+ DCHECK_EQ(slow_path->GetSuccessor(), successor);
+ }
+
UseScratchRegisterScope temps(codegen_->GetVIXLAssembler());
Register temp = temps.AcquireW();
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 8aa7796..cfb8702 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -153,6 +153,10 @@ class SuspendCheckSlowPathX86 : public SlowPathCodeX86 {
return &return_label_;
}
+ HBasicBlock* GetSuccessor() const {
+ return successor_;
+ }
+
private:
HSuspendCheck* const instruction_;
HBasicBlock* const successor_;
@@ -809,7 +813,6 @@ void InstructionCodeGeneratorX86::VisitGoto(HGoto* got) {
HLoopInformation* info = block->GetLoopInformation();
if (info != nullptr && info->IsBackEdge(*block) && info->HasSuspendCheck()) {
- codegen_->ClearSpillSlotsFromLoopPhisInStackMap(info->GetSuspendCheck());
GenerateSuspendCheck(info->GetSuspendCheck(), successor);
return;
}
@@ -3993,8 +3996,19 @@ void InstructionCodeGeneratorX86::VisitSuspendCheck(HSuspendCheck* instruction)
void InstructionCodeGeneratorX86::GenerateSuspendCheck(HSuspendCheck* instruction,
HBasicBlock* successor) {
SuspendCheckSlowPathX86* slow_path =
- new (GetGraph()->GetArena()) SuspendCheckSlowPathX86(instruction, successor);
- codegen_->AddSlowPath(slow_path);
+ down_cast<SuspendCheckSlowPathX86*>(instruction->GetSlowPath());
+ if (slow_path == nullptr) {
+ slow_path = new (GetGraph()->GetArena()) SuspendCheckSlowPathX86(instruction, successor);
+ instruction->SetSlowPath(slow_path);
+ codegen_->AddSlowPath(slow_path);
+ if (successor != nullptr) {
+ DCHECK(successor->IsLoopHeader());
+ codegen_->ClearSpillSlotsFromLoopPhisInStackMap(instruction);
+ }
+ } else {
+ DCHECK_EQ(slow_path->GetSuccessor(), successor);
+ }
+
__ fs()->cmpw(Address::Absolute(
Thread::ThreadFlagsOffset<kX86WordSize>().Int32Value()), Immediate(0));
if (successor == nullptr) {
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 5ac6866..9d2fc43 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -136,6 +136,10 @@ class SuspendCheckSlowPathX86_64 : public SlowPathCodeX86_64 {
return &return_label_;
}
+ HBasicBlock* GetSuccessor() const {
+ return successor_;
+ }
+
private:
HSuspendCheck* const instruction_;
HBasicBlock* const successor_;
@@ -771,7 +775,6 @@ void InstructionCodeGeneratorX86_64::VisitGoto(HGoto* got) {
HLoopInformation* info = block->GetLoopInformation();
if (info != nullptr && info->IsBackEdge(*block) && info->HasSuspendCheck()) {
- codegen_->ClearSpillSlotsFromLoopPhisInStackMap(info->GetSuspendCheck());
GenerateSuspendCheck(info->GetSuspendCheck(), successor);
return;
}
@@ -3864,8 +3867,19 @@ void InstructionCodeGeneratorX86_64::VisitSuspendCheck(HSuspendCheck* instruction)
void InstructionCodeGeneratorX86_64::GenerateSuspendCheck(HSuspendCheck* instruction,
HBasicBlock* successor) {
SuspendCheckSlowPathX86_64* slow_path =
- new (GetGraph()->GetArena()) SuspendCheckSlowPathX86_64(instruction, successor);
- codegen_->AddSlowPath(slow_path);
+ down_cast<SuspendCheckSlowPathX86_64*>(instruction->GetSlowPath());
+ if (slow_path == nullptr) {
+ slow_path = new (GetGraph()->GetArena()) SuspendCheckSlowPathX86_64(instruction, successor);
+ instruction->SetSlowPath(slow_path);
+ codegen_->AddSlowPath(slow_path);
+ if (successor != nullptr) {
+ DCHECK(successor->IsLoopHeader());
+ codegen_->ClearSpillSlotsFromLoopPhisInStackMap(instruction);
+ }
+ } else {
+ DCHECK_EQ(slow_path->GetSuccessor(), successor);
+ }
+
__ gs()->cmpw(Address::Absolute(
Thread::ThreadFlagsOffset<kX86_64WordSize>().Int32Value(), true), Immediate(0));
if (successor == nullptr) {
diff --git a/compiler/optimizing/find_loops_test.cc b/compiler/optimizing/find_loops_test.cc
index 2bfecc6..8f69f4d 100644
--- a/compiler/optimizing/find_loops_test.cc
+++ b/compiler/optimizing/find_loops_test.cc
@@ -235,14 +235,13 @@ TEST(FindLoopsTest, Loop4) {
TestBlock(graph, 0, false, -1); // entry block
TestBlock(graph, 1, false, -1); // pre header
- const int blocks2[] = {2, 3, 4, 5, 8};
- TestBlock(graph, 2, true, 2, blocks2, 5); // loop header
+ const int blocks2[] = {2, 3, 4, 5};
+ TestBlock(graph, 2, true, 2, blocks2, arraysize(blocks2)); // loop header
TestBlock(graph, 3, false, 2); // block in loop
- TestBlock(graph, 4, false, 2); // original back edge
- TestBlock(graph, 5, false, 2); // original back edge
+ TestBlock(graph, 4, false, 2); // back edge
+ TestBlock(graph, 5, false, 2); // back edge
TestBlock(graph, 6, false, -1); // return block
TestBlock(graph, 7, false, -1); // exit block
- TestBlock(graph, 8, false, 2); // synthesized back edge
}
diff --git a/compiler/optimizing/graph_checker.cc b/compiler/optimizing/graph_checker.cc
index 8ea8f3c..bb27a94 100644
--- a/compiler/optimizing/graph_checker.cc
+++ b/compiler/optimizing/graph_checker.cc
@@ -288,6 +288,7 @@ void SSAChecker::VisitBasicBlock(HBasicBlock* block) {
void SSAChecker::CheckLoop(HBasicBlock* loop_header) {
int id = loop_header->GetBlockId();
+ HLoopInformation* loop_information = loop_header->GetLoopInformation();
// Ensure the pre-header block is first in the list of
// predecessors of a loop header.
@@ -297,57 +298,48 @@ void SSAChecker::CheckLoop(HBasicBlock* loop_header) {
id));
}
- // Ensure the loop header has only two predecessors and that only the
- // second one is a back edge.
+ // Ensure the loop header has only one incoming branch and the remaining
+ // predecessors are back edges.
size_t num_preds = loop_header->GetPredecessors().Size();
if (num_preds < 2) {
AddError(StringPrintf(
"Loop header %d has less than two predecessors: %zu.",
id,
num_preds));
- } else if (num_preds > 2) {
- AddError(StringPrintf(
- "Loop header %d has more than two predecessors: %zu.",
- id,
- num_preds));
} else {
- HLoopInformation* loop_information = loop_header->GetLoopInformation();
HBasicBlock* first_predecessor = loop_header->GetPredecessors().Get(0);
if (loop_information->IsBackEdge(*first_predecessor)) {
AddError(StringPrintf(
"First predecessor of loop header %d is a back edge.",
id));
}
- HBasicBlock* second_predecessor = loop_header->GetPredecessors().Get(1);
- if (!loop_information->IsBackEdge(*second_predecessor)) {
- AddError(StringPrintf(
- "Second predecessor of loop header %d is not a back edge.",
- id));
+ for (size_t i = 1, e = loop_header->GetPredecessors().Size(); i < e; ++i) {
+ HBasicBlock* predecessor = loop_header->GetPredecessors().Get(i);
+ if (!loop_information->IsBackEdge(*predecessor)) {
+ AddError(StringPrintf(
+ "Loop header %d has multiple incoming (non back edge) blocks.",
+ id));
+ }
}
}
- const ArenaBitVector& loop_blocks = loop_header->GetLoopInformation()->GetBlocks();
+ const ArenaBitVector& loop_blocks = loop_information->GetBlocks();
- // Ensure there is only one back edge per loop.
- size_t num_back_edges =
- loop_header->GetLoopInformation()->GetBackEdges().Size();
+ // Ensure back edges belong to the loop.
+ size_t num_back_edges = loop_information->GetBackEdges().Size();
if (num_back_edges == 0) {
AddError(StringPrintf(
"Loop defined by header %d has no back edge.",
id));
- } else if (num_back_edges > 1) {
- AddError(StringPrintf(
- "Loop defined by header %d has several back edges: %zu.",
- id,
- num_back_edges));
} else {
- DCHECK_EQ(num_back_edges, 1u);
- int back_edge_id = loop_header->GetLoopInformation()->GetBackEdges().Get(0)->GetBlockId();
- if (!loop_blocks.IsBitSet(back_edge_id)) {
- AddError(StringPrintf(
- "Loop defined by header %d has an invalid back edge %d.",
- id,
- back_edge_id));
+ for (size_t i = 0; i < num_back_edges; ++i) {
+ int back_edge_id = loop_information->GetBackEdges().Get(i)->GetBlockId();
+ if (!loop_blocks.IsBitSet(back_edge_id)) {
+ AddError(StringPrintf(
+ "Loop defined by header %d has an invalid back edge %d.",
+ id,
+ back_edge_id));
+ }
}
}
diff --git a/compiler/optimizing/liveness_test.cc b/compiler/optimizing/liveness_test.cc
index 8a96ee9..1914339 100644
--- a/compiler/optimizing/liveness_test.cc
+++ b/compiler/optimizing/liveness_test.cc
@@ -445,44 +445,40 @@ TEST(LivenessTest, Loop5) {
TEST(LivenessTest, Loop6) {
// Bitsets are made of:
- // (constant0, constant4, constant5, phi in block 2, phi in block 8)
+ // (constant0, constant4, constant5, phi in block 2)
const char* expected =
"Block 0\n"
- " live in: (00000)\n"
- " live out: (11100)\n"
- " kill: (11100)\n"
+ " live in: (0000)\n"
+ " live out: (1110)\n"
+ " kill: (1110)\n"
"Block 1\n"
- " live in: (11100)\n"
- " live out: (01100)\n"
- " kill: (00000)\n"
+ " live in: (1110)\n"
+ " live out: (0110)\n"
+ " kill: (0000)\n"
"Block 2\n" // loop header
- " live in: (01100)\n"
- " live out: (01110)\n"
- " kill: (00010)\n"
+ " live in: (0110)\n"
+ " live out: (0111)\n"
+ " kill: (0001)\n"
"Block 3\n"
- " live in: (01100)\n"
- " live out: (01100)\n"
- " kill: (00000)\n"
- "Block 4\n" // original back edge
- " live in: (01100)\n"
- " live out: (01100)\n"
- " kill: (00000)\n"
- "Block 5\n" // original back edge
- " live in: (01100)\n"
- " live out: (01100)\n"
- " kill: (00000)\n"
+ " live in: (0110)\n"
+ " live out: (0110)\n"
+ " kill: (0000)\n"
+ "Block 4\n" // back edge
+ " live in: (0110)\n"
+ " live out: (0110)\n"
+ " kill: (0000)\n"
+ "Block 5\n" // back edge
+ " live in: (0110)\n"
+ " live out: (0110)\n"
+ " kill: (0000)\n"
"Block 6\n" // return block
- " live in: (00010)\n"
- " live out: (00000)\n"
- " kill: (00000)\n"
+ " live in: (0001)\n"
+ " live out: (0000)\n"
+ " kill: (0000)\n"
"Block 7\n" // exit block
- " live in: (00000)\n"
- " live out: (00000)\n"
- " kill: (00000)\n"
- "Block 8\n" // synthesized back edge
- " live in: (01100)\n"
- " live out: (01100)\n"
- " kill: (00001)\n";
+ " live in: (0000)\n"
+ " live out: (0000)\n"
+ " kill: (0000)\n";
const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index d3ee770..ab56aff 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -191,24 +191,6 @@ void HGraph::SplitCriticalEdge(HBasicBlock* block, HBasicBlock* successor) {
void HGraph::SimplifyLoop(HBasicBlock* header) {
HLoopInformation* info = header->GetLoopInformation();
- // If there are more than one back edge, make them branch to the same block that
- // will become the only back edge. This simplifies finding natural loops in the
- // graph.
- // Also, if the loop is a do/while (that is the back edge is an if), change the
- back edge to be a goto. This simplifies code generation of suspend checks.
- if (info->NumberOfBackEdges() > 1 || info->GetBackEdges().Get(0)->GetLastInstruction()->IsIf()) {
- HBasicBlock* new_back_edge = new (arena_) HBasicBlock(this, header->GetDexPc());
- AddBlock(new_back_edge);
- new_back_edge->AddInstruction(new (arena_) HGoto());
- for (size_t pred = 0, e = info->GetBackEdges().Size(); pred < e; ++pred) {
- HBasicBlock* back_edge = info->GetBackEdges().Get(pred);
- back_edge->ReplaceSuccessor(header, new_back_edge);
- }
- info->ClearBackEdges();
- info->AddBackEdge(new_back_edge);
- new_back_edge->AddSuccessor(header);
- }
-
// Make sure the loop has only one pre header. This simplifies SSA building by having
// to just look at the pre header to know which locals are initialized at entry of the
// loop.
@@ -218,11 +200,9 @@ void HGraph::SimplifyLoop(HBasicBlock* header) {
AddBlock(pre_header);
pre_header->AddInstruction(new (arena_) HGoto());
- ArenaBitVector back_edges(arena_, GetBlocks().Size(), false);
- HBasicBlock* back_edge = info->GetBackEdges().Get(0);
for (size_t pred = 0; pred < header->GetPredecessors().Size(); ++pred) {
HBasicBlock* predecessor = header->GetPredecessors().Get(pred);
- if (predecessor != back_edge) {
+ if (!info->IsBackEdge(*predecessor)) {
predecessor->ReplaceSuccessor(header, pre_header);
pred--;
}
@@ -230,9 +210,17 @@ void HGraph::SimplifyLoop(HBasicBlock* header) {
pre_header->AddSuccessor(header);
}
- // Make sure the second predecessor of a loop header is the back edge.
- if (header->GetPredecessors().Get(1) != info->GetBackEdges().Get(0)) {
- header->SwapPredecessors();
+ // Make sure the first predecessor of a loop header is the incoming block.
+ if (info->IsBackEdge(*header->GetPredecessors().Get(0))) {
+ HBasicBlock* to_swap = header->GetPredecessors().Get(0);
+ for (size_t pred = 1, e = header->GetPredecessors().Size(); pred < e; ++pred) {
+ HBasicBlock* predecessor = header->GetPredecessors().Get(pred);
+ if (!info->IsBackEdge(*predecessor)) {
+ header->predecessors_.Put(pred, to_swap);
+ header->predecessors_.Put(0, predecessor);
+ break;
+ }
+ }
}
// Place the suspend check at the beginning of the header, so that live registers
@@ -357,21 +345,22 @@ void HLoopInformation::PopulateRecursive(HBasicBlock* block) {
}
bool HLoopInformation::Populate() {
- DCHECK_EQ(GetBackEdges().Size(), 1u);
- HBasicBlock* back_edge = GetBackEdges().Get(0);
- DCHECK(back_edge->GetDominator() != nullptr);
- if (!header_->Dominates(back_edge)) {
- // This loop is not natural. Do not bother going further.
- return false;
- }
+ for (size_t i = 0, e = GetBackEdges().Size(); i < e; ++i) {
+ HBasicBlock* back_edge = GetBackEdges().Get(i);
+ DCHECK(back_edge->GetDominator() != nullptr);
+ if (!header_->Dominates(back_edge)) {
+ // This loop is not natural. Do not bother going further.
+ return false;
+ }
- // Populate this loop: starting with the back edge, recursively add predecessors
- // that are not already part of that loop. Set the header as part of the loop
- // to end the recursion.
- // This is a recursive implementation of the algorithm described in
- // "Advanced Compiler Design & Implementation" (Muchnick) p192.
- blocks_.SetBit(header_->GetBlockId());
- PopulateRecursive(back_edge);
+ // Populate this loop: starting with the back edge, recursively add predecessors
+ // that are not already part of that loop. Set the header as part of the loop
+ // to end the recursion.
+ // This is a recursive implementation of the algorithm described in
+ // "Advanced Compiler Design & Implementation" (Muchnick) p192.
+ blocks_.SetBit(header_->GetBlockId());
+ PopulateRecursive(back_edge);
+ }
return true;
}
@@ -387,6 +376,14 @@ bool HLoopInformation::IsIn(const HLoopInformation& other) const {
return other.blocks_.IsBitSet(header_->GetBlockId());
}
+size_t HLoopInformation::GetLifetimeEnd() const {
+ size_t last_position = 0;
+ for (size_t i = 0, e = back_edges_.Size(); i < e; ++i) {
+ last_position = std::max(back_edges_.Get(i)->GetLifetimeEnd(), last_position);
+ }
+ return last_position;
+}
+
bool HBasicBlock::Dominates(HBasicBlock* other) const {
// Walk up the dominator tree from `other`, to find out if `this`
// is an ancestor.
@@ -963,8 +960,9 @@ void HBasicBlock::DisconnectAndDelete() {
HLoopInformation* loop_info = it.Current();
loop_info->Remove(this);
if (loop_info->IsBackEdge(*this)) {
- // This deliberately leaves the loop in an inconsistent state and will
- // fail SSAChecker unless the entire loop is removed during the pass.
+ // If this was the last back edge of the loop, we deliberately leave the
+ // loop in an inconsistent state and will fail SSAChecker unless the
+ // entire loop is removed during the pass.
loop_info->RemoveBackEdge(this);
}
}
@@ -1075,8 +1073,7 @@ void HBasicBlock::MergeWith(HBasicBlock* other) {
HLoopInformation* loop_info = it.Current();
loop_info->Remove(other);
if (loop_info->IsBackEdge(*other)) {
- loop_info->ClearBackEdges();
- loop_info->AddBackEdge(this);
+ loop_info->ReplaceBackEdge(other, this);
}
}
@@ -1307,11 +1304,9 @@ void HGraph::InlineInto(HGraph* outer_graph, HInvoke* invoke) {
loop_it.Current()->Add(to);
}
if (info->IsBackEdge(*at)) {
- // Only `at` can become a back edge, as the inlined blocks
- // are predecessors of `at`.
- DCHECK_EQ(1u, info->NumberOfBackEdges());
- info->ClearBackEdges();
- info->AddBackEdge(to);
+ // Only `to` can become a back edge, as the inlined blocks
+ // are predecessors of `to`.
+ info->ReplaceBackEdge(at, to);
}
}
}
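Two details in the nodes.cc changes above are worth spelling out. SimplifyLoop can no longer just swap two predecessors: it searches for the unique non-back-edge predecessor and moves it to slot 0, preserving the convention that a loop-header phi's input 0 comes from the incoming block and inputs 1..n from back edges. A hedged illustration with hypothetical contents:

  // Before: header->predecessors_ = { back_edge_a, pre_header, back_edge_b }
  // The swap loop finds pre_header (the only non-back-edge) at index 1:
  // After:  header->predecessors_ = { pre_header, back_edge_a, back_edge_b }

And Populate() now runs the Muchnick population step once per back edge, so a block belongs to the loop if it is reachable by walking predecessors from any of the back edges.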
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 63f3c95..53a4d84 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -48,6 +48,7 @@ class HPhi;
class HSuspendCheck;
class LiveInterval;
class LocationSummary;
+class SlowPathCode;
class SsaBuilder;
static const int kDefaultNumberOfBlocks = 8;
@@ -397,16 +398,21 @@ class HLoopInformation : public ArenaObject<kArenaAllocMisc> {
return back_edges_;
}
- HBasicBlock* GetSingleBackEdge() const {
- DCHECK_EQ(back_edges_.Size(), 1u);
- return back_edges_.Get(0);
- }
+ // Returns the lifetime position of the back edge that has the
+ // greatest lifetime position.
+ size_t GetLifetimeEnd() const;
- void ClearBackEdges() {
- back_edges_.Reset();
+ void ReplaceBackEdge(HBasicBlock* existing, HBasicBlock* new_back_edge) {
+ for (size_t i = 0, e = back_edges_.Size(); i < e; ++i) {
+ if (back_edges_.Get(i) == existing) {
+ back_edges_.Put(i, new_back_edge);
+ return;
+ }
+ }
+ UNREACHABLE();
}
- // Find blocks that are part of this loop. Returns whether the loop is a natural loop,
+ // Finds blocks that are part of this loop. Returns whether the loop is a natural loop,
// that is the header dominates the back edge.
bool Populate();
@@ -3247,19 +3253,25 @@ class HTemporary : public HTemplateInstruction<0> {
class HSuspendCheck : public HTemplateInstruction<0> {
public:
explicit HSuspendCheck(uint32_t dex_pc)
- : HTemplateInstruction(SideEffects::None()), dex_pc_(dex_pc) {}
+ : HTemplateInstruction(SideEffects::None()), dex_pc_(dex_pc), slow_path_(nullptr) {}
bool NeedsEnvironment() const OVERRIDE {
return true;
}
uint32_t GetDexPc() const { return dex_pc_; }
+ void SetSlowPath(SlowPathCode* slow_path) { slow_path_ = slow_path; }
+ SlowPathCode* GetSlowPath() const { return slow_path_; }
DECLARE_INSTRUCTION(SuspendCheck);
private:
const uint32_t dex_pc_;
+ // Only used for code generation, in order to share the same slow path between back edges
+ // of a same loop.
+ SlowPathCode* slow_path_;
+
DISALLOW_COPY_AND_ASSIGN(HSuspendCheck);
};
diff --git a/compiler/optimizing/ssa_liveness_analysis.cc b/compiler/optimizing/ssa_liveness_analysis.cc
index 1784168..09a6648 100644
--- a/compiler/optimizing/ssa_liveness_analysis.cc
+++ b/compiler/optimizing/ssa_liveness_analysis.cc
@@ -75,9 +75,7 @@ void SsaLivenessAnalysis::LinearizeGraph() {
HBasicBlock* block = it.Current();
size_t number_of_forward_predecessors = block->GetPredecessors().Size();
if (block->IsLoopHeader()) {
- // We rely on having simplified the CFG.
- DCHECK_EQ(1u, block->GetLoopInformation()->NumberOfBackEdges());
- number_of_forward_predecessors--;
+ number_of_forward_predecessors -= block->GetLoopInformation()->NumberOfBackEdges();
}
forward_predecessors.Put(block->GetBlockId(), number_of_forward_predecessors);
}
@@ -264,13 +262,12 @@ void SsaLivenessAnalysis::ComputeLiveRanges() {
}
if (block->IsLoopHeader()) {
- HBasicBlock* back_edge = block->GetLoopInformation()->GetBackEdges().Get(0);
+ size_t last_position = block->GetLoopInformation()->GetLifetimeEnd();
// For all live_in instructions at the loop header, we need to create a range
// that covers the full loop.
for (uint32_t idx : live_in->Indexes()) {
HInstruction* current = instructions_from_ssa_index_.Get(idx);
- current->GetLiveInterval()->AddLoopRange(block->GetLifetimeStart(),
- back_edge->GetLifetimeEnd());
+ current->GetLiveInterval()->AddLoopRange(block->GetLifetimeStart(), last_position);
}
}
}
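A worked example of the liveness change (hypothetical positions): if a loop header starts at lifetime position 10 and its back edges end at positions 24 and 30, the new HLoopInformation::GetLifetimeEnd() returns the maximum, 30, so each live-in instruction gets a range covering the whole loop:

  // last_position = std::max(24, 30) == 30
  current->GetLiveInterval()->AddLoopRange(10, 30);  // covers both back edges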
diff --git a/compiler/optimizing/ssa_liveness_analysis.h b/compiler/optimizing/ssa_liveness_analysis.h
index 7b98c4e..b550d8a 100644
--- a/compiler/optimizing/ssa_liveness_analysis.h
+++ b/compiler/optimizing/ssa_liveness_analysis.h
@@ -973,7 +973,11 @@ class LiveInterval : public ArenaObject<kArenaAllocMisc> {
break;
}
- size_t back_edge_use_position = current->GetSingleBackEdge()->GetLifetimeEnd();
+ // We're only adding a synthesized use at the last back edge. Adding synthesized uses on
+ // all back edges is not necessary: anything used in the loop will have its use at the
+ // last back edge. If we want one branch in a loop to get better register allocation than
+ // another, then it is the linear order we should change.
+ size_t back_edge_use_position = current->GetLifetimeEnd();
if ((first_use_ != nullptr) && (first_use_->GetPosition() <= back_edge_use_position)) {
// There was a use already seen in this loop. Therefore the previous call to `AddUse`
// already inserted the backedge use. We can stop going outward.
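To make the new comment concrete (hypothetical positions): for a loop spanning positions 10..30 with back edges ending at 24 and 30, current->GetLifetimeEnd() is 30, and a single synthesized use at 30 keeps a value live across both back edges; an extra use at 24 would add nothing, since the interval already covers it.

  // Back edges end at 24 and 30 => back_edge_use_position == 30; one
  // synthesized use at 30 also covers the path through the edge at 24.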
diff --git a/compiler/optimizing/ssa_test.cc b/compiler/optimizing/ssa_test.cc
index 00c241b..4cc9c3e 100644
--- a/compiler/optimizing/ssa_test.cc
+++ b/compiler/optimizing/ssa_test.cc
@@ -373,30 +373,26 @@ TEST(SsaTest, Loop6) {
const char* expected =
"BasicBlock 0, succ: 1\n"
" 0: IntConstant 0 [5]\n"
- " 1: IntConstant 4 [14, 8, 8]\n"
- " 2: IntConstant 5 [14]\n"
+ " 1: IntConstant 4 [5, 8, 8]\n"
+ " 2: IntConstant 5 [5]\n"
" 3: Goto\n"
"BasicBlock 1, pred: 0, succ: 2\n"
" 4: Goto\n"
- "BasicBlock 2, pred: 1, 8, succ: 6, 3\n"
- " 5: Phi(0, 14) [12, 6, 6]\n"
+ "BasicBlock 2, pred: 1, 4, 5, succ: 6, 3\n"
+ " 5: Phi(0, 2, 1) [12, 6, 6]\n"
" 6: Equal(5, 5) [7]\n"
" 7: If(6)\n"
"BasicBlock 3, pred: 2, succ: 5, 4\n"
" 8: Equal(1, 1) [9]\n"
" 9: If(8)\n"
- "BasicBlock 4, pred: 3, succ: 8\n"
+ "BasicBlock 4, pred: 3, succ: 2\n"
" 10: Goto\n"
- "BasicBlock 5, pred: 3, succ: 8\n"
+ "BasicBlock 5, pred: 3, succ: 2\n"
" 11: Goto\n"
"BasicBlock 6, pred: 2, succ: 7\n"
" 12: Return(5)\n"
"BasicBlock 7, pred: 6\n"
- " 13: Exit\n"
- // Synthesized single back edge of loop.
- "BasicBlock 8, pred: 5, 4, succ: 2\n"
- " 14: Phi(1, 2) [5]\n"
- " 15: Goto\n";
+ " 13: Exit\n";
const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,