author     buzbee <buzbee@google.com>   2013-08-19 07:37:40 -0700
committer  buzbee <buzbee@google.com>   2013-08-19 07:37:40 -0700
commit     cbcfaf3a410e35730c4daeaff6c791665764925a (patch)
tree       92f836a197f74ee912135fd4bb389cd261e6d4a5 /compiler/dex/mir_optimization.cc
parent     212ec8f32919d50a1e1cb7ea4b3b91ca938ae4e6 (diff)
Fix suspend check optimization
Art's Quick compiler currently uses a conservative mechanism to ensure that a safe point will be reached within a "small" amount of time: explicit suspend checks are placed prior to backwards branches and on returns. There are many ways to optimize this, which we'll get to in the future, but for now the only optimization is to detect a backwards branch that targets a return block. That's a common pattern in dex and simple to detect; in those cases we can suppress the suspend check on the backwards branch, knowing that the return will do it.

However, the notion of what is a backwards branch got a bit muddied by some mir optimizations that transform the graph by changing the sense of branches. What started off as a taken backwards branch may turn into a fallthrough backwards branch. This CL avoids the confusion by marking branches as backwards based on their original dex targets rather than using a post-transform test of backwardness.

Change-Id: I9b30be168c801af51bae7f66ecd442edcb115a18
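To make the fix concrete, here is a minimal, self-contained C++ sketch of marking a branch as backwards from its original dex target offset. The struct and helper names below are hypothetical simplifications for illustration only; they are not the actual ART MIR data structures or the code touched by this CL.

    #include <cstdint>

    // Hypothetical, simplified stand-in for a MIR conditional branch; the real
    // ART MIR node carries much more state.
    struct BranchSketch {
      uint32_t offset;        // dex offset of the branch instruction itself
      uint32_t dex_target;    // original dex offset the branch jumps to
      bool backwards_branch;  // recorded once, before any graph transformations
    };

    // Decide "backwards" from the original dex offsets, so later MIR
    // optimizations that flip the sense of the branch (swapping taken and
    // fall-through) cannot change the answer.
    inline void MarkBackwardsBranch(BranchSketch* mir) {
      mir->backwards_branch = (mir->dex_target <= mir->offset);
    }

With the flag recorded this way, the suspend-check suppression in the diff below only fires for branches that were backwards in the original dex, regardless of later graph transformations.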
Diffstat (limited to 'compiler/dex/mir_optimization.cc')
-rw-r--r--  compiler/dex/mir_optimization.cc | 12
1 file changed, 10 insertions(+), 2 deletions(-)
diff --git a/compiler/dex/mir_optimization.cc b/compiler/dex/mir_optimization.cc
index a6314f4..82ba6e3 100644
--- a/compiler/dex/mir_optimization.cc
+++ b/compiler/dex/mir_optimization.cc
@@ -153,7 +153,14 @@ static BasicBlock* NextDominatedBlock(BasicBlock* bb) {
}
DCHECK((bb->block_type == kEntryBlock) || (bb->block_type == kDalvikByteCode)
|| (bb->block_type == kExitBlock));
- bb = bb->fall_through;
+ if (((bb->taken != NULL) && (bb->fall_through == NULL)) &&
+ ((bb->taken->block_type == kDalvikByteCode) || (bb->taken->block_type == kExitBlock))) {
+ // Follow simple unconditional branches.
+ bb = bb->taken;
+ } else {
+ // Follow simple fallthrough
+ bb = (bb->taken != NULL) ? NULL : bb->fall_through;
+ }
if (bb == NULL || (Predecessors(bb) != 1)) {
return NULL;
}
@@ -303,7 +310,8 @@ bool MIRGraph::BasicBlockOpt(BasicBlock* bb) {
case Instruction::IF_GEZ:
case Instruction::IF_GTZ:
case Instruction::IF_LEZ:
- if (bb->taken->dominates_return) {
+ // If we've got a backwards branch to return, no need to suspend check.
+ if ((bb->taken->dominates_return) && (mir->backwards_branch)) {
mir->optimization_flags |= MIR_IGNORE_SUSPEND_CHECK;
if (cu_->verbose) {
LOG(INFO) << "Suppressed suspend check on branch to return at 0x" << std::hex << mir->offset;