Diffstat (limited to 'compiler')
-rw-r--r--  compiler/dex/type_inference.cc              16
-rw-r--r--  compiler/optimizing/optimizing_compiler.cc  17
2 files changed, 22 insertions, 11 deletions
diff --git a/compiler/dex/type_inference.cc b/compiler/dex/type_inference.cc
index a0dfcbe..c93fe20 100644
--- a/compiler/dex/type_inference.cc
+++ b/compiler/dex/type_inference.cc
@@ -572,15 +572,21 @@ void TypeInference::Finish() {
   if (type_conflict) {
     /*
-     * We don't normally expect to see a Dalvik register definition used both as a
-     * floating point and core value, though technically it could happen with constants.
-     * Until we have proper typing, detect this situation and disable register promotion
-     * (which relies on the distinction between core and fp usages).
+     * Each Dalvik register definition should be used either as a reference, an
+     * integer, or a floating point value. We don't normally expect to see a Dalvik
+     * register definition used in two or three of these roles, though technically it
+     * could happen with constants (0 for all three roles, non-zero for integer and
+     * FP). Detect this situation and disable optimizations that rely on correct
+     * typing, i.e. register promotion, GVN/LVN, and GVN-based DCE.
      */
     LOG(WARNING) << PrettyMethod(cu_->method_idx, *cu_->dex_file)
         << " has type conflict block for sreg " << conflict_s_reg
         << ", disabling register promotion.";
-    cu_->disable_opt |= (1 << kPromoteRegs);
+    cu_->disable_opt |=
+        (1u << kPromoteRegs) |
+        (1u << kGlobalValueNumbering) |
+        (1u << kGvnDeadCodeElimination) |
+        (1u << kLocalValueNumbering);
   }
 }
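
The mask set in the hunk above only has an effect because each Quick pass
checks its own bit in cu_->disable_opt before running. A minimal,
self-contained sketch of that gating pattern follows; the enum values, the
CompilationUnit layout, and the CanRunPass() helper are illustrative
assumptions, not code from this tree.

    // Sketch only: hypothetical, simplified types mirroring the ones used above.
    #include <cstdint>

    enum OptControlBits {  // Stand-ins for the kPromoteRegs/... enumerators.
      kPromoteRegs = 0,
      kLocalValueNumbering,
      kGlobalValueNumbering,
      kGvnDeadCodeElimination,
    };

    struct CompilationUnit {
      uint32_t disable_opt = 0u;  // Bit i set => optimization i must not run.
    };

    // A pass may run only while its bit is clear; the patch sets four bits at
    // once so promotion, GVN/LVN, and GVN-based DCE all stay off together.
    inline bool CanRunPass(const CompilationUnit& cu, OptControlBits bit) {
      return (cu.disable_opt & (1u << bit)) == 0u;
    }
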
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 75217c8..f1293b7 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -492,6 +492,16 @@ CompiledMethod* OptimizingCompiler::TryCompile(const DexFile::CodeItem* code_ite
     instruction_set = kThumb2;
   }
 
+  // `run_optimizations_` is set explicitly (either through a compiler filter
+  // or the debuggable flag). If it is unset, we can use the baseline compiler;
+  // otherwise, methods the optimizing compiler cannot handle fall back to Quick.
+  bool should_use_baseline = !run_optimizations_;
+  bool can_optimize = CanOptimize(*code_item);
+  if (!can_optimize && !should_use_baseline) {
+    // We know we will not compile this method. Bail out before doing any work.
+    return nullptr;
+  }
+
   // Do not attempt to compile on architectures we do not support.
   if (!IsInstructionSetSupported(instruction_set)) {
     MaybeRecordStat(MethodCompilationStat::kNotCompiledUnsupportedIsa);
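
Together with the later hunks, this early bailout yields a three-way dispatch.
A compact sketch of the decision logic, with hypothetical names (the real code
inlines these tests in OptimizingCompiler::TryCompile()):

    enum class Backend { kBaseline, kOptimizing, kQuickFallback };

    // Summarizes the dispatch this patch implements (sketch, not actual API).
    Backend ChooseBackend(bool run_optimizations, bool can_optimize) {
      if (!run_optimizations) {
        return Backend::kBaseline;       // should_use_baseline == true
      }
      if (!can_optimize) {
        return Backend::kQuickFallback;  // the early `return nullptr` above
      }
      // Still subject to RegisterAllocator::CanAllocateRegistersFor() later.
      return Backend::kOptimizing;
    }
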
@@ -565,13 +575,8 @@ CompiledMethod* OptimizingCompiler::TryCompile(const DexFile::CodeItem* code_ite
     }
   }
-  bool can_optimize = CanOptimize(*code_item);
   bool can_allocate_registers = RegisterAllocator::CanAllocateRegistersFor(*graph, instruction_set);
-  // `run_optimizations_` is set explicitly (either through a compiler filter
-  // or the debuggable flag). If it is set, we can run baseline. Otherwise, we fall back
-  // to Quick.
-  bool can_use_baseline = !run_optimizations_;
   if (run_optimizations_ && can_optimize && can_allocate_registers) {
     VLOG(compiler) << "Optimizing " << method_name;
@@ -594,7 +599,7 @@ CompiledMethod* OptimizingCompiler::TryCompile(const DexFile::CodeItem* code_ite
   } else if (shouldOptimize && can_allocate_registers) {
     LOG(FATAL) << "Could not allocate registers in optimizing compiler";
     UNREACHABLE();
-  } else if (can_use_baseline) {
+  } else if (should_use_baseline) {
     VLOG(compiler) << "Compile baseline " << method_name;
     if (!run_optimizations_) {