From 9820b7c1dc70e75ad405b9e6e63578fa9fe94e94 Mon Sep 17 00:00:00 2001
From: Vladimir Marko
Date: Thu, 2 Jan 2014 16:40:37 +0000
Subject: Early inlining of simple methods.

Inlining "special" methods: empty methods, methods returning
constants or their arguments, simple getters and setters.

Bug: 8164439
Change-Id: I8c7fa9c14351fbb2470000b378a22974daaef236
---
 compiler/dex/bb_optimizations.h               |  28 ++++
 compiler/dex/frontend.cc                      |   1 +
 compiler/dex/frontend.h                       |   1 +
 compiler/dex/local_value_numbering.cc         |  20 ++-
 compiler/dex/mir_dataflow.cc                  |   2 +-
 compiler/dex/mir_graph.h                      |   7 +
 compiler/dex/mir_method_info.cc               |   8 +-
 compiler/dex/mir_method_info.h                |   8 +-
 compiler/dex/mir_optimization.cc              |  93 ++++++++++++
 compiler/dex/pass_driver.cc                   |   1 +
 compiler/dex/quick/dex_file_method_inliner.cc | 208 ++++++++++++++++++++++++++
 compiler/dex/quick/dex_file_method_inliner.h  |  20 ++-
 compiler/dex/quick/gen_invoke.cc              |  10 ++
 compiler/dex/quick/mir_to_lir.cc              |   6 +-
 compiler/driver/compiler_driver-inl.h         |  10 ++
 compiler/driver/compiler_driver.h             |   4 +
 16 files changed, 413 insertions(+), 14 deletions(-)
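For reference, these are the "special" method shapes the new pass recognizes, sketched as a hypothetical C++ class (the actual targets are Java methods in dex bytecode; the class and method names here are invented). The trailing comments give the InlineMethod opcode each shape maps to in GenInline() further below:

    // Hypothetical class; names are illustrative only.
    class Example {
     public:
      void Nop() {}                           // Empty method        -> kInlineOpNop.
      int ReturnConst() { return 42; }        // Returns a constant  -> kInlineOpNonWideConst.
      int ReturnArg(int x) { return x; }      // Returns an argument -> kInlineOpReturnArg.
      int GetField() { return field_; }       // Simple getter       -> kInlineOpIGet.
      void SetField(int v) { field_ = v; }    // Simple setter       -> kInlineOpIPut.

     private:
      int field_;
    };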
diff --git a/compiler/dex/bb_optimizations.h b/compiler/dex/bb_optimizations.h
index fb482bf..6d500a5 100644
--- a/compiler/dex/bb_optimizations.h
+++ b/compiler/dex/bb_optimizations.h
@@ -59,6 +59,34 @@ class CacheMethodLoweringInfo : public Pass {
 };
 
 /**
+ * @class CallInlining
+ * @brief Perform the method inlining pass.
+ */
+class CallInlining : public Pass {
+ public:
+  CallInlining() : Pass("CallInlining") {
+  }
+
+  bool Gate(const CompilationUnit* cUnit) const {
+    return cUnit->mir_graph->InlineCallsGate();
+  }
+
+  void Start(CompilationUnit* cUnit) const {
+    cUnit->mir_graph->InlineCallsStart();
+  }
+
+  bool WalkBasicBlocks(CompilationUnit* cUnit, BasicBlock* bb) const {
+    cUnit->mir_graph->InlineCalls(bb);
+    // No need to repeat, so just return false.
+    return false;
+  }
+
+  void End(CompilationUnit* cUnit) const {
+    cUnit->mir_graph->InlineCallsEnd();
+  }
+};
+
+/**
  * @class CodeLayout
  * @brief Perform the code layout pass.
  */
diff --git a/compiler/dex/frontend.cc b/compiler/dex/frontend.cc
index 4485b15..5a26064 100644
--- a/compiler/dex/frontend.cc
+++ b/compiler/dex/frontend.cc
@@ -52,6 +52,7 @@ static uint32_t kCompilerOptimizerDisableFlags = 0 | // Disable specific optimi
   // (1 << kMatch) |
   // (1 << kPromoteCompilerTemps) |
   // (1 << kSuppressExceptionEdges) |
+  // (1 << kSuppressMethodInlining) |
   0;
 
 static uint32_t kCompilerDebugFlags = 0 |     // Enable debug/testing modes
diff --git a/compiler/dex/frontend.h b/compiler/dex/frontend.h
index 37c85b1..f714ecd 100644
--- a/compiler/dex/frontend.h
+++ b/compiler/dex/frontend.h
@@ -53,6 +53,7 @@ enum opt_control_vector {
   kPromoteCompilerTemps,
   kBranchFusing,
   kSuppressExceptionEdges,
+  kSuppressMethodInlining,
 };
 
 // Force code generation paths for testing.
diff --git a/compiler/dex/local_value_numbering.cc b/compiler/dex/local_value_numbering.cc
index 61c6767..45167a8 100644
--- a/compiler/dex/local_value_numbering.cc
+++ b/compiler/dex/local_value_numbering.cc
@@ -196,8 +196,10 @@ uint16_t LocalValueNumbering::GetValueNumber(MIR* mir) {
       // Intentional fall-through.
     case Instruction::INVOKE_STATIC:
     case Instruction::INVOKE_STATIC_RANGE:
-      AdvanceGlobalMemory();
-      MakeArgsAliasing(mir);
+      if ((mir->optimization_flags & MIR_INLINED) == 0) {
+        AdvanceGlobalMemory();
+        MakeArgsAliasing(mir);
+      }
       break;
 
     case Instruction::MOVE_RESULT:
@@ -213,13 +215,17 @@ uint16_t LocalValueNumbering::GetValueNumber(MIR* mir) {
     case Instruction::CONST_STRING_JUMBO:
     case Instruction::CONST_CLASS:
     case Instruction::NEW_ARRAY:
-      // 1 result, treat as unique each time, use result s_reg - will be unique.
-      res = MarkNonAliasingNonNull(mir);
+      if ((mir->optimization_flags & MIR_INLINED) == 0) {
+        // 1 result, treat as unique each time, use result s_reg - will be unique.
+        res = MarkNonAliasingNonNull(mir);
+      }
       break;
     case Instruction::MOVE_RESULT_WIDE:
-      // 1 wide result, treat as unique each time, use result s_reg - will be unique.
-      res = GetOperandValueWide(mir->ssa_rep->defs[0]);
-      SetOperandValueWide(mir->ssa_rep->defs[0], res);
+      if ((mir->optimization_flags & MIR_INLINED) == 0) {
+        // 1 wide result, treat as unique each time, use result s_reg - will be unique.
+        res = GetOperandValueWide(mir->ssa_rep->defs[0]);
+        SetOperandValueWide(mir->ssa_rep->defs[0], res);
+      }
       break;
 
     case kMirOpPhi:
diff --git a/compiler/dex/mir_dataflow.cc b/compiler/dex/mir_dataflow.cc
index 1c0205d..36f1be7 100644
--- a/compiler/dex/mir_dataflow.cc
+++ b/compiler/dex/mir_dataflow.cc
@@ -1001,7 +1001,7 @@ bool MIRGraph::DoSSAConversion(BasicBlock* bb) {
         static_cast<int>(kNumPackedOpcodes)) {
       int flags = Instruction::FlagsOf(mir->dalvikInsn.opcode);
 
-      if (flags & Instruction::kInvoke) {
+      if ((flags & Instruction::kInvoke) != 0 && (mir->optimization_flags & MIR_INLINED) == 0) {
        attributes_ &= ~METHOD_IS_LEAF;
       }
     }
diff --git a/compiler/dex/mir_graph.h b/compiler/dex/mir_graph.h
index 036dd84..fd25798 100644
--- a/compiler/dex/mir_graph.h
+++ b/compiler/dex/mir_graph.h
@@ -522,6 +522,8 @@ class MIRGraph {
     return method_lowering_infos_.GetRawStorage()[mir->meta.method_lowering_info];
   }
 
+  void ComputeInlineIFieldLoweringInfo(uint16_t field_idx, MIR* invoke, MIR* iget_or_iput);
+
   void InitRegLocations();
 
   void RemapRegLocations();
@@ -811,6 +813,11 @@ class MIRGraph {
   BasicBlock* NextDominatedBlock(BasicBlock* bb);
   bool LayoutBlocks(BasicBlock* bb);
 
+  bool InlineCallsGate();
+  void InlineCallsStart();
+  void InlineCalls(BasicBlock* bb);
+  void InlineCallsEnd();
+
   /**
    * @brief Perform the initial preparation for the Method Uses.
    */
diff --git a/compiler/dex/mir_method_info.cc b/compiler/dex/mir_method_info.cc
index 4580e76..2c33ef1 100644
--- a/compiler/dex/mir_method_info.cc
+++ b/compiler/dex/mir_method_info.cc
@@ -75,10 +75,14 @@ void MirMethodLoweringInfo::Resolve(CompilerDriver* compiler_driver,
     int fast_path_flags = compiler_driver->IsFastInvoke(
         soa, dex_cache, class_loader, mUnit, referrer_class.get(), resolved_method, &invoke_type,
         &target_method, devirt_target, &it->direct_code_, &it->direct_method_);
-    uint16_t other_flags = it->flags_ & ~kFlagFastPath & ~(kInvokeTypeMask << kBitSharpTypeBegin);
+    bool needs_clinit =
+        compiler_driver->NeedsClassInitialization(referrer_class.get(), resolved_method);
+    uint16_t other_flags = it->flags_ &
+        ~(kFlagFastPath | kFlagNeedsClassInitialization | (kInvokeTypeMask << kBitSharpTypeBegin));
     it->flags_ = other_flags |
         (fast_path_flags != 0 ? kFlagFastPath : 0u) |
-        (static_cast<uint16_t>(invoke_type) << kBitSharpTypeBegin);
+        (static_cast<uint16_t>(invoke_type) << kBitSharpTypeBegin) |
+        (needs_clinit ? kFlagNeedsClassInitialization : 0u);
     it->target_dex_file_ = target_method.dex_file;
     it->target_method_idx_ = target_method.dex_method_index;
     it->stats_flags_ = fast_path_flags;
diff --git a/compiler/dex/mir_method_info.h b/compiler/dex/mir_method_info.h
index f927f1d..efe92f3 100644
--- a/compiler/dex/mir_method_info.h
+++ b/compiler/dex/mir_method_info.h
@@ -123,6 +123,10 @@ class MirMethodLoweringInfo : public MirMethodInfo {
     return (flags_ & kFlagFastPath) != 0u;
   }
 
+  bool NeedsClassInitialization() const {
+    return (flags_ & kFlagNeedsClassInitialization) != 0u;
+  }
+
   InvokeType GetInvokeType() const {
     return static_cast<InvokeType>((flags_ >> kBitInvokeTypeBegin) & kInvokeTypeMask);
   }
@@ -158,10 +162,12 @@ class MirMethodLoweringInfo : public MirMethodInfo {
     kBitInvokeTypeEnd = kBitInvokeTypeBegin + 3,  // 3 bits for invoke type.
     kBitSharpTypeBegin,
     kBitSharpTypeEnd = kBitSharpTypeBegin + 3,  // 3 bits for sharp type.
-    kMethodLoweringInfoEnd = kBitSharpTypeEnd
+    kBitNeedsClassInitialization = kBitSharpTypeEnd,
+    kMethodLoweringInfoEnd
   };
   COMPILE_ASSERT(kMethodLoweringInfoEnd <= 16, too_many_flags);
   static constexpr uint16_t kFlagFastPath = 1u << kBitFastPath;
+  static constexpr uint16_t kFlagNeedsClassInitialization = 1u << kBitNeedsClassInitialization;
   static constexpr uint16_t kInvokeTypeMask = 7u;
   COMPILE_ASSERT((1u << (kBitInvokeTypeEnd - kBitInvokeTypeBegin)) - 1u == kInvokeTypeMask,
                  assert_invoke_type_bits_ok);
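A self-contained model of the flag packing added above; this is an illustration, not the patch's code. The real enum in mir_method_info.h starts from bits inherited from MirMethodInfo, so the absolute bit positions assumed below may differ. The point it shows: the new needs-class-initialization bit reuses kBitSharpTypeEnd, keeping all flags within the 16-bit flags_ field:

    #include <cstdint>

    enum : uint16_t {
      kBitFastPath = 0,  // Assumed start; ART derives the base offset from MirMethodInfo.
      kBitInvokeTypeBegin,
      kBitInvokeTypeEnd = kBitInvokeTypeBegin + 3,      // 3 bits for invoke type.
      kBitSharpTypeBegin,
      kBitSharpTypeEnd = kBitSharpTypeBegin + 3,        // 3 bits for sharp type.
      kBitNeedsClassInitialization = kBitSharpTypeEnd,  // Reuses the first bit past the sharp type.
      kMethodLoweringInfoEnd
    };
    static_assert(kMethodLoweringInfoEnd <= 16, "too many flags");

    constexpr uint16_t kFlagFastPath = 1u << kBitFastPath;
    constexpr uint16_t kFlagNeedsClassInitialization = 1u << kBitNeedsClassInitialization;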
diff --git a/compiler/dex/mir_optimization.cc b/compiler/dex/mir_optimization.cc
index 333126b..45c8d87 100644
--- a/compiler/dex/mir_optimization.cc
+++ b/compiler/dex/mir_optimization.cc
@@ -17,6 +17,8 @@
 #include "compiler_internals.h"
 #include "local_value_numbering.h"
 #include "dataflow_iterator-inl.h"
+#include "dex/quick/dex_file_method_inliner.h"
+#include "dex/quick/dex_file_to_method_inliner_map.h"
 
 namespace art {
 
@@ -1113,6 +1115,97 @@ void MIRGraph::EliminateClassInitChecksEnd() {
   temp_scoped_alloc_.reset();
 }
 
+void MIRGraph::ComputeInlineIFieldLoweringInfo(uint16_t field_idx, MIR* invoke, MIR* iget_or_iput) {
+  uint32_t method_index = invoke->meta.method_lowering_info;
+  if (temp_bit_vector_->IsBitSet(method_index)) {
+    iget_or_iput->meta.ifield_lowering_info = temp_insn_data_[method_index];
+    DCHECK_EQ(field_idx, GetIFieldLoweringInfo(iget_or_iput).FieldIndex());
+    return;
+  }
+
+  const MirMethodLoweringInfo& method_info = GetMethodLoweringInfo(invoke);
+  MethodReference target = method_info.GetTargetMethod();
+  DexCompilationUnit inlined_unit(
+      cu_, cu_->class_loader, cu_->class_linker, *target.dex_file,
+      nullptr /* code_item not used */, 0u /* class_def_idx not used */, target.dex_method_index,
+      0u /* access_flags not used */, nullptr /* verified_method not used */);
+  MirIFieldLoweringInfo inlined_field_info(field_idx);
+  MirIFieldLoweringInfo::Resolve(cu_->compiler_driver, &inlined_unit, &inlined_field_info, 1u);
+  DCHECK(inlined_field_info.IsResolved());
+
+  uint32_t field_info_index = ifield_lowering_infos_.Size();
+  ifield_lowering_infos_.Insert(inlined_field_info);
+  temp_bit_vector_->SetBit(method_index);
+  temp_insn_data_[method_index] = field_info_index;
+  iget_or_iput->meta.ifield_lowering_info = field_info_index;
+}
+
+bool MIRGraph::InlineCallsGate() {
+  if ((cu_->disable_opt & (1 << kSuppressMethodInlining)) != 0 ||
+      method_lowering_infos_.Size() == 0u) {
+    return false;
+  }
+  if (cu_->compiler_driver->GetMethodInlinerMap() == nullptr) {
+    // This isn't the Quick compiler.
+    return false;
+  }
+  return true;
+}
+
+void MIRGraph::InlineCallsStart() {
+  // Prepare for inlining getters/setters. Since we're inlining at most 1 IGET/IPUT from
+  // each INVOKE, we can index the data by the MIR::meta::method_lowering_info index.
+
+  DCHECK(temp_scoped_alloc_.get() == nullptr);
+  temp_scoped_alloc_.reset(ScopedArenaAllocator::Create(&cu_->arena_stack));
+  temp_bit_vector_size_ = method_lowering_infos_.Size();
+  temp_bit_vector_ = new (temp_scoped_alloc_.get()) ArenaBitVector(
+      temp_scoped_alloc_.get(), temp_bit_vector_size_, false, kBitMapMisc);
+  temp_bit_vector_->ClearAllBits();
+  temp_insn_data_ = static_cast<uint16_t*>(temp_scoped_alloc_->Alloc(
+      temp_bit_vector_size_ * sizeof(*temp_insn_data_), kArenaAllocGrowableArray));
+}
+
+void MIRGraph::InlineCalls(BasicBlock* bb) {
+  if (bb->block_type != kDalvikByteCode) {
+    return;
+  }
+  for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+    if (!(Instruction::FlagsOf(mir->dalvikInsn.opcode) & Instruction::kInvoke)) {
+      continue;
+    }
+    const MirMethodLoweringInfo& method_info = GetMethodLoweringInfo(mir);
+    if (!method_info.FastPath()) {
+      continue;
+    }
+    InvokeType sharp_type = method_info.GetSharpType();
+    if ((sharp_type != kDirect) &&
+        (sharp_type != kStatic || method_info.NeedsClassInitialization())) {
+      continue;
+    }
+    DCHECK(cu_->compiler_driver->GetMethodInlinerMap() != nullptr);
+    MethodReference target = method_info.GetTargetMethod();
+    if (cu_->compiler_driver->GetMethodInlinerMap()->GetMethodInliner(target.dex_file)
+            ->GenInline(this, bb, mir, target.dex_method_index)) {
+      if (cu_->verbose) {
+        LOG(INFO) << "In \"" << PrettyMethod(cu_->method_idx, *cu_->dex_file)
+            << "\" @0x" << std::hex << mir->offset
+            << " inlined " << method_info.GetInvokeType() << " (" << sharp_type << ") call to \""
+            << PrettyMethod(target.dex_method_index, *target.dex_file) << "\"";
+      }
+    }
+  }
+}
+
+void MIRGraph::InlineCallsEnd() {
+  DCHECK(temp_insn_data_ != nullptr);
+  temp_insn_data_ = nullptr;
+  DCHECK(temp_bit_vector_ != nullptr);
+  temp_bit_vector_ = nullptr;
+  DCHECK(temp_scoped_alloc_.get() != nullptr);
+  temp_scoped_alloc_.reset();
+}
+
 void MIRGraph::DumpCheckStats() {
   Checkstats* stats =
       static_cast<Checkstats*>(arena_->Alloc(sizeof(Checkstats), kArenaAllocDFInfo));
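Distilled for clarity, this is the candidate filter that InlineCalls() above applies before handing an invoke to GenInline(); the enum and struct below are simplified stand-ins for ART's InvokeType and MirMethodLoweringInfo, not code from the patch:

    // Simplified stand-ins for ART's InvokeType and MirMethodLoweringInfo.
    enum InvokeType { kStatic, kDirect, kVirtual, kSuper, kInterface };

    struct MethodInfo {
      bool fast_path;
      bool needs_clinit;
      InvokeType sharp_type;
    };

    bool IsInlineCandidate(const MethodInfo& info) {
      if (!info.fast_path) {
        return false;  // Only fast-path invokes are considered.
      }
      // Direct calls are always acceptable; sharpened static calls only when the
      // callee's class cannot still require initialization at the call site.
      return info.sharp_type == kDirect ||
             (info.sharp_type == kStatic && !info.needs_clinit);
    }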
diff --git a/compiler/dex/pass_driver.cc b/compiler/dex/pass_driver.cc
index f195aff..999ed2a 100644
--- a/compiler/dex/pass_driver.cc
+++ b/compiler/dex/pass_driver.cc
@@ -92,6 +92,7 @@ void PassDriver::InsertPass(const Pass* new_pass) {
 static const Pass* const gPasses[] = {
   GetPassInstance<CacheFieldLoweringInfo>(),
   GetPassInstance<CacheMethodLoweringInfo>(),
+  GetPassInstance<CallInlining>(),
   GetPassInstance<CodeLayout>(),
   GetPassInstance<SSATransformation>(),
   GetPassInstance<ConstantPropagation>(),
diff --git a/compiler/dex/quick/dex_file_method_inliner.cc b/compiler/dex/quick/dex_file_method_inliner.cc
index e50ba24..53e26c7 100644
--- a/compiler/dex/quick/dex_file_method_inliner.cc
+++ b/compiler/dex/quick/dex_file_method_inliner.cc
@@ -21,6 +21,7 @@
 #include "base/macros.h"
 #include "base/mutex.h"
 #include "base/mutex-inl.h"
+#include "dex/frontend.h"
 #include "thread.h"
 #include "thread-inl.h"
 #include "dex/mir_graph.h"
@@ -31,6 +32,23 @@
 namespace art {
 
+namespace {  // anonymous namespace
+
+MIR* AllocReplacementMIR(MIRGraph* mir_graph, MIR* invoke, MIR* move_return) {
+  ArenaAllocator* arena = mir_graph->GetArena();
+  MIR* insn = static_cast<MIR*>(arena->Alloc(sizeof(MIR), kArenaAllocMIR));
+  insn->offset = invoke->offset;
+  insn->width = invoke->width;
+  insn->optimization_flags = MIR_CALLEE;
+  if (move_return != nullptr) {
+    DCHECK_EQ(move_return->offset, invoke->offset + invoke->width);
+    insn->width += move_return->width;
+  }
+  return insn;
+}
+
+}  // anonymous namespace
+
 const uint32_t DexFileMethodInliner::kIndexUnresolved;
 
 const char* const DexFileMethodInliner::kClassCacheNames[] = {
   "Z",                       // kClassCacheBoolean
@@ -348,6 +366,51 @@ bool DexFileMethodInliner::GenSpecial(Mir2Lir* backend, uint32_t method_idx) {
   return backend->SpecialMIR2LIR(special);
 }
 
+bool DexFileMethodInliner::GenInline(MIRGraph* mir_graph, BasicBlock* bb, MIR* invoke,
+                                     uint32_t method_idx) {
+  InlineMethod method;
+  {
+    ReaderMutexLock mu(Thread::Current(), lock_);
+    auto it = inline_methods_.find(method_idx);
+    if (it == inline_methods_.end() || (it->second.flags & kInlineSpecial) == 0) {
+      return false;
+    }
+    method = it->second;
+  }
+
+  MIR* move_result = nullptr;
+  bool result = true;
+  switch (method.opcode) {
+    case kInlineOpNop:
+      break;
+    case kInlineOpNonWideConst:
+      move_result = mir_graph->FindMoveResult(bb, invoke);
+      result = GenInlineConst(mir_graph, bb, invoke, move_result, method);
+      break;
+    case kInlineOpReturnArg:
+      move_result = mir_graph->FindMoveResult(bb, invoke);
+      result = GenInlineReturnArg(mir_graph, bb, invoke, move_result, method);
+      break;
+    case kInlineOpIGet:
+      move_result = mir_graph->FindMoveResult(bb, invoke);
+      result = GenInlineIGet(mir_graph, bb, invoke, move_result, method, method_idx);
+      break;
+    case kInlineOpIPut:
+      result = GenInlineIPut(mir_graph, bb, invoke, method, method_idx);
+      break;
+    default:
+      LOG(FATAL) << "Unexpected inline op: " << method.opcode;
+  }
+  if (result) {
+    invoke->optimization_flags |= MIR_INLINED;
+    if (move_result != nullptr) {
+      move_result->optimization_flags |= MIR_INLINED;
+      move_result->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpNop);
+    }
+  }
+  return result;
+}
+
 uint32_t DexFileMethodInliner::FindClassIndex(const DexFile* dex_file, IndexCache* cache,
                                               ClassCacheIndex index) {
   uint32_t* class_index = &cache->class_indexes[index];
@@ -484,4 +547,149 @@ bool DexFileMethodInliner::AddInlineMethod(int32_t method_idx, const InlineMethod& method) {
   }
 }
 
+bool DexFileMethodInliner::GenInlineConst(MIRGraph* mir_graph, BasicBlock* bb, MIR* invoke,
+                                          MIR* move_result, const InlineMethod& method) {
+  if (move_result == nullptr) {
+    // Result is unused.
+    return true;
+  }
+
+  // Check the opcode; for MOVE_RESULT_OBJECT, also check that the constant is null.
+  DCHECK(move_result->dalvikInsn.opcode == Instruction::MOVE_RESULT ||
+         (move_result->dalvikInsn.opcode == Instruction::MOVE_RESULT_OBJECT &&
+          method.d.data == 0u));
+
+  // Insert the CONST instruction.
+  MIR* insn = AllocReplacementMIR(mir_graph, invoke, move_result);
+  insn->dalvikInsn.opcode = Instruction::CONST;
+  insn->dalvikInsn.vA = move_result->dalvikInsn.vA;
+  insn->dalvikInsn.vB = method.d.data;
+  mir_graph->InsertMIRAfter(bb, move_result, insn);
+  return true;
+}
+
+bool DexFileMethodInliner::GenInlineReturnArg(MIRGraph* mir_graph, BasicBlock* bb, MIR* invoke,
+                                              MIR* move_result, const InlineMethod& method) {
+  if (move_result == nullptr) {
+    // Result is unused.
+    return true;
+  }
+
+  // Select opcode and argument.
+  const InlineReturnArgData& data = method.d.return_data;
+  Instruction::Code opcode = Instruction::MOVE_FROM16;
+  if (move_result->dalvikInsn.opcode == Instruction::MOVE_RESULT_OBJECT) {
+    DCHECK_EQ(data.is_object, 1u);
+    opcode = Instruction::MOVE_OBJECT_FROM16;
+  } else if (move_result->dalvikInsn.opcode == Instruction::MOVE_RESULT_WIDE) {
+    DCHECK_EQ(data.is_wide, 1u);
+    opcode = Instruction::MOVE_WIDE_FROM16;
+  } else {
+    DCHECK(move_result->dalvikInsn.opcode == Instruction::MOVE_RESULT);
+    DCHECK_EQ(data.is_wide, 0u);
+    DCHECK_EQ(data.is_object, 0u);
+  }
+  DCHECK_LT(data.is_wide ? data.arg + 1u : data.arg, invoke->dalvikInsn.vA);
+  int arg;
+  if (Instruction::FormatOf(invoke->dalvikInsn.opcode) == Instruction::k35c) {
+    arg = invoke->dalvikInsn.arg[data.arg];  // Non-range invoke.
+  } else {
+    DCHECK_EQ(Instruction::FormatOf(invoke->dalvikInsn.opcode), Instruction::k3rc);
+    arg = invoke->dalvikInsn.vC + data.arg;  // Range invoke.
+  }
+
+  // Insert the move instruction.
+  MIR* insn = AllocReplacementMIR(mir_graph, invoke, move_result);
+  insn->dalvikInsn.opcode = opcode;
+  insn->dalvikInsn.vA = move_result->dalvikInsn.vA;
+  insn->dalvikInsn.vB = arg;
+  mir_graph->InsertMIRAfter(bb, move_result, insn);
+  return true;
+}
+
+bool DexFileMethodInliner::GenInlineIGet(MIRGraph* mir_graph, BasicBlock* bb, MIR* invoke,
+                                         MIR* move_result, const InlineMethod& method,
+                                         uint32_t method_idx) {
+  CompilationUnit* cu = mir_graph->GetCurrentDexCompilationUnit()->GetCompilationUnit();
+  if (cu->enable_debug & (1 << kDebugSlowFieldPath)) {
+    return false;
+  }
+
+  const InlineIGetIPutData& data = method.d.ifield_data;
+  if (invoke->dalvikInsn.opcode == Instruction::INVOKE_STATIC ||
+      invoke->dalvikInsn.opcode == Instruction::INVOKE_STATIC_RANGE ||
+      data.object_arg != 0) {
+    // TODO: Implement inlining of IGET on non-"this" registers (needs correct stack trace for NPE).
+    return false;
+  }
+
+  if (move_result == nullptr) {
+    // Result is unused. If volatile, we still need to emit the IGET but we have no destination.
+    return !data.is_volatile;
+  }
+
+  Instruction::Code opcode = static_cast<Instruction::Code>(Instruction::IGET + data.op_variant);
+  DCHECK_EQ(InlineMethodAnalyser::IGetVariant(opcode), data.op_variant);
+
+  MIR* insn = AllocReplacementMIR(mir_graph, invoke, move_result);
+  insn->width += insn->offset - invoke->offset;
+  insn->offset = invoke->offset;
+  insn->dalvikInsn.opcode = opcode;
+  insn->dalvikInsn.vA = move_result->dalvikInsn.vA;
+  DCHECK_LT(data.object_arg, invoke->dalvikInsn.vA);
+  if (Instruction::FormatOf(invoke->dalvikInsn.opcode) == Instruction::k3rc) {
+    insn->dalvikInsn.vB = invoke->dalvikInsn.vC + data.object_arg;
+  } else {
+    DCHECK_EQ(Instruction::FormatOf(invoke->dalvikInsn.opcode), Instruction::k35c);
+    insn->dalvikInsn.vB = invoke->dalvikInsn.arg[data.object_arg];
+  }
+  mir_graph->ComputeInlineIFieldLoweringInfo(data.field_idx, invoke, insn);
+
+  DCHECK(mir_graph->GetIFieldLoweringInfo(insn).IsResolved());
+  DCHECK(mir_graph->GetIFieldLoweringInfo(insn).FastGet());
+  DCHECK_EQ(data.field_offset, mir_graph->GetIFieldLoweringInfo(insn).FieldOffset().Uint32Value());
+  DCHECK_EQ(data.is_volatile, mir_graph->GetIFieldLoweringInfo(insn).IsVolatile() ? 1u : 0u);
+
+  mir_graph->InsertMIRAfter(bb, move_result, insn);
+  return true;
+}
+
+bool DexFileMethodInliner::GenInlineIPut(MIRGraph* mir_graph, BasicBlock* bb, MIR* invoke,
+                                         const InlineMethod& method, uint32_t method_idx) {
+  CompilationUnit* cu = mir_graph->GetCurrentDexCompilationUnit()->GetCompilationUnit();
+  if (cu->enable_debug & (1 << kDebugSlowFieldPath)) {
+    return false;
+  }
+
+  const InlineIGetIPutData& data = method.d.ifield_data;
+  if (invoke->dalvikInsn.opcode == Instruction::INVOKE_STATIC ||
+      invoke->dalvikInsn.opcode == Instruction::INVOKE_STATIC_RANGE ||
+      data.object_arg != 0) {
+    // TODO: Implement inlining of IPUT on non-"this" registers (needs correct stack trace for NPE).
+    return false;
+  }
+
+  Instruction::Code opcode = static_cast<Instruction::Code>(Instruction::IPUT + data.op_variant);
+  DCHECK_EQ(InlineMethodAnalyser::IPutVariant(opcode), data.op_variant);
+
+  MIR* insn = AllocReplacementMIR(mir_graph, invoke, nullptr);
+  insn->dalvikInsn.opcode = opcode;
+  if (Instruction::FormatOf(invoke->dalvikInsn.opcode) == Instruction::k3rc) {
+    insn->dalvikInsn.vA = invoke->dalvikInsn.vC + data.src_arg;
+    insn->dalvikInsn.vB = invoke->dalvikInsn.vC + data.object_arg;
+  } else {
+    insn->dalvikInsn.vA = invoke->dalvikInsn.arg[data.src_arg];
+    insn->dalvikInsn.vB = invoke->dalvikInsn.arg[data.object_arg];
+  }
+  mir_graph->ComputeInlineIFieldLoweringInfo(data.field_idx, invoke, insn);
+
+  DCHECK(mir_graph->GetIFieldLoweringInfo(insn).IsResolved());
+  DCHECK(mir_graph->GetIFieldLoweringInfo(insn).FastPut());
+  DCHECK_EQ(data.field_offset, mir_graph->GetIFieldLoweringInfo(insn).FieldOffset().Uint32Value());
+  DCHECK_EQ(data.is_volatile, mir_graph->GetIFieldLoweringInfo(insn).IsVolatile() ? 1u : 0u);
+
+  mir_graph->InsertMIRAfter(bb, invoke, insn);
+  return true;
+}
+
 }  // namespace art
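To make the rewrite concrete, here is a toy model of what GenInlineIGet() does to the MIR stream (all types and names below are simplified stand-ins, not ART's): the invoke and its move-result are flagged MIR_INLINED so codegen skips them, the move-result's opcode becomes a nop, and a new IGET flagged MIR_CALLEE is inserted after the move-result to write the same result register:

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Toy stand-ins for ART's MIR representation; field and enum names are invented.
    enum ToyFlags : uint16_t { kToyInlined = 1u << 0, kToyCallee = 1u << 1 };
    enum ToyOp { kToyInvokeVirtual, kToyMoveResult, kToyIGet, kToyNop };

    struct ToyMir {
      ToyOp op;
      uint32_t offset;     // Dex pc of the instruction.
      uint16_t flags = 0;
      int dest = -1;       // Destination vreg (move-result / iget).
      int obj = -1;        // Receiver vreg (invoke / iget).
    };

    // Mirrors the shape of the GenInlineIGet() rewrite.
    void InlineGetter(std::vector<ToyMir>& code, std::size_t invoke_idx, std::size_t move_idx) {
      code[invoke_idx].flags |= kToyInlined;            // Codegen will skip the call...
      code[move_idx].flags |= kToyInlined;              // ...and the (now nop) move-result.
      ToyMir iget{kToyIGet, code[invoke_idx].offset, kToyCallee,
                  code[move_idx].dest, code[invoke_idx].obj};
      code[move_idx].op = kToyNop;
      code.insert(code.begin() + move_idx + 1, iget);   // New IGET goes after the move-result.
    }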
diff --git a/compiler/dex/quick/dex_file_method_inliner.h b/compiler/dex/quick/dex_file_method_inliner.h
index a6d4cab..b4e190a 100644
--- a/compiler/dex/quick/dex_file_method_inliner.h
+++ b/compiler/dex/quick/dex_file_method_inliner.h
@@ -31,7 +31,10 @@ namespace verifier {
 class MethodVerifier;
 }  // namespace verifier
 
+struct BasicBlock;
 struct CallInfo;
+struct MIR;
+class MIRGraph;
 class Mir2Lir;
 
 /**
@@ -79,7 +82,13 @@ class DexFileMethodInliner {
   /**
    * Generate code for a special function.
    */
-  bool GenSpecial(Mir2Lir* backend, uint32_t method_idx);
+  bool GenSpecial(Mir2Lir* backend, uint32_t method_idx) LOCKS_EXCLUDED(lock_);
+
+  /**
+   * Try to inline an invoke.
+   */
+  bool GenInline(MIRGraph* mir_graph, BasicBlock* bb, MIR* invoke, uint32_t method_idx)
+      LOCKS_EXCLUDED(lock_);
 
   /**
    * To avoid multiple lookups of a class by its descriptor, we cache its
@@ -286,6 +295,15 @@
   bool AddInlineMethod(int32_t method_idx, const InlineMethod& method) LOCKS_EXCLUDED(lock_);
 
+  static bool GenInlineConst(MIRGraph* mir_graph, BasicBlock* bb, MIR* invoke,
+                             MIR* move_result, const InlineMethod& method);
+  static bool GenInlineReturnArg(MIRGraph* mir_graph, BasicBlock* bb, MIR* invoke,
+                                 MIR* move_result, const InlineMethod& method);
+  static bool GenInlineIGet(MIRGraph* mir_graph, BasicBlock* bb, MIR* invoke,
+                            MIR* move_result, const InlineMethod& method, uint32_t method_idx);
+  static bool GenInlineIPut(MIRGraph* mir_graph, BasicBlock* bb, MIR* invoke,
+                            const InlineMethod& method, uint32_t method_idx);
+
   ReaderWriterMutex lock_;
   /*
    * Maps method indexes (for the particular DexFile) to Intrinsic definitions.
   */
diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc
index 92c13ce..4f02fd7 100644
--- a/compiler/dex/quick/gen_invoke.cc
+++ b/compiler/dex/quick/gen_invoke.cc
@@ -1423,6 +1423,16 @@ bool Mir2Lir::GenInlinedUnsafePut(CallInfo* info, bool is_long,
 }
 
 void Mir2Lir::GenInvoke(CallInfo* info) {
+  if ((info->opt_flags & MIR_INLINED) != 0) {
+    // Already inlined but we may still need the null check.
+    if (info->type != kStatic &&
+        ((cu_->disable_opt & (1 << kNullCheckElimination)) != 0 ||
+         (info->opt_flags & MIR_IGNORE_NULL_CHECK) == 0)) {
+      RegLocation rl_obj = LoadValue(info->args[0], kCoreReg);
+      GenImmedCheck(kCondEq, rl_obj.reg.GetReg(), 0, kThrowNullPointer);
+    }
+    return;
+  }
   DCHECK(cu_->compiler_driver->GetMethodInlinerMap() != nullptr);
   if (cu_->compiler_driver->GetMethodInlinerMap()->GetMethodInliner(cu_->dex_file)
       ->GenIntrinsic(this, info)) {
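The null-check logic above, distilled into a standalone predicate (the flag constants are stand-ins for MIR_IGNORE_NULL_CHECK and the (1 << kNullCheckElimination) disable bit): an inlined instance call still needs an explicit receiver null check unless null-check elimination ran and proved the receiver non-null:

    #include <cstdint>

    // Stand-ins for MIR_IGNORE_NULL_CHECK and the null-check-elimination disable bit.
    constexpr uint32_t kIgnoreNullCheckFlag = 1u << 0;
    constexpr uint32_t kDisableNullCheckElim = 1u << 1;

    bool NeedsExplicitNullCheck(bool is_static, uint32_t opt_flags, uint32_t disable_opt) {
      return !is_static &&
             ((disable_opt & kDisableNullCheckElim) != 0u ||   // NCE was disabled, or...
              (opt_flags & kIgnoreNullCheckFlag) == 0u);       // ...it could not prove non-null.
    }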
diff --git a/compiler/dex/quick/mir_to_lir.cc b/compiler/dex/quick/mir_to_lir.cc
index 538c292..39994e9 100644
--- a/compiler/dex/quick/mir_to_lir.cc
+++ b/compiler/dex/quick/mir_to_lir.cc
@@ -346,15 +346,17 @@ void Mir2Lir::CompileDalvikInstruction(MIR* mir, BasicBlock* bb, LIR* label_list
       break;
 
     case Instruction::MOVE_RESULT_WIDE:
-      if (opt_flags & MIR_INLINED)
+      if ((opt_flags & MIR_INLINED) != 0) {
         break;  // Nop - combined w/ previous invoke.
+      }
       StoreValueWide(rl_dest, GetReturnWide(rl_dest.fp));
       break;
 
     case Instruction::MOVE_RESULT:
     case Instruction::MOVE_RESULT_OBJECT:
-      if (opt_flags & MIR_INLINED)
+      if ((opt_flags & MIR_INLINED) != 0) {
         break;  // Nop - combined w/ previous invoke.
+      }
       StoreValue(rl_dest, GetReturn(rl_dest.fp));
       break;
 
diff --git a/compiler/driver/compiler_driver-inl.h b/compiler/driver/compiler_driver-inl.h
index 664f809..d9f2a3a 100644
--- a/compiler/driver/compiler_driver-inl.h
+++ b/compiler/driver/compiler_driver-inl.h
@@ -289,6 +289,16 @@ inline int CompilerDriver::IsFastInvoke(
   return stats_flags;
 }
 
+inline bool CompilerDriver::NeedsClassInitialization(mirror::Class* referrer_class,
+                                                     mirror::ArtMethod* resolved_method) {
+  if (!resolved_method->IsStatic()) {
+    return false;
+  }
+  mirror::Class* methods_class = resolved_method->GetDeclaringClass();
+  // NOTE: Unlike in IsFastStaticField(), we don't check CanAssumeTypeIsPresentInDexCache() here.
+  return methods_class != referrer_class && !methods_class->IsInitialized();
+}
+
 }  // namespace art
 
 #endif  // ART_COMPILER_DRIVER_COMPILER_DRIVER_INL_H_
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index d88b2aa..256aa46 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -283,6 +283,10 @@ class CompilerDriver {
                     uintptr_t* direct_code, uintptr_t* direct_method)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
+  // Does invocation of the resolved method need class initialization?
+  bool NeedsClassInitialization(mirror::Class* referrer_class, mirror::ArtMethod* resolved_method)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
   void ProcessedInstanceField(bool resolved);
   void ProcessedStaticField(bool resolved, bool local);
   void ProcessedInvoke(InvokeType invoke_type, int flags);
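The intent of NeedsClassInitialization() above, restated as a minimal model (ClassInfo stands in for mirror::Class; only the initialized bit matters here). Instance methods never need the check because a live receiver implies an initialized class, and a static method in the caller's own class is safe because the caller could not be running otherwise:

    // ClassInfo stands in for mirror::Class.
    struct ClassInfo {
      bool initialized;
    };

    // Static calls need a class-initialization check unless the callee's class is the
    // caller's own class or is already initialized; instance calls never do.
    bool NeedsClinitCheck(bool is_static, const ClassInfo* declaring, const ClassInfo* referrer) {
      return is_static && declaring != referrer && !declaring->initialized;
    }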
-- 
cgit v1.1