diff options
author | Vladimir Marko <vmarko@google.com> | 2015-03-19 10:07:02 +0000 |
---|---|---|
committer | Vladimir Marko <vmarko@google.com> | 2015-03-31 08:56:17 +0100 |
commit | 20f85597828194c12be10d3a927999def066555e (patch) | |
tree | 486c10dca0811b036a0cd5f80c02650ac43b11a5 /compiler/dex | |
parent | 7d8c6776d7bdcc04411154aa215ba5909939192a (diff) | |
download | art-20f85597828194c12be10d3a927999def066555e.zip art-20f85597828194c12be10d3a927999def066555e.tar.gz art-20f85597828194c12be10d3a927999def066555e.tar.bz2 |
Fixed layout for dex caches in boot image.
Define a fixed layout for dex cache arrays (type, method,
string and field arrays) for dex caches in the boot image.
This gives those arrays fixed offsets from the boot image
code and allows PC-relative addressing of their elements.
Use the PC-relative load on arm64 for relevant instructions,
i.e. invoke-static, invoke-direct, const-string,
const-class, check-cast and instance-of. This reduces the
arm64 boot.oat on Nexus 9 by 1.1MiB.
This CL provides the infrastructure and shows, on arm64,
the gains that we can achieve by having a fixed dex cache
arrays layout. To fully use this for the boot images, we
need to implement the PC-relative addressing for other
architectures. To achieve similar gains for apps, we need
to move the dex cache arrays to a .bss section of the oat
file. These changes will be implemented in subsequent CLs.
(Also remove some compiler_driver.h dependencies to reduce
incremental build times.)
Change-Id: Ib1859fa4452d01d983fd92ae22b611f45a85d69b
Diffstat (limited to 'compiler/dex')
-rw-r--r-- | compiler/dex/mir_field_info.cc | 1 | ||||
-rw-r--r-- | compiler/dex/mir_graph.cc | 9 | ||||
-rw-r--r-- | compiler/dex/mir_method_info.cc | 2 | ||||
-rw-r--r-- | compiler/dex/quick/arm/call_arm.cc | 1 | ||||
-rw-r--r-- | compiler/dex/quick/arm64/arm64_lir.h | 1 | ||||
-rw-r--r-- | compiler/dex/quick/arm64/assemble_arm64.cc | 4 | ||||
-rw-r--r-- | compiler/dex/quick/arm64/call_arm64.cc | 54 | ||||
-rw-r--r-- | compiler/dex/quick/arm64/codegen_arm64.h | 10 | ||||
-rw-r--r-- | compiler/dex/quick/arm64/int_arm64.cc | 23 | ||||
-rw-r--r-- | compiler/dex/quick/arm64/target_arm64.cc | 18 | ||||
-rw-r--r-- | compiler/dex/quick/codegen_util.cc | 13 | ||||
-rw-r--r-- | compiler/dex/quick/gen_common.cc | 302 | ||||
-rwxr-xr-x | compiler/dex/quick/gen_invoke.cc | 1 | ||||
-rw-r--r-- | compiler/dex/quick/gen_loadstore.cc | 14 | ||||
-rw-r--r-- | compiler/dex/quick/local_optimizations.cc | 9 | ||||
-rw-r--r-- | compiler/dex/quick/mir_to_lir.h | 29 | ||||
-rw-r--r-- | compiler/dex/quick/quick_compiler.cc | 6 | ||||
-rw-r--r-- | compiler/dex/quick/x86/call_x86.cc | 1 |
18 files changed, 291 insertions, 207 deletions
diff --git a/compiler/dex/mir_field_info.cc b/compiler/dex/mir_field_info.cc index d2079a2..a9ab3bb 100644 --- a/compiler/dex/mir_field_info.cc +++ b/compiler/dex/mir_field_info.cc @@ -19,6 +19,7 @@ #include <string.h> #include "base/logging.h" +#include "dex/verified_method.h" #include "driver/compiler_driver.h" #include "driver/compiler_driver-inl.h" #include "mirror/class_loader.h" // Only to allow casts in Handle<ClassLoader>. diff --git a/compiler/dex/mir_graph.cc b/compiler/dex/mir_graph.cc index 3103f96..58f12c9 100644 --- a/compiler/dex/mir_graph.cc +++ b/compiler/dex/mir_graph.cc @@ -688,7 +688,7 @@ BasicBlock* MIRGraph::ProcessCanThrow(BasicBlock* cur_block, MIR* insn, DexOffse /* Parse a Dex method and insert it into the MIRGraph at the current insert point. */ void MIRGraph::InlineMethod(const DexFile::CodeItem* code_item, uint32_t access_flags, - InvokeType invoke_type, uint16_t class_def_idx, + InvokeType invoke_type ATTRIBUTE_UNUSED, uint16_t class_def_idx, uint32_t method_idx, jobject class_loader, const DexFile& dex_file) { current_code_item_ = code_item; method_stack_.push_back(std::make_pair(current_method_, current_offset_)); @@ -726,13 +726,6 @@ void MIRGraph::InlineMethod(const DexFile::CodeItem* code_item, uint32_t access_ null_block->hidden = true; entry_block_ = CreateNewBB(kEntryBlock); exit_block_ = CreateNewBB(kExitBlock); - // TODO: deprecate all "cu->" fields; move what's left to wherever CompilationUnit is allocated. 
- cu_->dex_file = &dex_file; - cu_->class_def_idx = class_def_idx; - cu_->method_idx = method_idx; - cu_->access_flags = access_flags; - cu_->invoke_type = invoke_type; - cu_->shorty = dex_file.GetMethodShorty(dex_file.GetMethodId(method_idx)); } else { UNIMPLEMENTED(FATAL) << "Nested inlining not implemented."; /* diff --git a/compiler/dex/mir_method_info.cc b/compiler/dex/mir_method_info.cc index 34fb1bf..831ad42 100644 --- a/compiler/dex/mir_method_info.cc +++ b/compiler/dex/mir_method_info.cc @@ -16,9 +16,11 @@ # include "mir_method_info.h" +#include "dex/verified_method.h" #include "driver/compiler_driver.h" #include "driver/dex_compilation_unit.h" #include "driver/compiler_driver-inl.h" +#include "driver/compiler_options.h" #include "mirror/class_loader.h" // Only to allow casts in Handle<ClassLoader>. #include "mirror/dex_cache.h" // Only to allow casts in Handle<DexCache>. #include "scoped_thread_state_change.h" diff --git a/compiler/dex/quick/arm/call_arm.cc b/compiler/dex/quick/arm/call_arm.cc index d46c25a..3081c9e 100644 --- a/compiler/dex/quick/arm/call_arm.cc +++ b/compiler/dex/quick/arm/call_arm.cc @@ -23,6 +23,7 @@ #include "dex/mir_graph.h" #include "dex/quick/mir_to_lir-inl.h" #include "driver/compiler_driver.h" +#include "driver/compiler_options.h" #include "gc/accounting/card_table.h" #include "mirror/art_method.h" #include "mirror/object_array-inl.h" diff --git a/compiler/dex/quick/arm64/arm64_lir.h b/compiler/dex/quick/arm64/arm64_lir.h index d15412a..f6fa938 100644 --- a/compiler/dex/quick/arm64/arm64_lir.h +++ b/compiler/dex/quick/arm64/arm64_lir.h @@ -236,6 +236,7 @@ enum A64Opcode { kA64Add4rrro, // add [00001011000] rm[20-16] imm_6[15-10] rn[9-5] rd[4-0]. kA64Add4RRre, // add [00001011001] rm[20-16] option[15-13] imm_3[12-10] rn[9-5] rd[4-0]. kA64Adr2xd, // adr [0] immlo[30-29] [10000] immhi[23-5] rd[4-0]. + kA64Adrp2xd, // adrp [1] immlo[30-29] [10000] immhi[23-5] rd[4-0]. 
kA64And3Rrl, // and [00010010] N[22] imm_r[21-16] imm_s[15-10] rn[9-5] rd[4-0]. kA64And4rrro, // and [00001010] shift[23-22] [N=0] rm[20-16] imm_6[15-10] rn[9-5] rd[4-0]. kA64Asr3rrd, // asr [0001001100] immr[21-16] imms[15-10] rn[9-5] rd[4-0]. diff --git a/compiler/dex/quick/arm64/assemble_arm64.cc b/compiler/dex/quick/arm64/assemble_arm64.cc index 329bb1e..a59deb5 100644 --- a/compiler/dex/quick/arm64/assemble_arm64.cc +++ b/compiler/dex/quick/arm64/assemble_arm64.cc @@ -131,6 +131,10 @@ const A64EncodingMap Arm64Mir2Lir::EncodingMap[kA64Last] = { kFmtRegX, 4, 0, kFmtImm21, -1, -1, kFmtUnused, -1, -1, kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0 | NEEDS_FIXUP, "adr", "!0x, #!1d", kFixupAdr), + ENCODING_MAP(kA64Adrp2xd, NO_VARIANTS(0x90000000), + kFmtRegX, 4, 0, kFmtImm21, -1, -1, kFmtUnused, -1, -1, + kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0 | NEEDS_FIXUP, + "adrp", "!0x, #!1d", kFixupLabel), ENCODING_MAP(WIDE(kA64And3Rrl), SF_VARIANTS(0x12000000), kFmtRegROrSp, 4, 0, kFmtRegR, 9, 5, kFmtBitBlt, 22, 10, kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1, diff --git a/compiler/dex/quick/arm64/call_arm64.cc b/compiler/dex/quick/arm64/call_arm64.cc index 823cb60..3316945 100644 --- a/compiler/dex/quick/arm64/call_arm64.cc +++ b/compiler/dex/quick/arm64/call_arm64.cc @@ -23,10 +23,12 @@ #include "dex/mir_graph.h" #include "dex/quick/mir_to_lir-inl.h" #include "driver/compiler_driver.h" +#include "driver/compiler_options.h" #include "gc/accounting/card_table.h" #include "entrypoints/quick/quick_entrypoints.h" #include "mirror/art_method.h" #include "mirror/object_array-inl.h" +#include "utils/dex_cache_arrays_layout-inl.h" namespace art { @@ -438,13 +440,13 @@ static bool Arm64UseRelativeCall(CompilationUnit* cu, const MethodReference& tar * Bit of a hack here - in the absence of a real scheduling pass, * emit the next instruction in static & direct invoke sequences. 
*/ -static int Arm64NextSDCallInsn(CompilationUnit* cu, CallInfo* info, - int state, const MethodReference& target_method, - uint32_t unused_idx, - uintptr_t direct_code, uintptr_t direct_method, - InvokeType type) { +int Arm64Mir2Lir::Arm64NextSDCallInsn(CompilationUnit* cu, CallInfo* info, + int state, const MethodReference& target_method, + uint32_t unused_idx, + uintptr_t direct_code, uintptr_t direct_method, + InvokeType type) { UNUSED(info, unused_idx); - Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get()); + Arm64Mir2Lir* cg = static_cast<Arm64Mir2Lir*>(cu->cg.get()); if (direct_code != 0 && direct_method != 0) { switch (state) { case 0: // Get the current Method* [sets kArg0] @@ -465,17 +467,24 @@ static int Arm64NextSDCallInsn(CompilationUnit* cu, CallInfo* info, return -1; } } else { + bool use_pc_rel = cg->CanUseOpPcRelDexCacheArrayLoad(); RegStorage arg0_ref = cg->TargetReg(kArg0, kRef); switch (state) { case 0: // Get the current Method* [sets kArg0] // TUNING: we can save a reg copy if Method* has been promoted. - cg->LoadCurrMethodDirect(arg0_ref); - break; + if (!use_pc_rel) { + cg->LoadCurrMethodDirect(arg0_ref); + break; + } + ++state; + FALLTHROUGH_INTENDED; case 1: // Get method->dex_cache_resolved_methods_ - cg->LoadRefDisp(arg0_ref, - mirror::ArtMethod::DexCacheResolvedMethodsOffset().Int32Value(), - arg0_ref, - kNotVolatile); + if (!use_pc_rel) { + cg->LoadRefDisp(arg0_ref, + mirror::ArtMethod::DexCacheResolvedMethodsOffset().Int32Value(), + arg0_ref, + kNotVolatile); + } // Set up direct code if known. 
if (direct_code != 0) { if (direct_code != static_cast<uintptr_t>(-1)) { @@ -487,14 +496,23 @@ static int Arm64NextSDCallInsn(CompilationUnit* cu, CallInfo* info, cg->LoadCodeAddress(target_method, type, kInvokeTgt); } } - break; + if (!use_pc_rel || direct_code != 0) { + break; + } + ++state; + FALLTHROUGH_INTENDED; case 2: // Grab target method* CHECK_EQ(cu->dex_file, target_method.dex_file); - cg->LoadRefDisp(arg0_ref, - mirror::ObjectArray<mirror::Object>::OffsetOfElement( - target_method.dex_method_index).Int32Value(), - arg0_ref, - kNotVolatile); + if (!use_pc_rel) { + cg->LoadRefDisp(arg0_ref, + mirror::ObjectArray<mirror::Object>::OffsetOfElement( + target_method.dex_method_index).Int32Value(), + arg0_ref, + kNotVolatile); + } else { + size_t offset = cg->dex_cache_arrays_layout_.MethodOffset(target_method.dex_method_index); + cg->OpPcRelDexCacheArrayLoad(cu->dex_file, offset, arg0_ref); + } break; case 3: // Grab the code from the method* if (direct_code == 0) { diff --git a/compiler/dex/quick/arm64/codegen_arm64.h b/compiler/dex/quick/arm64/codegen_arm64.h index 54fd46d..8184f02 100644 --- a/compiler/dex/quick/arm64/codegen_arm64.h +++ b/compiler/dex/quick/arm64/codegen_arm64.h @@ -78,6 +78,9 @@ class Arm64Mir2Lir FINAL : public Mir2Lir { /// @copydoc Mir2Lir::UnconditionallyMarkGCCard(RegStorage) void UnconditionallyMarkGCCard(RegStorage tgt_addr_reg) OVERRIDE; + bool CanUseOpPcRelDexCacheArrayLoad() const OVERRIDE; + void OpPcRelDexCacheArrayLoad(const DexFile* dex_file, int offset, RegStorage r_dest) OVERRIDE; + LIR* OpCmpMemImmBranch(ConditionCode cond, RegStorage temp_reg, RegStorage base_reg, int offset, int check_value, LIR* target, LIR** compare) OVERRIDE; @@ -393,9 +396,16 @@ class Arm64Mir2Lir FINAL : public Mir2Lir { void GenDivRemLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2, bool is_div, int flags); + static int Arm64NextSDCallInsn(CompilationUnit* cu, CallInfo* info, + int state, const 
MethodReference& target_method, + uint32_t unused_idx, + uintptr_t direct_code, uintptr_t direct_method, + InvokeType type); + static const A64EncodingMap EncodingMap[kA64Last]; ArenaVector<LIR*> call_method_insns_; + ArenaVector<LIR*> dex_cache_access_insns_; int GenDalvikArgsBulkCopy(CallInfo* info, int first, int count) OVERRIDE; }; diff --git a/compiler/dex/quick/arm64/int_arm64.cc b/compiler/dex/quick/arm64/int_arm64.cc index 2372ccc..e9b9b5d 100644 --- a/compiler/dex/quick/arm64/int_arm64.cc +++ b/compiler/dex/quick/arm64/int_arm64.cc @@ -18,6 +18,7 @@ #include "codegen_arm64.h" +#include "arch/arm64/instruction_set_features_arm64.h" #include "arch/instruction_set_features.h" #include "arm64_lir.h" #include "base/logging.h" @@ -943,6 +944,28 @@ void Arm64Mir2Lir::OpPcRelLoad(RegStorage reg, LIR* target) { lir->target = target; } +bool Arm64Mir2Lir::CanUseOpPcRelDexCacheArrayLoad() const { + if (cu_->compiler_driver->GetInstructionSetFeatures()->AsArm64InstructionSetFeatures() + ->NeedFixCortexA53_843419()) { + // TODO: Implement link-time workaround in OatWriter so that we can use ADRP on Cortex-A53. 
+ return false; + } + return dex_cache_arrays_layout_.Valid(); +} + +void Arm64Mir2Lir::OpPcRelDexCacheArrayLoad(const DexFile* dex_file, int offset, + RegStorage r_dest) { + LIR* adrp = NewLIR2(kA64Adrp2xd, r_dest.GetReg(), 0); + adrp->operands[2] = WrapPointer(dex_file); + adrp->operands[3] = offset; + adrp->operands[4] = WrapPointer(adrp); + dex_cache_access_insns_.push_back(adrp); + LIR* ldr = LoadBaseDisp(r_dest, 0, r_dest, kReference, kNotVolatile); + ldr->operands[4] = adrp->operands[4]; + ldr->flags.fixup = kFixupLabel; + dex_cache_access_insns_.push_back(ldr); +} + LIR* Arm64Mir2Lir::OpVldm(RegStorage r_base, int count) { UNUSED(r_base, count); LOG(FATAL) << "Unexpected use of OpVldm for Arm64"; diff --git a/compiler/dex/quick/arm64/target_arm64.cc b/compiler/dex/quick/arm64/target_arm64.cc index 09a34bf..c5c0dc5 100644 --- a/compiler/dex/quick/arm64/target_arm64.cc +++ b/compiler/dex/quick/arm64/target_arm64.cc @@ -606,7 +606,8 @@ RegisterClass Arm64Mir2Lir::RegClassForFieldLoadStore(OpSize size, bool is_volat Arm64Mir2Lir::Arm64Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena) : Mir2Lir(cu, mir_graph, arena), - call_method_insns_(arena->Adapter()) { + call_method_insns_(arena->Adapter()), + dex_cache_access_insns_(arena->Adapter()) { // Sanity check - make sure encoding map lines up. for (int i = 0; i < kA64Last; i++) { DCHECK_EQ(UNWIDE(Arm64Mir2Lir::EncodingMap[i].opcode), i) @@ -846,8 +847,9 @@ RegStorage Arm64Mir2Lir::InToRegStorageArm64Mapper::GetNextReg(ShortyArg arg) { } void Arm64Mir2Lir::InstallLiteralPools() { + patches_.reserve(call_method_insns_.size() + dex_cache_access_insns_.size()); + // PC-relative calls to methods. 
- patches_.reserve(call_method_insns_.size()); for (LIR* p : call_method_insns_) { DCHECK_EQ(p->opcode, kA64Bl1t); uint32_t target_method_idx = p->operands[1]; @@ -856,6 +858,18 @@ void Arm64Mir2Lir::InstallLiteralPools() { target_dex_file, target_method_idx)); } + // PC-relative references to dex cache arrays. + for (LIR* p : dex_cache_access_insns_) { + DCHECK(p->opcode == kA64Adrp2xd || p->opcode == kA64Ldr3rXD); + const LIR* adrp = UnwrapPointer<LIR>(p->operands[4]); + DCHECK_EQ(adrp->opcode, kA64Adrp2xd); + const DexFile* dex_file = UnwrapPointer<DexFile>(adrp->operands[2]); + uint32_t offset = adrp->operands[3]; + DCHECK(!p->flags.is_nop); + DCHECK(!adrp->flags.is_nop); + patches_.push_back(LinkerPatch::DexCacheArrayPatch(p->offset, dex_file, adrp->offset, offset)); + } + // And do the normal processing. Mir2Lir::InstallLiteralPools(); } diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc index df72830..509d448 100644 --- a/compiler/dex/quick/codegen_util.cc +++ b/compiler/dex/quick/codegen_util.cc @@ -29,6 +29,7 @@ #include "dex/quick/dex_file_to_method_inliner_map.h" #include "dex/verification_results.h" #include "dex/verified_method.h" +#include "utils/dex_cache_arrays_layout-inl.h" #include "verifier/dex_gc_map.h" #include "verifier/method_verifier.h" #include "vmap_table.h" @@ -1053,6 +1054,7 @@ Mir2Lir::Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena mem_ref_type_(ResourceMask::kHeapRef), mask_cache_(arena), safepoints_(arena->Adapter()), + dex_cache_arrays_layout_(cu->compiler_driver->GetDexCacheArraysLayout(cu->dex_file)), in_to_reg_storage_mapping_(arena) { switch_tables_.reserve(4); fill_array_data_.reserve(4); @@ -1304,6 +1306,17 @@ void Mir2Lir::LoadClassType(const DexFile& dex_file, uint32_t type_idx, OpPcRelLoad(TargetReg(symbolic_reg, kRef), data_target); } +bool Mir2Lir::CanUseOpPcRelDexCacheArrayLoad() const { + return false; +} + +void Mir2Lir::OpPcRelDexCacheArrayLoad(const DexFile* 
dex_file ATTRIBUTE_UNUSED, + int offset ATTRIBUTE_UNUSED, + RegStorage r_dest ATTRIBUTE_UNUSED) { + LOG(FATAL) << "No generic implementation."; + UNREACHABLE(); +} + std::vector<uint8_t>* Mir2Lir::ReturnFrameDescriptionEntry() { // Default case is to do nothing. return nullptr; diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc index 2bcaaca..1813e09 100644 --- a/compiler/dex/quick/gen_common.cc +++ b/compiler/dex/quick/gen_common.cc @@ -24,12 +24,14 @@ #include "dex/mir_graph.h" #include "dex/quick/arm/arm_lir.h" #include "driver/compiler_driver.h" +#include "driver/compiler_options.h" #include "entrypoints/quick/quick_entrypoints.h" #include "mirror/array.h" #include "mirror/object_array-inl.h" #include "mirror/object-inl.h" #include "mirror/object_reference.h" #include "utils.h" +#include "utils/dex_cache_arrays_layout-inl.h" #include "verifier/method_verifier.h" namespace art { @@ -56,6 +58,42 @@ ALWAYS_INLINE static inline bool ForceSlowTypePath(CompilationUnit* cu) { return (cu->enable_debug & (1 << kDebugSlowTypePath)) != 0; } +void Mir2Lir::GenIfNullUseHelperImmMethod( + RegStorage r_result, QuickEntrypointEnum trampoline, int imm, RegStorage r_method) { + class CallHelperImmMethodSlowPath : public LIRSlowPath { + public: + CallHelperImmMethodSlowPath(Mir2Lir* m2l, LIR* fromfast, LIR* cont, + QuickEntrypointEnum trampoline_in, int imm_in, + RegStorage r_method_in, RegStorage r_result_in) + : LIRSlowPath(m2l, fromfast, cont), trampoline_(trampoline_in), + imm_(imm_in), r_method_(r_method_in), r_result_(r_result_in) { + } + + void Compile() { + GenerateTargetLabel(); + if (r_method_.Valid()) { + m2l_->CallRuntimeHelperImmReg(trampoline_, imm_, r_method_, true); + } else { + m2l_->CallRuntimeHelperImmMethod(trampoline_, imm_, true); + } + m2l_->OpRegCopy(r_result_, m2l_->TargetReg(kRet0, kRef)); + m2l_->OpUnconditionalBranch(cont_); + } + + private: + QuickEntrypointEnum trampoline_; + const int imm_; + const RegStorage r_method_; 
+ const RegStorage r_result_; + }; + + LIR* branch = OpCmpImmBranch(kCondEq, r_result, 0, NULL); + LIR* cont = NewLIR0(kPseudoTargetLabel); + + AddSlowPath(new (arena_) CallHelperImmMethodSlowPath(this, branch, cont, trampoline, imm, + r_method, r_result)); +} + /* * Generate a kPseudoBarrier marker to indicate the boundary of special * blocks. @@ -1022,64 +1060,41 @@ void Mir2Lir::GenArrayObjPut(int opt_flags, RegLocation rl_array, RegLocation rl } void Mir2Lir::GenConstClass(uint32_t type_idx, RegLocation rl_dest) { - RegLocation rl_method = LoadCurrMethod(); - CheckRegLocation(rl_method); - RegStorage res_reg = AllocTempRef(); + RegLocation rl_result; if (!cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx, *cu_->dex_file, type_idx)) { // Call out to helper which resolves type and verifies access. // Resolved type returned in kRet0. - CallRuntimeHelperImmReg(kQuickInitializeTypeAndVerifyAccess, type_idx, rl_method.reg, true); - RegLocation rl_result = GetReturn(kRefReg); - StoreValue(rl_dest, rl_result); + CallRuntimeHelperImmMethod(kQuickInitializeTypeAndVerifyAccess, type_idx, true); + rl_result = GetReturn(kRefReg); } else { - RegLocation rl_result = EvalLoc(rl_dest, kRefReg, true); - // We're don't need access checks, load type from dex cache - int32_t dex_cache_offset = - mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(); - LoadRefDisp(rl_method.reg, dex_cache_offset, res_reg, kNotVolatile); - int32_t offset_of_type = ClassArray::OffsetOfElement(type_idx).Int32Value(); - LoadRefDisp(res_reg, offset_of_type, rl_result.reg, kNotVolatile); + rl_result = EvalLoc(rl_dest, kRefReg, true); + // We don't need access checks, load type from dex cache + RegStorage r_method = RegStorage::InvalidReg(); + if (CanUseOpPcRelDexCacheArrayLoad()) { + size_t offset = dex_cache_arrays_layout_.TypeOffset(type_idx); + OpPcRelDexCacheArrayLoad(cu_->dex_file, offset, rl_result.reg); + } else { + RegLocation rl_method = LoadCurrMethod(); + 
CheckRegLocation(rl_method); + r_method = rl_method.reg; + int32_t dex_cache_offset = + mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(); + RegStorage res_reg = AllocTempRef(); + LoadRefDisp(r_method, dex_cache_offset, res_reg, kNotVolatile); + int32_t offset_of_type = ClassArray::OffsetOfElement(type_idx).Int32Value(); + LoadRefDisp(res_reg, offset_of_type, rl_result.reg, kNotVolatile); + FreeTemp(res_reg); + } if (!cu_->compiler_driver->CanAssumeTypeIsPresentInDexCache(*cu_->dex_file, type_idx) || ForceSlowTypePath(cu_)) { // Slow path, at runtime test if type is null and if so initialize FlushAllRegs(); - LIR* branch = OpCmpImmBranch(kCondEq, rl_result.reg, 0, NULL); - LIR* cont = NewLIR0(kPseudoTargetLabel); - - // Object to generate the slow path for class resolution. - class SlowPath : public LIRSlowPath { - public: - SlowPath(Mir2Lir* m2l, LIR* fromfast, LIR* cont_in, const int type_idx_in, - const RegLocation& rl_method_in, const RegLocation& rl_result_in) - : LIRSlowPath(m2l, fromfast, cont_in), - type_idx_(type_idx_in), rl_method_(rl_method_in), rl_result_(rl_result_in) { - } - - void Compile() { - GenerateTargetLabel(); - - m2l_->CallRuntimeHelperImmReg(kQuickInitializeType, type_idx_, rl_method_.reg, true); - m2l_->OpRegCopy(rl_result_.reg, m2l_->TargetReg(kRet0, kRef)); - m2l_->OpUnconditionalBranch(cont_); - } - - private: - const int type_idx_; - const RegLocation rl_method_; - const RegLocation rl_result_; - }; - - // Add to list for future. 
- AddSlowPath(new (arena_) SlowPath(this, branch, cont, type_idx, rl_method, rl_result)); - - StoreValue(rl_dest, rl_result); - } else { - // Fast path, we're done - just store result - StoreValue(rl_dest, rl_result); + GenIfNullUseHelperImmMethod(rl_result.reg, kQuickInitializeType, type_idx, r_method); } } + StoreValue(rl_dest, rl_result); } void Mir2Lir::GenConstString(uint32_t string_idx, RegLocation rl_dest) { @@ -1092,64 +1107,42 @@ void Mir2Lir::GenConstString(uint32_t string_idx, RegLocation rl_dest) { FlushAllRegs(); LockCallTemps(); // Using explicit registers - // If the Method* is already in a register, we can save a copy. - RegLocation rl_method = mir_graph_->GetMethodLoc(); - RegStorage r_method; - if (rl_method.location == kLocPhysReg) { - // A temp would conflict with register use below. - DCHECK(!IsTemp(rl_method.reg)); - r_method = rl_method.reg; - } else { - r_method = TargetReg(kArg2, kRef); - LoadCurrMethodDirect(r_method); - } - // Method to declaring class. - LoadRefDisp(r_method, mirror::ArtMethod::DeclaringClassOffset().Int32Value(), - TargetReg(kArg0, kRef), kNotVolatile); - // Declaring class to dex cache strings. - LoadRefDisp(TargetReg(kArg0, kRef), mirror::Class::DexCacheStringsOffset().Int32Value(), - TargetReg(kArg0, kRef), kNotVolatile); - // Might call out to helper, which will return resolved string in kRet0 - LoadRefDisp(TargetReg(kArg0, kRef), offset_of_string, TargetReg(kRet0, kRef), kNotVolatile); - LIR* fromfast = OpCmpImmBranch(kCondEq, TargetReg(kRet0, kRef), 0, NULL); - LIR* cont = NewLIR0(kPseudoTargetLabel); - - { - // Object to generate the slow path for string resolution. 
- class SlowPath : public LIRSlowPath { - public: - SlowPath(Mir2Lir* m2l, LIR* fromfast_in, LIR* cont_in, RegStorage r_method_in, - int32_t string_idx_in) - : LIRSlowPath(m2l, fromfast_in, cont_in), - r_method_(r_method_in), string_idx_(string_idx_in) { - } - - void Compile() { - GenerateTargetLabel(); - m2l_->CallRuntimeHelperImmReg(kQuickResolveString, string_idx_, r_method_, true); - m2l_->OpUnconditionalBranch(cont_); - } - - private: - const RegStorage r_method_; - const int32_t string_idx_; - }; - - AddSlowPath(new (arena_) SlowPath(this, fromfast, cont, r_method, string_idx)); + RegStorage ret0 = TargetReg(kRet0, kRef); + RegStorage r_method = RegStorage::InvalidReg(); + if (CanUseOpPcRelDexCacheArrayLoad()) { + size_t offset = dex_cache_arrays_layout_.StringOffset(string_idx); + OpPcRelDexCacheArrayLoad(cu_->dex_file, offset, ret0); + } else { + r_method = LoadCurrMethodWithHint(TargetReg(kArg1, kRef)); + // Method to declaring class. + RegStorage arg0 = TargetReg(kArg0, kRef); + LoadRefDisp(r_method, mirror::ArtMethod::DeclaringClassOffset().Int32Value(), + arg0, kNotVolatile); + // Declaring class to dex cache strings. 
+ LoadRefDisp(arg0, mirror::Class::DexCacheStringsOffset().Int32Value(), arg0, kNotVolatile); + + LoadRefDisp(arg0, offset_of_string, ret0, kNotVolatile); } + GenIfNullUseHelperImmMethod(ret0, kQuickResolveString, string_idx, r_method); GenBarrier(); StoreValue(rl_dest, GetReturn(kRefReg)); } else { - RegLocation rl_method = LoadCurrMethod(); - RegStorage res_reg = AllocTempRef(); RegLocation rl_result = EvalLoc(rl_dest, kRefReg, true); - LoadRefDisp(rl_method.reg, mirror::ArtMethod::DeclaringClassOffset().Int32Value(), res_reg, - kNotVolatile); - LoadRefDisp(res_reg, mirror::Class::DexCacheStringsOffset().Int32Value(), res_reg, - kNotVolatile); - LoadRefDisp(res_reg, offset_of_string, rl_result.reg, kNotVolatile); + if (CanUseOpPcRelDexCacheArrayLoad()) { + size_t offset = dex_cache_arrays_layout_.StringOffset(string_idx); + OpPcRelDexCacheArrayLoad(cu_->dex_file, offset, rl_result.reg); + } else { + RegLocation rl_method = LoadCurrMethod(); + RegStorage res_reg = AllocTempRef(); + LoadRefDisp(rl_method.reg, mirror::ArtMethod::DeclaringClassOffset().Int32Value(), res_reg, + kNotVolatile); + LoadRefDisp(res_reg, mirror::Class::DexCacheStringsOffset().Int32Value(), res_reg, + kNotVolatile); + LoadRefDisp(res_reg, offset_of_string, rl_result.reg, kNotVolatile); + FreeTemp(res_reg); + } StoreValue(rl_dest, rl_result); } } @@ -1224,14 +1217,20 @@ void Mir2Lir::GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx, Re RegStorage check_class = AllocTypedTemp(false, kRefReg); RegStorage object_class = AllocTypedTemp(false, kRefReg); - LoadCurrMethodDirect(check_class); if (use_declaring_class) { - LoadRefDisp(check_class, mirror::ArtMethod::DeclaringClassOffset().Int32Value(), check_class, + RegStorage r_method = LoadCurrMethodWithHint(check_class); + LoadRefDisp(r_method, mirror::ArtMethod::DeclaringClassOffset().Int32Value(), check_class, + kNotVolatile); + LoadRefDisp(object.reg, mirror::Object::ClassOffset().Int32Value(), object_class, kNotVolatile); + } else 
if (CanUseOpPcRelDexCacheArrayLoad()) { + size_t offset = dex_cache_arrays_layout_.TypeOffset(type_idx); + OpPcRelDexCacheArrayLoad(cu_->dex_file, offset, check_class); LoadRefDisp(object.reg, mirror::Object::ClassOffset().Int32Value(), object_class, kNotVolatile); } else { - LoadRefDisp(check_class, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(), + RegStorage r_method = LoadCurrMethodWithHint(check_class); + LoadRefDisp(r_method, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(), check_class, kNotVolatile); LoadRefDisp(object.reg, mirror::Object::ClassOffset().Int32Value(), object_class, kNotVolatile); @@ -1267,20 +1266,19 @@ void Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_know FlushAllRegs(); // May generate a call - use explicit registers LockCallTemps(); - RegStorage method_reg = TargetReg(kArg1, kRef); - LoadCurrMethodDirect(method_reg); // kArg1 <= current Method* RegStorage class_reg = TargetReg(kArg2, kRef); // kArg2 will hold the Class* RegStorage ref_reg = TargetReg(kArg0, kRef); // kArg0 will hold the ref. 
RegStorage ret_reg = GetReturn(kRefReg).reg; if (needs_access_check) { // Check we have access to type_idx and if not throw IllegalAccessError, // returns Class* in kArg0 - CallRuntimeHelperImm(kQuickInitializeTypeAndVerifyAccess, type_idx, true); + CallRuntimeHelperImmMethod(kQuickInitializeTypeAndVerifyAccess, type_idx, true); OpRegCopy(class_reg, ret_reg); // Align usage with fast path LoadValueDirectFixed(rl_src, ref_reg); // kArg0 <= ref } else if (use_declaring_class) { + RegStorage r_method = LoadCurrMethodWithHint(TargetReg(kArg1, kRef)); LoadValueDirectFixed(rl_src, ref_reg); // kArg0 <= ref - LoadRefDisp(method_reg, mirror::ArtMethod::DeclaringClassOffset().Int32Value(), + LoadRefDisp(r_method, mirror::ArtMethod::DeclaringClassOffset().Int32Value(), class_reg, kNotVolatile); } else { if (can_assume_type_is_in_dex_cache) { @@ -1288,42 +1286,23 @@ void Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_know LoadValueDirectFixed(rl_src, ref_reg); // kArg0 <= ref } - // Load dex cache entry into class_reg (kArg2) - LoadRefDisp(method_reg, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(), - class_reg, kNotVolatile); - int32_t offset_of_type = ClassArray::OffsetOfElement(type_idx).Int32Value(); - LoadRefDisp(class_reg, offset_of_type, class_reg, kNotVolatile); + RegStorage r_method = RegStorage::InvalidReg(); + if (CanUseOpPcRelDexCacheArrayLoad()) { + size_t offset = dex_cache_arrays_layout_.TypeOffset(type_idx); + OpPcRelDexCacheArrayLoad(cu_->dex_file, offset, class_reg); + } else { + r_method = LoadCurrMethodWithHint(TargetReg(kArg1, kRef)); + // Load dex cache entry into class_reg (kArg2) + LoadRefDisp(r_method, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(), + class_reg, kNotVolatile); + int32_t offset_of_type = ClassArray::OffsetOfElement(type_idx).Int32Value(); + LoadRefDisp(class_reg, offset_of_type, class_reg, kNotVolatile); + } if (!can_assume_type_is_in_dex_cache) { - LIR* slow_path_branch = 
OpCmpImmBranch(kCondEq, class_reg, 0, NULL); - LIR* slow_path_target = NewLIR0(kPseudoTargetLabel); + GenIfNullUseHelperImmMethod(class_reg, kQuickInitializeType, type_idx, r_method); // Should load value here. LoadValueDirectFixed(rl_src, ref_reg); // kArg0 <= ref - - class InitTypeSlowPath : public Mir2Lir::LIRSlowPath { - public: - InitTypeSlowPath(Mir2Lir* m2l, LIR* branch, LIR* cont, uint32_t type_idx_in, - RegLocation rl_src_in) - : LIRSlowPath(m2l, branch, cont), type_idx_(type_idx_in), - rl_src_(rl_src_in) { - } - - void Compile() OVERRIDE { - GenerateTargetLabel(); - - m2l_->CallRuntimeHelperImm(kQuickInitializeType, type_idx_, true); - m2l_->OpRegCopy(m2l_->TargetReg(kArg2, kRef), - m2l_->TargetReg(kRet0, kRef)); // Align usage with fast path - m2l_->OpUnconditionalBranch(cont_); - } - - private: - uint32_t type_idx_; - RegLocation rl_src_; - }; - - AddSlowPath(new (arena_) InitTypeSlowPath(this, slow_path_branch, slow_path_target, - type_idx, rl_src)); } } /* kArg0 is ref, kArg2 is class. 
If ref==null, use directly as bool result */ @@ -1426,55 +1405,34 @@ void Mir2Lir::GenCheckCast(int opt_flags, uint32_t insn_idx, uint32_t type_idx, FlushAllRegs(); // May generate a call - use explicit registers LockCallTemps(); - RegStorage method_reg = TargetReg(kArg1, kRef); - LoadCurrMethodDirect(method_reg); // kArg1 <= current Method* RegStorage class_reg = TargetReg(kArg2, kRef); // kArg2 will hold the Class* if (needs_access_check) { // Check we have access to type_idx and if not throw IllegalAccessError, // returns Class* in kRet0 // InitializeTypeAndVerifyAccess(idx, method) - CallRuntimeHelperImm(kQuickInitializeTypeAndVerifyAccess, type_idx, true); + CallRuntimeHelperImmMethod(kQuickInitializeTypeAndVerifyAccess, type_idx, true); OpRegCopy(class_reg, TargetReg(kRet0, kRef)); // Align usage with fast path } else if (use_declaring_class) { + RegStorage method_reg = LoadCurrMethodWithHint(TargetReg(kArg1, kRef)); LoadRefDisp(method_reg, mirror::ArtMethod::DeclaringClassOffset().Int32Value(), class_reg, kNotVolatile); } else { // Load dex cache entry into class_reg (kArg2) - LoadRefDisp(method_reg, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(), - class_reg, kNotVolatile); - int32_t offset_of_type = ClassArray::OffsetOfElement(type_idx).Int32Value(); - LoadRefDisp(class_reg, offset_of_type, class_reg, kNotVolatile); + RegStorage r_method = RegStorage::InvalidReg(); + if (CanUseOpPcRelDexCacheArrayLoad()) { + size_t offset = dex_cache_arrays_layout_.TypeOffset(type_idx); + OpPcRelDexCacheArrayLoad(cu_->dex_file, offset, class_reg); + } else { + r_method = LoadCurrMethodWithHint(TargetReg(kArg1, kRef)); + + LoadRefDisp(r_method, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(), + class_reg, kNotVolatile); + int32_t offset_of_type = ClassArray::OffsetOfElement(type_idx).Int32Value(); + LoadRefDisp(class_reg, offset_of_type, class_reg, kNotVolatile); + } if (!cu_->compiler_driver->CanAssumeTypeIsPresentInDexCache(*cu_->dex_file, 
type_idx)) { // Need to test presence of type in dex cache at runtime - LIR* hop_branch = OpCmpImmBranch(kCondEq, class_reg, 0, NULL); - LIR* cont = NewLIR0(kPseudoTargetLabel); - - // Slow path to initialize the type. Executed if the type is NULL. - class SlowPath : public LIRSlowPath { - public: - SlowPath(Mir2Lir* m2l, LIR* fromfast, LIR* cont_in, const int type_idx_in, - const RegStorage class_reg_in) - : LIRSlowPath(m2l, fromfast, cont_in), - type_idx_(type_idx_in), class_reg_(class_reg_in) { - } - - void Compile() { - GenerateTargetLabel(); - - // Call out to helper, which will return resolved type in kArg0 - // InitializeTypeFromCode(idx, method) - m2l_->CallRuntimeHelperImmReg(kQuickInitializeType, type_idx_, - m2l_->TargetReg(kArg1, kRef), true); - m2l_->OpRegCopy(class_reg_, m2l_->TargetReg(kRet0, kRef)); // Align usage with fast path - m2l_->OpUnconditionalBranch(cont_); - } - - public: - const int type_idx_; - const RegStorage class_reg_; - }; - - AddSlowPath(new (arena_) SlowPath(this, hop_branch, cont, type_idx, class_reg)); + GenIfNullUseHelperImmMethod(class_reg, kQuickInitializeType, type_idx, r_method); } } // At this point, class_reg (kArg2) has class diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc index 2d41ba1..e747239 100755 --- a/compiler/dex/quick/gen_invoke.cc +++ b/compiler/dex/quick/gen_invoke.cc @@ -24,6 +24,7 @@ #include "dex/quick/dex_file_to_method_inliner_map.h" #include "dex_file-inl.h" #include "driver/compiler_driver.h" +#include "driver/compiler_options.h" #include "entrypoints/quick/quick_entrypoints.h" #include "invoke_type.h" #include "mirror/array.h" diff --git a/compiler/dex/quick/gen_loadstore.cc b/compiler/dex/quick/gen_loadstore.cc index b71691f..54e5742 100644 --- a/compiler/dex/quick/gen_loadstore.cc +++ b/compiler/dex/quick/gen_loadstore.cc @@ -340,6 +340,20 @@ void Mir2Lir::LoadCurrMethodDirect(RegStorage r_tgt) { LoadValueDirectFixed(mir_graph_->GetMethodLoc(), r_tgt); } +RegStorage 
Mir2Lir::LoadCurrMethodWithHint(RegStorage r_hint) { + // If the method is promoted to a register, return that register, otherwise load it to r_hint. + // (Replacement for LoadCurrMethod() usually used when LockCallTemps() is in effect.) + DCHECK(r_hint.Valid()); + RegLocation rl_method = mir_graph_->GetMethodLoc(); + if (rl_method.location == kLocPhysReg) { + DCHECK(!IsTemp(rl_method.reg)); + return rl_method.reg; + } else { + LoadCurrMethodDirect(r_hint); + return r_hint; + } +} + RegLocation Mir2Lir::LoadCurrMethod() { return LoadValue(mir_graph_->GetMethodLoc(), kRefReg); } diff --git a/compiler/dex/quick/local_optimizations.cc b/compiler/dex/quick/local_optimizations.cc index e573899..6cdf567 100644 --- a/compiler/dex/quick/local_optimizations.cc +++ b/compiler/dex/quick/local_optimizations.cc @@ -493,15 +493,14 @@ void Mir2Lir::ApplyLoadHoisting(LIR* head_lir, LIR* tail_lir) { /* Found a slot to hoist to */ if (slot >= 0) { LIR* cur_lir = prev_inst_list[slot]; - LIR* new_load_lir = - static_cast<LIR*>(arena_->Alloc(sizeof(LIR), kArenaAllocLIR)); - *new_load_lir = *this_lir; + LIR* prev_lir = PREV_LIR(this_lir); + UnlinkLIR(this_lir); /* * Insertion is guaranteed to succeed since check_lir * is never the first LIR on the list */ - InsertLIRBefore(cur_lir, new_load_lir); - NopLIR(this_lir); + InsertLIRBefore(cur_lir, this_lir); + this_lir = prev_lir; // Continue the loop with the next LIR. } } } diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h index cca4e5a..bb8fbae 100644 --- a/compiler/dex/quick/mir_to_lir.h +++ b/compiler/dex/quick/mir_to_lir.h @@ -32,6 +32,7 @@ #include "leb128.h" #include "safe_map.h" #include "utils/array_ref.h" +#include "utils/dex_cache_arrays_layout.h" #include "utils/stack_checks.h" namespace art { @@ -956,6 +957,7 @@ class Mir2Lir { // Shared by all targets - implemented in gen_loadstore.cc. 
RegLocation LoadCurrMethod(); void LoadCurrMethodDirect(RegStorage r_tgt); + RegStorage LoadCurrMethodWithHint(RegStorage r_hint); virtual LIR* LoadConstant(RegStorage r_dest, int value); // Natural word size. LIR* LoadWordDisp(RegStorage r_base, int displacement, RegStorage r_dest) { @@ -1093,6 +1095,18 @@ class Mir2Lir { virtual void LoadClassType(const DexFile& dex_file, uint32_t type_idx, SpecialTargetRegister symbolic_reg); + // TODO: Support PC-relative dex cache array loads on all platforms and + // replace CanUseOpPcRelDexCacheArrayLoad() with dex_cache_arrays_layout_.Valid(). + virtual bool CanUseOpPcRelDexCacheArrayLoad() const; + + /* + * @brief Load an element of one of the dex cache arrays. + * @param dex_file the dex file associated with the target dex cache. + * @param offset the offset of the element in the fixed dex cache arrays' layout. + * @param r_dest the register where to load the element. + */ + virtual void OpPcRelDexCacheArrayLoad(const DexFile* dex_file, int offset, RegStorage r_dest); + // Routines that work for the generic case, but may be overriden by target. /* * @brief Compare memory to immediate, and branch if condition true. @@ -1596,7 +1610,6 @@ class Mir2Lir { */ virtual bool GenSpecialCase(BasicBlock* bb, MIR* mir, const InlineMethod& special); - protected: void ClobberBody(RegisterInfo* p); void SetCurrentDexPc(DexOffset dexpc) { current_dalvik_offset_ = dexpc; @@ -1669,6 +1682,16 @@ class Mir2Lir { */ bool GenSpecialIdentity(MIR* mir, const InlineMethod& special); + /** + * @brief Generate code to check if result is null and, if it is, call helper to load it. + * @param r_result the result register. + * @param trampoline the helper to call in slow path. + * @param imm the immediate passed to the helper. + * @param r_method the register with ArtMethod* if available, otherwise RegStorage::Invalid(). 
+ */ + void GenIfNullUseHelperImmMethod( + RegStorage r_result, QuickEntrypointEnum trampoline, int imm, RegStorage r_method); + void AddDivZeroCheckSlowPath(LIR* branch); // Copy arg0 and arg1 to kArg0 and kArg1 safely, possibly using @@ -1815,7 +1838,9 @@ class Mir2Lir { // Record the MIR that generated a given safepoint (nullptr for prologue safepoints). ArenaVector<std::pair<LIR*, MIR*>> safepoints_; - protected: + // The layout of the cu_->dex_file's dex cache arrays for PC-relative addressing. + const DexCacheArraysLayout dex_cache_arrays_layout_; + // ABI support class ShortyArg { public: diff --git a/compiler/dex/quick/quick_compiler.cc b/compiler/dex/quick/quick_compiler.cc index 1673312..d4ad0c2 100644 --- a/compiler/dex/quick/quick_compiler.cc +++ b/compiler/dex/quick/quick_compiler.cc @@ -635,6 +635,12 @@ CompiledMethod* QuickCompiler::Compile(const DexFile::CodeItem* code_item, instruction_set = kThumb2; } CompilationUnit cu(runtime->GetArenaPool(), instruction_set, driver, class_linker); + cu.dex_file = &dex_file; + cu.class_def_idx = class_def_idx; + cu.method_idx = method_idx; + cu.access_flags = access_flags; + cu.invoke_type = invoke_type; + cu.shorty = dex_file.GetMethodShorty(dex_file.GetMethodId(method_idx)); CHECK((cu.instruction_set == kThumb2) || (cu.instruction_set == kArm64) || diff --git a/compiler/dex/quick/x86/call_x86.cc b/compiler/dex/quick/x86/call_x86.cc index abee872..e81228a 100644 --- a/compiler/dex/quick/x86/call_x86.cc +++ b/compiler/dex/quick/x86/call_x86.cc @@ -21,6 +21,7 @@ #include "base/logging.h" #include "dex/quick/mir_to_lir-inl.h" #include "driver/compiler_driver.h" +#include "driver/compiler_options.h" #include "gc/accounting/card_table.h" #include "mirror/art_method.h" #include "mirror/object_array-inl.h" |