author     Vladimir Marko <vmarko@google.com>   2015-04-20 20:11:30 +0100
committer  Vladimir Marko <vmarko@google.com>   2015-04-24 11:08:46 +0100
commit     5ea536aa4a6414db01beaf6f8bd8cb9adc5cfc92
tree       27b3f1c148744452b26bd4841f0dabe0549c3d86 /compiler
parent     c5cb691ca6a746a193bfbe3525aafa7cbb281d40
Remove ArtMethod* parameter from dex cache entry points.
Load the ArtMethod* using an optimized stack walk instead.
This reduces the size of the generated code.
Three of the entry points are called only from a slow path
and the fourth (InitializeTypeAndVerifyAccess) is rare and
already slow enough that the one or two extra loads
(depending on whether we already have the ArtMethod* in a
register) are insignificant. And as we're starting to use
PC-relative addressing of the dex cache arrays (already
done by Quick for the boot image), having the ArtMethod* in
a register becomes less likely anyway.
Change-Id: Ib19b9d204e355e13bf386662a8b158178bf8ad28
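The gist of the change, as a minimal standalone C++ sketch (every name below is a hypothetical stand-in for illustration, not ART's actual API): instead of each call site keeping its ArtMethod* live in a register and passing it as an extra argument, the entry point recovers the calling method itself from the managed stack, so the cost moves off every call site and onto the slow path.

#include <cstdint>
#include <vector>

// Hypothetical stand-ins for illustration only; not ART's real types.
struct ArtMethod {};
struct ManagedFrame { ArtMethod* method; };

struct Thread {
  std::vector<ManagedFrame> stack;
  // Stand-in for the "optimized stack walk": the calling method sits in the
  // topmost managed frame below the runtime transition.
  ArtMethod* GetCallerMethod() const { return stack.back().method; }
};

// Before: every call site had to keep ArtMethod* live and pass it explicitly.
void* InitializeTypeOld(uint32_t type_idx, ArtMethod* referrer, Thread* self);

// After: the entry point loads the referrer itself. The extra load is paid
// only on the (already slow) runtime path, and every call site gets smaller.
void* InitializeTypeNew(uint32_t type_idx, Thread* self) {
  ArtMethod* referrer = self->GetCallerMethod();  // optimized stack walk
  (void)type_idx;
  (void)referrer;
  return nullptr;  // actual type resolution elided in this sketch
}

The trade mirrors the commit message: fast-path code shrinks everywhere, while the slow path pays one or two extra loads.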
Diffstat (limited to 'compiler')
-rw-r--r--  compiler/dex/quick/gen_common.cc              62
-rw-r--r--  compiler/dex/quick/mir_to_lir.h                4
-rw-r--r--  compiler/optimizing/code_generator_arm.cc      2
-rw-r--r--  compiler/optimizing/code_generator_arm64.cc    8
-rw-r--r--  compiler/optimizing/code_generator_x86.cc      2
-rw-r--r--  compiler/optimizing/code_generator_x86_64.cc   2
6 files changed, 23 insertions, 57 deletions
diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc
index de5e041..0592c74 100644
--- a/compiler/dex/quick/gen_common.cc
+++ b/compiler/dex/quick/gen_common.cc
@@ -58,24 +58,19 @@ ALWAYS_INLINE static inline bool ForceSlowTypePath(CompilationUnit* cu) {
   return (cu->enable_debug & (1 << kDebugSlowTypePath)) != 0;
 }

-void Mir2Lir::GenIfNullUseHelperImmMethod(
-    RegStorage r_result, QuickEntrypointEnum trampoline, int imm, RegStorage r_method) {
+void Mir2Lir::GenIfNullUseHelperImm(RegStorage r_result, QuickEntrypointEnum trampoline, int imm) {
   class CallHelperImmMethodSlowPath : public LIRSlowPath {
    public:
     CallHelperImmMethodSlowPath(Mir2Lir* m2l, LIR* fromfast, LIR* cont,
                                 QuickEntrypointEnum trampoline_in, int imm_in,
-                                RegStorage r_method_in, RegStorage r_result_in)
+                                RegStorage r_result_in)
         : LIRSlowPath(m2l, fromfast, cont), trampoline_(trampoline_in),
-          imm_(imm_in), r_method_(r_method_in), r_result_(r_result_in) {
+          imm_(imm_in), r_result_(r_result_in) {
     }

     void Compile() {
       GenerateTargetLabel();
-      if (r_method_.Valid()) {
-        m2l_->CallRuntimeHelperImmReg(trampoline_, imm_, r_method_, true);
-      } else {
-        m2l_->CallRuntimeHelperImmMethod(trampoline_, imm_, true);
-      }
+      m2l_->CallRuntimeHelperImm(trampoline_, imm_, true);
       m2l_->OpRegCopy(r_result_, m2l_->TargetReg(kRet0, kRef));
       m2l_->OpUnconditionalBranch(cont_);
     }
@@ -83,7 +78,6 @@ void Mir2Lir::GenIfNullUseHelperImmMethod(
    private:
     QuickEntrypointEnum trampoline_;
     const int imm_;
-    const RegStorage r_method_;
     const RegStorage r_result_;
   };

@@ -91,7 +85,7 @@ void Mir2Lir::GenIfNullUseHelperImmMethod(
   LIR* cont = NewLIR0(kPseudoTargetLabel);

   AddSlowPath(new (arena_) CallHelperImmMethodSlowPath(this, branch, cont, trampoline, imm,
-                                                       r_method, r_result));
+                                                       r_result));
 }

 RegStorage Mir2Lir::GenGetOtherTypeForSgetSput(const MirSFieldLoweringInfo& field_info,
@@ -101,13 +95,12 @@ RegStorage Mir2Lir::GenGetOtherTypeForSgetSput(const MirSFieldLoweringInfo& fiel
   FlushAllRegs();
   RegStorage r_base = TargetReg(kArg0, kRef);
   LockTemp(r_base);
-  RegStorage r_method = RegStorage::InvalidReg();  // Loaded lazily, maybe in the slow-path.
   if (CanUseOpPcRelDexCacheArrayLoad()) {
     uint32_t offset = dex_cache_arrays_layout_.TypeOffset(field_info.StorageIndex());
     OpPcRelDexCacheArrayLoad(cu_->dex_file, offset, r_base);
   } else {
     // Using fixed register to sync with possible call to runtime support.
-    r_method = LoadCurrMethodWithHint(TargetReg(kArg1, kRef));
+    RegStorage r_method = LoadCurrMethodWithHint(r_base);
     LoadRefDisp(r_method, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
                 r_base, kNotVolatile);
     int32_t offset_of_field = ObjArray::OffsetOfElement(field_info.StorageIndex()).Int32Value();
@@ -139,10 +132,10 @@ RegStorage Mir2Lir::GenGetOtherTypeForSgetSput(const MirSFieldLoweringInfo& fiel
       // entry in the dex cache is null, and the "uninit" when the class is not yet initialized.
       // At least one will be non-null here, otherwise we wouldn't generate the slow path.
       StaticFieldSlowPath(Mir2Lir* m2l, LIR* unresolved, LIR* uninit, LIR* cont, int storage_index,
-                          RegStorage r_base_in, RegStorage r_method_in)
+                          RegStorage r_base_in)
           : LIRSlowPath(m2l, unresolved != nullptr ? unresolved : uninit, cont),
             second_branch_(unresolved != nullptr ? uninit : nullptr),
-            storage_index_(storage_index), r_base_(r_base_in), r_method_(r_method_in) {
+            storage_index_(storage_index), r_base_(r_base_in) {
       }

       void Compile() {
@@ -150,14 +143,7 @@ RegStorage Mir2Lir::GenGetOtherTypeForSgetSput(const MirSFieldLoweringInfo& fiel
         if (second_branch_ != nullptr) {
           second_branch_->target = target;
         }
-        if (r_method_.Valid()) {
-          // ArtMethod* was loaded in normal path - use it.
-          m2l_->CallRuntimeHelperImmReg(kQuickInitializeStaticStorage, storage_index_, r_method_,
-                                        true);
-        } else {
-          // ArtMethod* wasn't loaded in normal path - use a helper that loads it.
-          m2l_->CallRuntimeHelperImmMethod(kQuickInitializeStaticStorage, storage_index_, true);
-        }
+        m2l_->CallRuntimeHelperImm(kQuickInitializeStaticStorage, storage_index_, true);

         // Copy helper's result into r_base, a no-op on all but MIPS.
         m2l_->OpRegCopy(r_base_, m2l_->TargetReg(kRet0, kRef));
@@ -170,17 +156,13 @@ RegStorage Mir2Lir::GenGetOtherTypeForSgetSput(const MirSFieldLoweringInfo& fiel

       const int storage_index_;
       const RegStorage r_base_;
-      RegStorage r_method_;
     };

     // The slow path is invoked if the r_base is null or the class pointed
     // to by it is not initialized.
     LIR* cont = NewLIR0(kPseudoTargetLabel);
     AddSlowPath(new (arena_) StaticFieldSlowPath(this, unresolved_branch, uninit_branch, cont,
-                                                 field_info.StorageIndex(), r_base, r_method));
-  }
-  if (IsTemp(r_method)) {
-    FreeTemp(r_method);
+                                                 field_info.StorageIndex(), r_base));
   }
   return r_base;
 }
@@ -1042,22 +1024,19 @@ void Mir2Lir::GenConstClass(uint32_t type_idx, RegLocation rl_dest) {
                                                         type_idx)) {
     // Call out to helper which resolves type and verifies access.
     // Resolved type returned in kRet0.
-    CallRuntimeHelperImmMethod(kQuickInitializeTypeAndVerifyAccess, type_idx, true);
+    CallRuntimeHelperImm(kQuickInitializeTypeAndVerifyAccess, type_idx, true);
     rl_result = GetReturn(kRefReg);
   } else {
     rl_result = EvalLoc(rl_dest, kRefReg, true);
     // We don't need access checks, load type from dex cache
-    RegStorage r_method = RegStorage::InvalidReg();
     if (CanUseOpPcRelDexCacheArrayLoad()) {
       size_t offset = dex_cache_arrays_layout_.TypeOffset(type_idx);
       OpPcRelDexCacheArrayLoad(cu_->dex_file, offset, rl_result.reg);
     } else {
-      RegLocation rl_method = LoadCurrMethod();
-      CheckRegLocation(rl_method);
-      r_method = rl_method.reg;
       int32_t dex_cache_offset = mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value();
       RegStorage res_reg = AllocTempRef();
+      RegStorage r_method = LoadCurrMethodWithHint(res_reg);
       LoadRefDisp(r_method, dex_cache_offset, res_reg, kNotVolatile);
       int32_t offset_of_type = ClassArray::OffsetOfElement(type_idx).Int32Value();
       LoadRefDisp(res_reg, offset_of_type, rl_result.reg, kNotVolatile);
@@ -1067,7 +1046,7 @@ void Mir2Lir::GenConstClass(uint32_t type_idx, RegLocation rl_dest) {
                                                type_idx) || ForceSlowTypePath(cu_)) {
       // Slow path, at runtime test if type is null and if so initialize
       FlushAllRegs();
-      GenIfNullUseHelperImmMethod(rl_result.reg, kQuickInitializeType, type_idx, r_method);
+      GenIfNullUseHelperImm(rl_result.reg, kQuickInitializeType, type_idx);
     }
   }
   StoreValue(rl_dest, rl_result);
@@ -1085,14 +1064,13 @@ void Mir2Lir::GenConstString(uint32_t string_idx, RegLocation rl_dest) {

     // Might call out to helper, which will return resolved string in kRet0
     RegStorage ret0 = TargetReg(kRet0, kRef);
-    RegStorage r_method = RegStorage::InvalidReg();
     if (CanUseOpPcRelDexCacheArrayLoad()) {
       size_t offset = dex_cache_arrays_layout_.StringOffset(string_idx);
       OpPcRelDexCacheArrayLoad(cu_->dex_file, offset, ret0);
     } else {
-      r_method = LoadCurrMethodWithHint(TargetReg(kArg1, kRef));
       // Method to declaring class.
       RegStorage arg0 = TargetReg(kArg0, kRef);
+      RegStorage r_method = LoadCurrMethodWithHint(arg0);
       LoadRefDisp(r_method, mirror::ArtMethod::DeclaringClassOffset().Int32Value(),
                   arg0, kNotVolatile);
       // Declaring class to dex cache strings.
@@ -1100,7 +1078,7 @@ void Mir2Lir::GenConstString(uint32_t string_idx, RegLocation rl_dest) {
       LoadRefDisp(arg0, offset_of_string, ret0, kNotVolatile);
     }

-    GenIfNullUseHelperImmMethod(ret0, kQuickResolveString, string_idx, r_method);
+    GenIfNullUseHelperImm(ret0, kQuickResolveString, string_idx);

     GenBarrier();
     StoreValue(rl_dest, GetReturn(kRefReg));
@@ -1262,12 +1240,11 @@ void Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_know
     LoadValueDirectFixed(rl_src, ref_reg);  // kArg0 <= ref
   }

-  RegStorage r_method = RegStorage::InvalidReg();
   if (CanUseOpPcRelDexCacheArrayLoad()) {
     size_t offset = dex_cache_arrays_layout_.TypeOffset(type_idx);
     OpPcRelDexCacheArrayLoad(cu_->dex_file, offset, class_reg);
   } else {
-    r_method = LoadCurrMethodWithHint(TargetReg(kArg1, kRef));
+    RegStorage r_method = LoadCurrMethodWithHint(class_reg);
     // Load dex cache entry into class_reg (kArg2)
     LoadRefDisp(r_method, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
                 class_reg, kNotVolatile);
@@ -1275,7 +1252,7 @@ void Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_know
     LoadRefDisp(class_reg, offset_of_type, class_reg, kNotVolatile);
   }
   if (!can_assume_type_is_in_dex_cache) {
-    GenIfNullUseHelperImmMethod(class_reg, kQuickInitializeType, type_idx, r_method);
+    GenIfNullUseHelperImm(class_reg, kQuickInitializeType, type_idx);

     // Should load value here.
     LoadValueDirectFixed(rl_src, ref_reg);  // kArg0 <= ref
@@ -1394,12 +1371,11 @@ void Mir2Lir::GenCheckCast(int opt_flags, uint32_t insn_idx, uint32_t type_idx,
                 class_reg, kNotVolatile);
   } else {
     // Load dex cache entry into class_reg (kArg2)
-    RegStorage r_method = RegStorage::InvalidReg();
     if (CanUseOpPcRelDexCacheArrayLoad()) {
       size_t offset = dex_cache_arrays_layout_.TypeOffset(type_idx);
       OpPcRelDexCacheArrayLoad(cu_->dex_file, offset, class_reg);
     } else {
-      r_method = LoadCurrMethodWithHint(TargetReg(kArg1, kRef));
+      RegStorage r_method = LoadCurrMethodWithHint(class_reg);
       LoadRefDisp(r_method, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
                   class_reg, kNotVolatile);
@@ -1408,7 +1384,7 @@ void Mir2Lir::GenCheckCast(int opt_flags, uint32_t insn_idx, uint32_t type_idx,
     }
     if (!cu_->compiler_driver->CanAssumeTypeIsPresentInDexCache(*cu_->dex_file, type_idx)) {
       // Need to test presence of type in dex cache at runtime
-      GenIfNullUseHelperImmMethod(class_reg, kQuickInitializeType, type_idx, r_method);
+      GenIfNullUseHelperImm(class_reg, kQuickInitializeType, type_idx);
     }
   }
   // At this point, class_reg (kArg2) has class
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
index 8f08a51..6f227fc 100644
--- a/compiler/dex/quick/mir_to_lir.h
+++ b/compiler/dex/quick/mir_to_lir.h
@@ -1692,10 +1692,8 @@ class Mir2Lir {
      * @param r_result the result register.
      * @param trampoline the helper to call in slow path.
      * @param imm the immediate passed to the helper.
-     * @param r_method the register with ArtMethod* if available, otherwise RegStorage::Invalid().
      */
-    void GenIfNullUseHelperImmMethod(
-        RegStorage r_result, QuickEntrypointEnum trampoline, int imm, RegStorage r_method);
+    void GenIfNullUseHelperImm(RegStorage r_result, QuickEntrypointEnum trampoline, int imm);

     /**
      * @brief Generate code to retrieve Class* for another type to be used by SGET/SPUT.
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index ae1fb53..8589f94 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -176,7 +176,6 @@ class LoadClassSlowPathARM : public SlowPathCodeARM {

     InvokeRuntimeCallingConvention calling_convention;
     __ LoadImmediate(calling_convention.GetRegisterAt(0), cls_->GetTypeIndex());
-    arm_codegen->LoadCurrentMethod(calling_convention.GetRegisterAt(1));
     int32_t entry_point_offset = do_clinit_ ? QUICK_ENTRY_POINT(pInitializeStaticStorage)
                                             : QUICK_ENTRY_POINT(pInitializeType);

@@ -222,7 +221,6 @@ class LoadStringSlowPathARM : public SlowPathCodeARM {
     SaveLiveRegisters(codegen, locations);

     InvokeRuntimeCallingConvention calling_convention;
-    arm_codegen->LoadCurrentMethod(calling_convention.GetRegisterAt(1));
     __ LoadImmediate(calling_convention.GetRegisterAt(0), instruction_->GetStringIndex());
     arm_codegen->InvokeRuntime(
         QUICK_ENTRY_POINT(pResolveString), instruction_, instruction_->GetDexPc(), this);
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 1c6debd..2a2f07f 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -173,14 +173,13 @@ class LoadClassSlowPathARM64 : public SlowPathCodeARM64 {

     InvokeRuntimeCallingConvention calling_convention;
     __ Mov(calling_convention.GetRegisterAt(0).W(), cls_->GetTypeIndex());
-    arm64_codegen->LoadCurrentMethod(calling_convention.GetRegisterAt(1).W());
     int32_t entry_point_offset = do_clinit_ ? QUICK_ENTRY_POINT(pInitializeStaticStorage)
                                             : QUICK_ENTRY_POINT(pInitializeType);
     arm64_codegen->InvokeRuntime(entry_point_offset, at_, dex_pc_, this);
     if (do_clinit_) {
-      CheckEntrypointTypes<kQuickInitializeStaticStorage, void*, uint32_t, mirror::ArtMethod*>();
+      CheckEntrypointTypes<kQuickInitializeStaticStorage, void*, uint32_t>();
     } else {
-      CheckEntrypointTypes<kQuickInitializeType, void*, uint32_t, mirror::ArtMethod*>();
+      CheckEntrypointTypes<kQuickInitializeType, void*, uint32_t>();
     }

     // Move the class to the desired location.
@@ -225,11 +224,10 @@ class LoadStringSlowPathARM64 : public SlowPathCodeARM64 {
     SaveLiveRegisters(codegen, locations);

     InvokeRuntimeCallingConvention calling_convention;
-    arm64_codegen->LoadCurrentMethod(calling_convention.GetRegisterAt(1).W());
     __ Mov(calling_convention.GetRegisterAt(0).W(), instruction_->GetStringIndex());
     arm64_codegen->InvokeRuntime(
         QUICK_ENTRY_POINT(pResolveString), instruction_, instruction_->GetDexPc(), this);
-    CheckEntrypointTypes<kQuickResolveString, void*, uint32_t, mirror::ArtMethod*>();
+    CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>();
     Primitive::Type type = instruction_->GetType();
     arm64_codegen->MoveLocation(locations->Out(), calling_convention.GetReturnLocation(type), type);
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index c604842..021e2eb 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -174,7 +174,6 @@ class LoadStringSlowPathX86 : public SlowPathCodeX86 {
     SaveLiveRegisters(codegen, locations);

     InvokeRuntimeCallingConvention calling_convention;
-    x86_codegen->LoadCurrentMethod(calling_convention.GetRegisterAt(1));
     __ movl(calling_convention.GetRegisterAt(0), Immediate(instruction_->GetStringIndex()));
     __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pResolveString)));
     RecordPcInfo(codegen, instruction_, instruction_->GetDexPc());
@@ -208,7 +207,6 @@ class LoadClassSlowPathX86 : public SlowPathCodeX86 {

     InvokeRuntimeCallingConvention calling_convention;
     __ movl(calling_convention.GetRegisterAt(0), Immediate(cls_->GetTypeIndex()));
-    x86_codegen->LoadCurrentMethod(calling_convention.GetRegisterAt(1));
     __ fs()->call(Address::Absolute(do_clinit_
         ? QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pInitializeStaticStorage)
         : QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pInitializeType)));
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 47425fb..0c97a6e 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -197,7 +197,6 @@ class LoadClassSlowPathX86_64 : public SlowPathCodeX86_64 {

     InvokeRuntimeCallingConvention calling_convention;
     __ movl(CpuRegister(calling_convention.GetRegisterAt(0)), Immediate(cls_->GetTypeIndex()));
-    x64_codegen->LoadCurrentMethod(CpuRegister(calling_convention.GetRegisterAt(1)));
     __ gs()->call(Address::Absolute((do_clinit_
         ? QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pInitializeStaticStorage)
         : QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pInitializeType))
         , true));
@@ -244,7 +243,6 @@ class LoadStringSlowPathX86_64 : public SlowPathCodeX86_64 {
     SaveLiveRegisters(codegen, locations);

     InvokeRuntimeCallingConvention calling_convention;
-    x64_codegen->LoadCurrentMethod(CpuRegister(calling_convention.GetRegisterAt(1)));
     __ movl(CpuRegister(calling_convention.GetRegisterAt(0)),
             Immediate(instruction_->GetStringIndex()));
     __ gs()->call(Address::Absolute(
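A side note on the CheckEntrypointTypes<...>() changes in the arm64 hunks above: that idiom is a compile-time assertion that an entry point is invoked with the expected return and argument types, which is why dropping mirror::ArtMethod* from the template arguments is the enforcement half of dropping it from the runtime signature. A rough sketch of how such a check can be built (hypothetical names and signature table, not ART's implementation):

#include <cstdint>
#include <type_traits>

// Hypothetical signature table: maps an entry point to its expected C type.
enum QuickEntrypointEnum { kQuickInitializeType, kQuickResolveString };

template <QuickEntrypointEnum E> struct EntrypointSignature;
template <> struct EntrypointSignature<kQuickInitializeType> {
  using type = void* (*)(uint32_t);  // the ArtMethod* argument is now gone
};

// Fails to compile if the caller's idea of the signature drifts from the table.
template <QuickEntrypointEnum E, typename R, typename... Args>
void CheckEntrypointTypes() {
  static_assert(std::is_same<typename EntrypointSignature<E>::type,
                             R (*)(Args...)>::value,
                "unexpected entry point signature");
}

int main() {
  // Mirrors the usage in the code generators: compiles only after the
  // signature table and the call site agree on the two-argument form.
  CheckEntrypointTypes<kQuickInitializeType, void*, uint32_t>();
}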