51 files changed, 2281 insertions(+), 511 deletions(-)
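The first hunk below (compiler/dex/dex_to_dex_compiler.cc) stops funneling IPUT_BOOLEAN, IPUT_BYTE, IPUT_CHAR, and IPUT_SHORT through a shared IPUT_QUICK and instead quickens each to its own opcode. A minimal self-contained sketch of the mapping this creates; the enum and helper here are illustrative stand-ins whose names mirror the patch, not ART code:

    #include <cassert>

    // Illustrative stand-ins for art::Instruction codes (names mirror the patch).
    enum class Op { IPUT, IPUT_BOOLEAN, IPUT_BYTE, IPUT_CHAR, IPUT_SHORT,
                    IPUT_QUICK, IPUT_BOOLEAN_QUICK, IPUT_BYTE_QUICK,
                    IPUT_CHAR_QUICK, IPUT_SHORT_QUICK };

    // Hypothetical helper: the per-type quickening introduced by the hunk below.
    Op Quicken(Op op) {
      switch (op) {
        case Op::IPUT:         return Op::IPUT_QUICK;
        case Op::IPUT_BOOLEAN: return Op::IPUT_BOOLEAN_QUICK;
        case Op::IPUT_BYTE:    return Op::IPUT_BYTE_QUICK;
        case Op::IPUT_CHAR:    return Op::IPUT_CHAR_QUICK;
        case Op::IPUT_SHORT:   return Op::IPUT_SHORT_QUICK;
        default:               return op;  // not quickened by this hunk
      }
    }

    int main() {
      // Previously all four narrow puts collapsed to IPUT_QUICK.
      assert(Quicken(Op::IPUT_BOOLEAN) == Op::IPUT_BOOLEAN_QUICK);
    }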
diff --git a/compiler/dex/dex_to_dex_compiler.cc b/compiler/dex/dex_to_dex_compiler.cc index b9f9437..f9a05c2 100644 --- a/compiler/dex/dex_to_dex_compiler.cc +++ b/compiler/dex/dex_to_dex_compiler.cc @@ -121,13 +121,23 @@ void DexCompiler::Compile() { break; case Instruction::IPUT: + CompileInstanceFieldAccess(inst, dex_pc, Instruction::IPUT_QUICK, true); + break; + case Instruction::IPUT_BOOLEAN: + CompileInstanceFieldAccess(inst, dex_pc, Instruction::IPUT_BOOLEAN_QUICK, true); + break; + case Instruction::IPUT_BYTE: + CompileInstanceFieldAccess(inst, dex_pc, Instruction::IPUT_BYTE_QUICK, true); + break; + case Instruction::IPUT_CHAR: + CompileInstanceFieldAccess(inst, dex_pc, Instruction::IPUT_CHAR_QUICK, true); + break; + case Instruction::IPUT_SHORT: - // These opcodes have the same implementation in interpreter so group - // them under IPUT_QUICK. - CompileInstanceFieldAccess(inst, dex_pc, Instruction::IPUT_QUICK, true); + CompileInstanceFieldAccess(inst, dex_pc, Instruction::IPUT_SHORT_QUICK, true); break; case Instruction::IPUT_WIDE: diff --git a/compiler/dex/mir_graph.cc b/compiler/dex/mir_graph.cc index 963a586..a170614 100644 --- a/compiler/dex/mir_graph.cc +++ b/compiler/dex/mir_graph.cc @@ -1944,6 +1944,10 @@ uint32_t SSARepresentation::GetStartUseIndex(Instruction::Code opcode) { case Instruction::IPUT_SHORT: case Instruction::IPUT_QUICK: case Instruction::IPUT_OBJECT_QUICK: + case Instruction::IPUT_BOOLEAN_QUICK: + case Instruction::IPUT_BYTE_QUICK: + case Instruction::IPUT_CHAR_QUICK: + case Instruction::IPUT_SHORT_QUICK: case Instruction::APUT: case Instruction::APUT_OBJECT: case Instruction::APUT_BOOLEAN: diff --git a/compiler/dex/quick/arm64/int_arm64.cc b/compiler/dex/quick/arm64/int_arm64.cc index e4a895e..1777e98 100644 --- a/compiler/dex/quick/arm64/int_arm64.cc +++ b/compiler/dex/quick/arm64/int_arm64.cc @@ -1704,13 +1704,13 @@ void Arm64Mir2Lir::UnspillRegs(RegStorage base, uint32_t core_reg_mask, uint32_t } bool Arm64Mir2Lir::GenInlinedReverseBits(CallInfo* info, OpSize size) { - ArmOpcode wide = (size == k64) ? WIDE(0) : UNWIDE(0); + ArmOpcode wide = IsWide(size) ? WIDE(0) : UNWIDE(0); RegLocation rl_src_i = info->args[0]; - RegLocation rl_dest = (size == k64) ? InlineTargetWide(info) : InlineTarget(info); // result reg + RegLocation rl_dest = IsWide(size) ? InlineTargetWide(info) : InlineTarget(info); // result reg RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true); - RegLocation rl_i = (size == k64) ? LoadValueWide(rl_src_i, kCoreReg) : LoadValue(rl_src_i, kCoreReg); + RegLocation rl_i = IsWide(size) ? LoadValueWide(rl_src_i, kCoreReg) : LoadValue(rl_src_i, kCoreReg); NewLIR2(kA64Rbit2rr | wide, rl_result.reg.GetReg(), rl_i.reg.GetReg()); - (size == k64) ? StoreValueWide(rl_dest, rl_result) : StoreValue(rl_dest, rl_result); + IsWide(size) ? 
StoreValueWide(rl_dest, rl_result) : StoreValue(rl_dest, rl_result); return true; } diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc index 3f22913..b2af298 100644 --- a/compiler/dex/quick/gen_common.cc +++ b/compiler/dex/quick/gen_common.cc @@ -524,11 +524,9 @@ class StaticFieldSlowPath : public Mir2Lir::LIRSlowPath { const RegStorage r_base_; }; -void Mir2Lir::GenSput(MIR* mir, RegLocation rl_src, bool is_long_or_double, - bool is_object) { +void Mir2Lir::GenSput(MIR* mir, RegLocation rl_src, OpSize size) { const MirSFieldLoweringInfo& field_info = mir_graph_->GetSFieldLoweringInfo(mir); cu_->compiler_driver->ProcessedStaticField(field_info.FastPut(), field_info.IsReferrersClass()); - OpSize store_size = LoadStoreOpSize(is_long_or_double, is_object); if (!SLOW_FIELD_PATH && field_info.FastPut()) { DCHECK_GE(field_info.FieldOffset().Int32Value(), 0); RegStorage r_base; @@ -587,37 +585,59 @@ void Mir2Lir::GenSput(MIR* mir, RegLocation rl_src, bool is_long_or_double, FreeTemp(r_method); } // rBase now holds static storage base - RegisterClass reg_class = RegClassForFieldLoadStore(store_size, field_info.IsVolatile()); - if (is_long_or_double) { + RegisterClass reg_class = RegClassForFieldLoadStore(size, field_info.IsVolatile()); + if (IsWide(size)) { rl_src = LoadValueWide(rl_src, reg_class); } else { rl_src = LoadValue(rl_src, reg_class); } - if (is_object) { + if (IsRef(size)) { StoreRefDisp(r_base, field_info.FieldOffset().Int32Value(), rl_src.reg, field_info.IsVolatile() ? kVolatile : kNotVolatile); } else { - StoreBaseDisp(r_base, field_info.FieldOffset().Int32Value(), rl_src.reg, store_size, + StoreBaseDisp(r_base, field_info.FieldOffset().Int32Value(), rl_src.reg, size, field_info.IsVolatile() ? kVolatile : kNotVolatile); } - if (is_object && !mir_graph_->IsConstantNullRef(rl_src)) { + if (IsRef(size) && !mir_graph_->IsConstantNullRef(rl_src)) { MarkGCCard(rl_src.reg, r_base); } FreeTemp(r_base); } else { FlushAllRegs(); // Everything to home locations - QuickEntrypointEnum target = - is_long_or_double ? kQuickSet64Static - : (is_object ? kQuickSetObjStatic : kQuickSet32Static); + QuickEntrypointEnum target; + switch (size) { + case kReference: + target = kQuickSetObjStatic; + break; + case k64: + case kDouble: + target = kQuickSet64Static; + break; + case k32: + case kSingle: + target = kQuickSet32Static; + break; + case kSignedHalf: + case kUnsignedHalf: + target = kQuickSet16Static; + break; + case kSignedByte: + case kUnsignedByte: + target = kQuickSet8Static; + break; + case kWord: // Intentional fallthrough. 
+ default: + LOG(FATAL) << "Can't determine entrypoint for: " << size; + target = kQuickSet32Static; + } CallRuntimeHelperImmRegLocation(target, field_info.FieldIndex(), rl_src, true); } } -void Mir2Lir::GenSget(MIR* mir, RegLocation rl_dest, - bool is_long_or_double, bool is_object) { +void Mir2Lir::GenSget(MIR* mir, RegLocation rl_dest, OpSize size, Primitive::Type type) { const MirSFieldLoweringInfo& field_info = mir_graph_->GetSFieldLoweringInfo(mir); cu_->compiler_driver->ProcessedStaticField(field_info.FastGet(), field_info.IsReferrersClass()); - OpSize load_size = LoadStoreOpSize(is_long_or_double, is_object); + if (!SLOW_FIELD_PATH && field_info.FastGet()) { DCHECK_GE(field_info.FieldOffset().Int32Value(), 0); RegStorage r_base; @@ -668,33 +688,62 @@ void Mir2Lir::GenSget(MIR* mir, RegLocation rl_dest, FreeTemp(r_method); } // r_base now holds static storage base - RegisterClass reg_class = RegClassForFieldLoadStore(load_size, field_info.IsVolatile()); + RegisterClass reg_class = RegClassForFieldLoadStore(size, field_info.IsVolatile()); RegLocation rl_result = EvalLoc(rl_dest, reg_class, true); int field_offset = field_info.FieldOffset().Int32Value(); - if (is_object) { + if (IsRef(size)) { + // TODO: DCHECK? LoadRefDisp(r_base, field_offset, rl_result.reg, field_info.IsVolatile() ? kVolatile : kNotVolatile); } else { - LoadBaseDisp(r_base, field_offset, rl_result.reg, load_size, field_info.IsVolatile() ? + LoadBaseDisp(r_base, field_offset, rl_result.reg, size, field_info.IsVolatile() ? kVolatile : kNotVolatile); } FreeTemp(r_base); - if (is_long_or_double) { + if (IsWide(size)) { StoreValueWide(rl_dest, rl_result); } else { StoreValue(rl_dest, rl_result); } } else { + DCHECK(SizeMatchesTypeForEntrypoint(size, type)); FlushAllRegs(); // Everything to home locations - QuickEntrypointEnum target = - is_long_or_double ? kQuickGet64Static - : (is_object ? kQuickGetObjStatic : kQuickGet32Static); + QuickEntrypointEnum target; + switch (type) { + case Primitive::kPrimNot: + target = kQuickGetObjStatic; + break; + case Primitive::kPrimLong: + case Primitive::kPrimDouble: + target = kQuickGet64Static; + break; + case Primitive::kPrimInt: + case Primitive::kPrimFloat: + target = kQuickGet32Static; + break; + case Primitive::kPrimShort: + target = kQuickGetShortStatic; + break; + case Primitive::kPrimChar: + target = kQuickGetCharStatic; + break; + case Primitive::kPrimByte: + target = kQuickGetByteStatic; + break; + case Primitive::kPrimBoolean: + target = kQuickGetBooleanStatic; + break; + case Primitive::kPrimVoid: // Intentional fallthrough. + default: + LOG(FATAL) << "Can't determine entrypoint for: " << type; + target = kQuickGet32Static; + } CallRuntimeHelperImm(target, field_info.FieldIndex(), true); // FIXME: pGetXXStatic always return an int or int64 regardless of rl_dest.fp. 
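GenSget above dispatches on Primitive::Type rather than OpSize because a get must extend the raw field bits correctly before the value reaches a 32-bit return register: byte and short sign-extend, boolean and char zero-extend, so one Get32 entrypoint cannot serve all four narrow types. A minimal sketch of that distinction in plain C++, with no ART types assumed:

    #include <cassert>
    #include <cstdint>

    int main() {
      uint8_t raw = 0xFF;  // identical stored bits for a byte field and a boolean field
      int32_t as_byte    = static_cast<int8_t>(raw);   // sign-extend:  -1
      int32_t as_boolean = raw;                        // zero-extend: 255
      assert(as_byte == -1 && as_boolean == 255);

      uint16_t raw16 = 0x8000;
      int32_t as_short = static_cast<int16_t>(raw16);  // sign-extend: -32768
      int32_t as_char  = raw16;                        // zero-extend:  32768
      assert(as_short == -32768 && as_char == 32768);
    }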
- if (is_long_or_double) { + if (IsWide(size)) { RegLocation rl_result = GetReturnWide(kCoreReg); StoreValueWide(rl_dest, rl_result); } else { @@ -715,14 +764,12 @@ void Mir2Lir::HandleSlowPaths() { slow_paths_.Reset(); } -void Mir2Lir::GenIGet(MIR* mir, int opt_flags, OpSize size, - RegLocation rl_dest, RegLocation rl_obj, bool is_long_or_double, - bool is_object) { +void Mir2Lir::GenIGet(MIR* mir, int opt_flags, OpSize size, Primitive::Type type, + RegLocation rl_dest, RegLocation rl_obj) { const MirIFieldLoweringInfo& field_info = mir_graph_->GetIFieldLoweringInfo(mir); cu_->compiler_driver->ProcessedInstanceField(field_info.FastGet()); - OpSize load_size = LoadStoreOpSize(is_long_or_double, is_object); if (!SLOW_FIELD_PATH && field_info.FastGet()) { - RegisterClass reg_class = RegClassForFieldLoadStore(load_size, field_info.IsVolatile()); + RegisterClass reg_class = RegClassForFieldLoadStore(size, field_info.IsVolatile()); // A load of the class will lead to an iget with offset 0. DCHECK_GE(field_info.FieldOffset().Int32Value(), 0); rl_obj = LoadValue(rl_obj, kRefReg); @@ -730,29 +777,57 @@ void Mir2Lir::GenIGet(MIR* mir, int opt_flags, OpSize size, RegLocation rl_result = EvalLoc(rl_dest, reg_class, true); int field_offset = field_info.FieldOffset().Int32Value(); LIR* load_lir; - if (is_object) { + if (IsRef(size)) { load_lir = LoadRefDisp(rl_obj.reg, field_offset, rl_result.reg, field_info.IsVolatile() ? kVolatile : kNotVolatile); } else { - load_lir = LoadBaseDisp(rl_obj.reg, field_offset, rl_result.reg, load_size, + load_lir = LoadBaseDisp(rl_obj.reg, field_offset, rl_result.reg, size, field_info.IsVolatile() ? kVolatile : kNotVolatile); } MarkPossibleNullPointerExceptionAfter(opt_flags, load_lir); - if (is_long_or_double) { + if (IsWide(size)) { StoreValueWide(rl_dest, rl_result); } else { StoreValue(rl_dest, rl_result); } } else { - QuickEntrypointEnum target = - is_long_or_double ? kQuickGet64Instance - : (is_object ? kQuickGetObjInstance : kQuickGet32Instance); + DCHECK(SizeMatchesTypeForEntrypoint(size, type)); + QuickEntrypointEnum target; + switch (type) { + case Primitive::kPrimNot: + target = kQuickGetObjInstance; + break; + case Primitive::kPrimLong: + case Primitive::kPrimDouble: + target = kQuickGet64Instance; + break; + case Primitive::kPrimFloat: + case Primitive::kPrimInt: + target = kQuickGet32Instance; + break; + case Primitive::kPrimShort: + target = kQuickGetShortInstance; + break; + case Primitive::kPrimChar: + target = kQuickGetCharInstance; + break; + case Primitive::kPrimByte: + target = kQuickGetByteInstance; + break; + case Primitive::kPrimBoolean: + target = kQuickGetBooleanInstance; + break; + case Primitive::kPrimVoid: // Intentional fallthrough. + default: + LOG(FATAL) << "Can't determine entrypoint for: " << type; + target = kQuickGet32Instance; + } // Second argument of pGetXXInstance is always a reference. DCHECK_EQ(static_cast<unsigned int>(rl_obj.wide), 0U); CallRuntimeHelperImmRegLocation(target, field_info.FieldIndex(), rl_obj, true); // FIXME: pGetXXInstance always return an int or int64 regardless of rl_dest.fp. 
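By contrast, the GenSput switch above and the GenIPut switch below key on OpSize alone: kSignedByte and kUnsignedByte both select the one 8-bit set entrypoint, because a store truncates to the field width and the stored low bits are identical either way. A short illustration, again with no ART types assumed:

    #include <cassert>
    #include <cstdint>

    // A truncating 8-bit store writes the same bits for signed and unsigned sources.
    void Store8(uint8_t* field, int32_t value) { *field = static_cast<uint8_t>(value); }

    int main() {
      uint8_t a = 0, b = 0;
      Store8(&a, -1);    // byte value -1
      Store8(&b, 0xFF);  // boolean/char-style value 255
      assert(a == b);    // both store 0xFF; signedness only matters on the load side
    }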
- if (is_long_or_double) { + if (IsWide(size)) { RegLocation rl_result = GetReturnWide(kCoreReg); StoreValueWide(rl_dest, rl_result); } else { @@ -763,18 +838,16 @@ void Mir2Lir::GenIGet(MIR* mir, int opt_flags, OpSize size, } void Mir2Lir::GenIPut(MIR* mir, int opt_flags, OpSize size, - RegLocation rl_src, RegLocation rl_obj, bool is_long_or_double, - bool is_object) { + RegLocation rl_src, RegLocation rl_obj) { const MirIFieldLoweringInfo& field_info = mir_graph_->GetIFieldLoweringInfo(mir); cu_->compiler_driver->ProcessedInstanceField(field_info.FastPut()); - OpSize store_size = LoadStoreOpSize(is_long_or_double, is_object); if (!SLOW_FIELD_PATH && field_info.FastPut()) { - RegisterClass reg_class = RegClassForFieldLoadStore(store_size, field_info.IsVolatile()); + RegisterClass reg_class = RegClassForFieldLoadStore(size, field_info.IsVolatile()); // Dex code never writes to the class field. DCHECK_GE(static_cast<uint32_t>(field_info.FieldOffset().Int32Value()), sizeof(mirror::HeapReference<mirror::Class>)); rl_obj = LoadValue(rl_obj, kRefReg); - if (is_long_or_double) { + if (IsWide(size)) { rl_src = LoadValueWide(rl_src, reg_class); } else { rl_src = LoadValue(rl_src, reg_class); @@ -782,21 +855,44 @@ void Mir2Lir::GenIPut(MIR* mir, int opt_flags, OpSize size, GenNullCheck(rl_obj.reg, opt_flags); int field_offset = field_info.FieldOffset().Int32Value(); LIR* store; - if (is_object) { + if (IsRef(size)) { store = StoreRefDisp(rl_obj.reg, field_offset, rl_src.reg, field_info.IsVolatile() ? kVolatile : kNotVolatile); } else { - store = StoreBaseDisp(rl_obj.reg, field_offset, rl_src.reg, store_size, + store = StoreBaseDisp(rl_obj.reg, field_offset, rl_src.reg, size, field_info.IsVolatile() ? kVolatile : kNotVolatile); } MarkPossibleNullPointerExceptionAfter(opt_flags, store); - if (is_object && !mir_graph_->IsConstantNullRef(rl_src)) { + if (IsRef(size) && !mir_graph_->IsConstantNullRef(rl_src)) { MarkGCCard(rl_src.reg, rl_obj.reg); } } else { - QuickEntrypointEnum target = - is_long_or_double ? kQuickSet64Instance - : (is_object ? kQuickSetObjInstance : kQuickSet32Instance); + QuickEntrypointEnum target; + switch (size) { + case kReference: + target = kQuickSetObjInstance; + break; + case k64: + case kDouble: + target = kQuickSet64Instance; + break; + case k32: + case kSingle: + target = kQuickSet32Instance; + break; + case kSignedHalf: + case kUnsignedHalf: + target = kQuickSet16Instance; + break; + case kSignedByte: + case kUnsignedByte: + target = kQuickSet8Instance; + break; + case kWord: // Intentional fallthrough. + default: + LOG(FATAL) << "Can't determine entrypoint for: " << size; + target = kQuickSet32Instance; + } CallRuntimeHelperImmRegLocationRegLocation(target, field_info.FieldIndex(), rl_obj, rl_src, true); } @@ -2096,4 +2192,28 @@ void Mir2Lir::GenSparseSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_s } } +bool Mir2Lir::SizeMatchesTypeForEntrypoint(OpSize size, Primitive::Type type) { + switch (size) { + case kReference: + return type == Primitive::kPrimNot; + case k64: + case kDouble: + return type == Primitive::kPrimLong || type == Primitive::kPrimDouble; + case k32: + case kSingle: + return type == Primitive::kPrimInt || type == Primitive::kPrimFloat; + case kSignedHalf: + return type == Primitive::kPrimShort; + case kUnsignedHalf: + return type == Primitive::kPrimChar; + case kSignedByte: + return type == Primitive::kPrimByte; + case kUnsignedByte: + return type == Primitive::kPrimBoolean; + case kWord: // Intentional fallthrough. 
+ default: + return false; // There are no sane types with this op size. + } +} + } // namespace art diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc index 3cfc9a6..3fdbe20 100755 --- a/compiler/dex/quick/gen_invoke.cc +++ b/compiler/dex/quick/gen_invoke.cc @@ -1158,12 +1158,12 @@ bool Mir2Lir::GenInlinedReferenceGet(CallInfo* info) { // intrinsic logic start. RegLocation rl_obj = info->args[0]; - rl_obj = LoadValue(rl_obj); + rl_obj = LoadValue(rl_obj, kRefReg); RegStorage reg_slow_path = AllocTemp(); RegStorage reg_disabled = AllocTemp(); - Load32Disp(reg_class, slow_path_flag_offset, reg_slow_path); - Load32Disp(reg_class, disable_flag_offset, reg_disabled); + Load8Disp(reg_class, slow_path_flag_offset, reg_slow_path); + Load8Disp(reg_class, disable_flag_offset, reg_disabled); FreeTemp(reg_class); LIR* or_inst = OpRegRegReg(kOpOr, reg_slow_path, reg_slow_path, reg_disabled); FreeTemp(reg_disabled); @@ -1297,10 +1297,10 @@ bool Mir2Lir::GenInlinedReverseBytes(CallInfo* info, OpSize size) { return false; } RegLocation rl_src_i = info->args[0]; - RegLocation rl_i = (size == k64) ? LoadValueWide(rl_src_i, kCoreReg) : LoadValue(rl_src_i, kCoreReg); - RegLocation rl_dest = (size == k64) ? InlineTargetWide(info) : InlineTarget(info); // result reg + RegLocation rl_i = IsWide(size) ? LoadValueWide(rl_src_i, kCoreReg) : LoadValue(rl_src_i, kCoreReg); + RegLocation rl_dest = IsWide(size) ? InlineTargetWide(info) : InlineTarget(info); // result reg RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true); - if (size == k64) { + if (IsWide(size)) { if (cu_->instruction_set == kArm64 || cu_->instruction_set == kX86_64) { OpRegReg(kOpRev, rl_result.reg, rl_i.reg); StoreValueWide(rl_dest, rl_result); diff --git a/compiler/dex/quick/mir_to_lir.cc b/compiler/dex/quick/mir_to_lir.cc index c4dfcb9..3ec37f2 100644 --- a/compiler/dex/quick/mir_to_lir.cc +++ b/compiler/dex/quick/mir_to_lir.cc @@ -18,6 +18,7 @@ #include "dex/dataflow_iterator-inl.h" #include "dex/quick/dex_file_method_inliner.h" #include "mir_to_lir-inl.h" +#include "primitive.h" #include "thread-inl.h" namespace art { @@ -223,9 +224,27 @@ bool Mir2Lir::GenSpecialIGet(MIR* mir, const InlineMethod& special) { return false; } - bool wide = (data.op_variant == InlineMethodAnalyser::IGetVariant(Instruction::IGET_WIDE)); - bool ref = (data.op_variant == InlineMethodAnalyser::IGetVariant(Instruction::IGET_OBJECT)); - OpSize size = LoadStoreOpSize(wide, ref); + OpSize size = k32; + switch (data.op_variant) { + case InlineMethodAnalyser::IGetVariant(Instruction::IGET_OBJECT): + size = kReference; + break; + case InlineMethodAnalyser::IGetVariant(Instruction::IGET_WIDE): + size = k64; + break; + case InlineMethodAnalyser::IGetVariant(Instruction::IGET_SHORT): + size = kSignedHalf; + break; + case InlineMethodAnalyser::IGetVariant(Instruction::IGET_CHAR): + size = kUnsignedHalf; + break; + case InlineMethodAnalyser::IGetVariant(Instruction::IGET_BYTE): + size = kSignedByte; + break; + case InlineMethodAnalyser::IGetVariant(Instruction::IGET_BOOLEAN): + size = kUnsignedByte; + break; + } // Point of no return - no aborts after this GenPrintLabel(mir); @@ -233,20 +252,20 @@ bool Mir2Lir::GenSpecialIGet(MIR* mir, const InlineMethod& special) { RegStorage reg_obj = LoadArg(data.object_arg, kRefReg); RegisterClass reg_class = RegClassForFieldLoadStore(size, data.is_volatile); RegisterClass ret_reg_class = ShortyToRegClass(cu_->shorty[0]); - RegLocation rl_dest = wide ? 
GetReturnWide(ret_reg_class) : GetReturn(ret_reg_class); + RegLocation rl_dest = IsWide(size) ? GetReturnWide(ret_reg_class) : GetReturn(ret_reg_class); RegStorage r_result = rl_dest.reg; if (!RegClassMatches(reg_class, r_result)) { - r_result = wide ? AllocTypedTempWide(rl_dest.fp, reg_class) - : AllocTypedTemp(rl_dest.fp, reg_class); + r_result = IsWide(size) ? AllocTypedTempWide(rl_dest.fp, reg_class) + : AllocTypedTemp(rl_dest.fp, reg_class); } - if (ref) { + if (IsRef(size)) { LoadRefDisp(reg_obj, data.field_offset, r_result, data.is_volatile ? kVolatile : kNotVolatile); } else { LoadBaseDisp(reg_obj, data.field_offset, r_result, size, data.is_volatile ? kVolatile : kNotVolatile); } if (r_result.NotExactlyEquals(rl_dest.reg)) { - if (wide) { + if (IsWide(size)) { OpRegCopyWide(rl_dest.reg, r_result); } else { OpRegCopy(rl_dest.reg, r_result); @@ -267,24 +286,42 @@ bool Mir2Lir::GenSpecialIPut(MIR* mir, const InlineMethod& special) { return false; } - bool wide = (data.op_variant == InlineMethodAnalyser::IPutVariant(Instruction::IPUT_WIDE)); - bool ref = (data.op_variant == InlineMethodAnalyser::IGetVariant(Instruction::IGET_OBJECT)); - OpSize size = LoadStoreOpSize(wide, ref); + OpSize size = k32; + switch (data.op_variant) { + case InlineMethodAnalyser::IPutVariant(Instruction::IPUT_OBJECT): + size = kReference; + break; + case InlineMethodAnalyser::IPutVariant(Instruction::IPUT_WIDE): + size = k64; + break; + case InlineMethodAnalyser::IPutVariant(Instruction::IPUT_SHORT): + size = kSignedHalf; + break; + case InlineMethodAnalyser::IPutVariant(Instruction::IPUT_CHAR): + size = kUnsignedHalf; + break; + case InlineMethodAnalyser::IPutVariant(Instruction::IPUT_BYTE): + size = kSignedByte; + break; + case InlineMethodAnalyser::IPutVariant(Instruction::IPUT_BOOLEAN): + size = kUnsignedByte; + break; + } // Point of no return - no aborts after this GenPrintLabel(mir); LockArg(data.object_arg); - LockArg(data.src_arg, wide); + LockArg(data.src_arg, IsWide(size)); RegStorage reg_obj = LoadArg(data.object_arg, kRefReg); RegisterClass reg_class = RegClassForFieldLoadStore(size, data.is_volatile); - RegStorage reg_src = LoadArg(data.src_arg, reg_class, wide); - if (ref) { + RegStorage reg_src = LoadArg(data.src_arg, reg_class, IsWide(size)); + if (IsRef(size)) { StoreRefDisp(reg_obj, data.field_offset, reg_src, data.is_volatile ? kVolatile : kNotVolatile); } else { StoreBaseDisp(reg_obj, data.field_offset, reg_src, size, data.is_volatile ? kVolatile : kNotVolatile); } - if (ref) { + if (IsRef(size)) { MarkGCCard(reg_src, reg_obj); } return true; @@ -720,84 +757,112 @@ void Mir2Lir::CompileDalvikInstruction(MIR* mir, BasicBlock* bb, LIR* label_list break; case Instruction::IGET_OBJECT: - GenIGet(mir, opt_flags, kReference, rl_dest, rl_src[0], false, true); + GenIGet(mir, opt_flags, kReference, Primitive::kPrimNot, rl_dest, rl_src[0]); break; case Instruction::IGET_WIDE: - GenIGet(mir, opt_flags, k64, rl_dest, rl_src[0], true, false); + // kPrimLong and kPrimDouble share the same entrypoints. 
+ GenIGet(mir, opt_flags, k64, Primitive::kPrimLong, rl_dest, rl_src[0]); break; case Instruction::IGET: - GenIGet(mir, opt_flags, k32, rl_dest, rl_src[0], false, false); + GenIGet(mir, opt_flags, k32, Primitive::kPrimInt, rl_dest, rl_src[0]); break; case Instruction::IGET_CHAR: - GenIGet(mir, opt_flags, kUnsignedHalf, rl_dest, rl_src[0], false, false); + GenIGet(mir, opt_flags, kUnsignedHalf, Primitive::kPrimChar, rl_dest, rl_src[0]); break; case Instruction::IGET_SHORT: - GenIGet(mir, opt_flags, kSignedHalf, rl_dest, rl_src[0], false, false); + GenIGet(mir, opt_flags, kSignedHalf, Primitive::kPrimShort, rl_dest, rl_src[0]); break; case Instruction::IGET_BOOLEAN: + GenIGet(mir, opt_flags, kUnsignedByte, Primitive::kPrimBoolean, rl_dest, rl_src[0]); + break; + case Instruction::IGET_BYTE: - GenIGet(mir, opt_flags, kUnsignedByte, rl_dest, rl_src[0], false, false); + GenIGet(mir, opt_flags, kSignedByte, Primitive::kPrimByte, rl_dest, rl_src[0]); break; case Instruction::IPUT_WIDE: - GenIPut(mir, opt_flags, k64, rl_src[0], rl_src[1], true, false); + GenIPut(mir, opt_flags, k64, rl_src[0], rl_src[1]); break; case Instruction::IPUT_OBJECT: - GenIPut(mir, opt_flags, kReference, rl_src[0], rl_src[1], false, true); + GenIPut(mir, opt_flags, kReference, rl_src[0], rl_src[1]); break; case Instruction::IPUT: - GenIPut(mir, opt_flags, k32, rl_src[0], rl_src[1], false, false); + GenIPut(mir, opt_flags, k32, rl_src[0], rl_src[1]); break; - case Instruction::IPUT_BOOLEAN: case Instruction::IPUT_BYTE: - GenIPut(mir, opt_flags, kUnsignedByte, rl_src[0], rl_src[1], false, false); + case Instruction::IPUT_BOOLEAN: + GenIPut(mir, opt_flags, kUnsignedByte, rl_src[0], rl_src[1]); break; case Instruction::IPUT_CHAR: - GenIPut(mir, opt_flags, kUnsignedHalf, rl_src[0], rl_src[1], false, false); + GenIPut(mir, opt_flags, kUnsignedHalf, rl_src[0], rl_src[1]); break; case Instruction::IPUT_SHORT: - GenIPut(mir, opt_flags, kSignedHalf, rl_src[0], rl_src[1], false, false); + GenIPut(mir, opt_flags, kSignedHalf, rl_src[0], rl_src[1]); break; case Instruction::SGET_OBJECT: - GenSget(mir, rl_dest, false, true); + GenSget(mir, rl_dest, kReference, Primitive::kPrimNot); break; + case Instruction::SGET: - case Instruction::SGET_BOOLEAN: - case Instruction::SGET_BYTE: + GenSget(mir, rl_dest, k32, Primitive::kPrimInt); + break; + case Instruction::SGET_CHAR: + GenSget(mir, rl_dest, kUnsignedHalf, Primitive::kPrimChar); + break; + case Instruction::SGET_SHORT: - GenSget(mir, rl_dest, false, false); + GenSget(mir, rl_dest, kSignedHalf, Primitive::kPrimShort); + break; + + case Instruction::SGET_BOOLEAN: + GenSget(mir, rl_dest, kUnsignedByte, Primitive::kPrimBoolean); + break; + + case Instruction::SGET_BYTE: + GenSget(mir, rl_dest, kSignedByte, Primitive::kPrimByte); break; case Instruction::SGET_WIDE: - GenSget(mir, rl_dest, true, false); + // kPrimLong and kPrimDouble share the same entrypoints. 
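The shared entrypoint works because the slow path hands back the raw 64 bits in core registers (hence the FIXME above about pGetXX returning int or int64 regardless of rl_dest.fp) and StoreValueWide moves them to the destination; whether those bits are later read as a long or a double changes nothing about the transfer. A sketch of why no information is lost, using std::memcpy as the stand-in for the register move:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    int main() {
      // A 64-bit field load returns raw bits; long vs. double is only an interpretation.
      double d = 2.5;
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof(bits));   // what a Get64 entrypoint hands back
      double back;
      std::memcpy(&back, &bits, sizeof(back));
      assert(back == 2.5);                    // round trip through core registers is exact
    }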
+ GenSget(mir, rl_dest, k64, Primitive::kPrimLong); break; case Instruction::SPUT_OBJECT: - GenSput(mir, rl_src[0], false, true); + GenSput(mir, rl_src[0], kReference); break; case Instruction::SPUT: - case Instruction::SPUT_BOOLEAN: + GenSput(mir, rl_src[0], k32); + break; + case Instruction::SPUT_BYTE: + case Instruction::SPUT_BOOLEAN: + GenSput(mir, rl_src[0], kUnsignedByte); + break; + case Instruction::SPUT_CHAR: + GenSput(mir, rl_src[0], kUnsignedHalf); + break; + case Instruction::SPUT_SHORT: - GenSput(mir, rl_src[0], false, false); + GenSput(mir, rl_src[0], kSignedHalf); break; + case Instruction::SPUT_WIDE: - GenSput(mir, rl_src[0], true, false); + GenSput(mir, rl_src[0], k64); break; case Instruction::INVOKE_STATIC_RANGE: diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h index 64ef48d..d101a13 100644 --- a/compiler/dex/quick/mir_to_lir.h +++ b/compiler/dex/quick/mir_to_lir.h @@ -831,14 +831,14 @@ class Mir2Lir : public Backend { void GenNewArray(uint32_t type_idx, RegLocation rl_dest, RegLocation rl_src); void GenFilledNewArray(CallInfo* info); - void GenSput(MIR* mir, RegLocation rl_src, - bool is_long_or_double, bool is_object); - void GenSget(MIR* mir, RegLocation rl_dest, - bool is_long_or_double, bool is_object); - void GenIGet(MIR* mir, int opt_flags, OpSize size, - RegLocation rl_dest, RegLocation rl_obj, bool is_long_or_double, bool is_object); + void GenSput(MIR* mir, RegLocation rl_src, OpSize size); + // Get entrypoints are specific for types, size alone is not sufficient to safely infer + // entrypoint. + void GenSget(MIR* mir, RegLocation rl_dest, OpSize size, Primitive::Type type); + void GenIGet(MIR* mir, int opt_flags, OpSize size, Primitive::Type type, + RegLocation rl_dest, RegLocation rl_obj); void GenIPut(MIR* mir, int opt_flags, OpSize size, - RegLocation rl_src, RegLocation rl_obj, bool is_long_or_double, bool is_object); + RegLocation rl_src, RegLocation rl_obj); void GenArrayObjPut(int opt_flags, RegLocation rl_array, RegLocation rl_index, RegLocation rl_src); @@ -978,6 +978,10 @@ class Mir2Lir : public Backend { virtual LIR* LoadWordDisp(RegStorage r_base, int displacement, RegStorage r_dest) { return LoadBaseDisp(r_base, displacement, r_dest, kWord, kNotVolatile); } + // Load 8 bits, regardless of target. + virtual LIR* Load8Disp(RegStorage r_base, int displacement, RegStorage r_dest) { + return LoadBaseDisp(r_base, displacement, r_dest, kSignedByte, kNotVolatile); + } // Load 32 bits, regardless of target. virtual LIR* Load32Disp(RegStorage r_base, int displacement, RegStorage r_dest) { return LoadBaseDisp(r_base, displacement, r_dest, k32, kNotVolatile); @@ -1149,6 +1153,14 @@ class Mir2Lir : public Backend { (info1->StorageMask() & info2->StorageMask()) != 0); } + static constexpr bool IsWide(OpSize size) { + return size == k64 || size == kDouble; + } + + static constexpr bool IsRef(OpSize size) { + return size == kReference; + } + /** * @brief Portable way of getting special registers from the backend. * @param reg Enumeration describing the purpose of the register. @@ -1483,10 +1495,6 @@ class Mir2Lir : public Backend { */ virtual RegLocation ForceTempWide(RegLocation loc); - static constexpr OpSize LoadStoreOpSize(bool wide, bool ref) { - return wide ? k64 : ref ? kReference : k32; - } - virtual void GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx, RegLocation rl_dest, RegLocation rl_src); @@ -1724,6 +1732,9 @@ class Mir2Lir : public Backend { // (i.e. 
8 bytes on 32-bit arch, 16 bytes on 64-bit arch) and we use ResourceMaskCache // to deduplicate the masks. ResourceMaskCache mask_cache_; + + private: + static bool SizeMatchesTypeForEntrypoint(OpSize size, Primitive::Type type); }; // Class Mir2Lir } // namespace art diff --git a/compiler/dex/quick/quick_compiler.cc b/compiler/dex/quick/quick_compiler.cc index 4fea1f0..9691864 100644 --- a/compiler/dex/quick/quick_compiler.cc +++ b/compiler/dex/quick/quick_compiler.cc @@ -387,10 +387,10 @@ static int kAllOpcodes[] = { Instruction::IPUT_OBJECT_QUICK, Instruction::INVOKE_VIRTUAL_QUICK, Instruction::INVOKE_VIRTUAL_RANGE_QUICK, - Instruction::UNUSED_EB, - Instruction::UNUSED_EC, - Instruction::UNUSED_ED, - Instruction::UNUSED_EE, + Instruction::IPUT_BOOLEAN_QUICK, + Instruction::IPUT_BYTE_QUICK, + Instruction::IPUT_CHAR_QUICK, + Instruction::IPUT_SHORT_QUICK, Instruction::UNUSED_EF, Instruction::UNUSED_F0, Instruction::UNUSED_F1, diff --git a/compiler/dex/quick/x86/utility_x86.cc b/compiler/dex/quick/x86/utility_x86.cc index a48613f..f159beb 100644 --- a/compiler/dex/quick/x86/utility_x86.cc +++ b/compiler/dex/quick/x86/utility_x86.cc @@ -875,6 +875,17 @@ LIR* X86Mir2Lir::StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r // StoreBaseDisp() will emit correct insn for atomic store on x86 // assuming r_dest is correctly prepared using RegClassForFieldLoadStore(). + // x86 only allows registers EAX-EDX to be used as byte registers, if the input src is not + // valid, allocate a temp. + bool allocated_temp = false; + if (size == kUnsignedByte || size == kSignedByte) { + if (!cu_->target64 && !r_src.Low4()) { + RegStorage r_input = r_src; + r_src = AllocateByteRegister(); + OpRegCopy(r_src, r_input); + allocated_temp = true; + } + } LIR* store = StoreBaseIndexedDisp(r_base, RegStorage::InvalidReg(), 0, displacement, r_src, size); @@ -884,6 +895,10 @@ LIR* X86Mir2Lir::StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r GenMemBarrier(kAnyAny); } + if (allocated_temp) { + FreeTemp(r_src); + } + return store; } diff --git a/compiler/oat_test.cc b/compiler/oat_test.cc index 28710e0..e858a7b 100644 --- a/compiler/oat_test.cc +++ b/compiler/oat_test.cc @@ -188,7 +188,7 @@ TEST_F(OatTest, OatHeaderSizeCheck) { EXPECT_EQ(84U, sizeof(OatHeader)); EXPECT_EQ(8U, sizeof(OatMethodOffsets)); EXPECT_EQ(24U, sizeof(OatQuickMethodHeader)); - EXPECT_EQ(79 * GetInstructionSetPointerSize(kRuntimeISA), sizeof(QuickEntryPoints)); + EXPECT_EQ(91 * GetInstructionSetPointerSize(kRuntimeISA), sizeof(QuickEntryPoints)); } TEST_F(OatTest, OatHeaderIsValid) { diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc index 447003c..88c73d2 100644 --- a/oatdump/oatdump.cc +++ b/oatdump/oatdump.cc @@ -939,15 +939,26 @@ class ImageDumper { StackHandleScope<1> hs(Thread::Current()); FieldHelper fh(hs.NewHandle(field)); mirror::Class* type = fh.GetType(); + DCHECK(type->IsPrimitive()); if (type->IsPrimitiveLong()) { os << StringPrintf("%" PRId64 " (0x%" PRIx64 ")\n", field->Get64(obj), field->Get64(obj)); } else if (type->IsPrimitiveDouble()) { os << StringPrintf("%f (%a)\n", field->GetDouble(obj), field->GetDouble(obj)); } else if (type->IsPrimitiveFloat()) { os << StringPrintf("%f (%a)\n", field->GetFloat(obj), field->GetFloat(obj)); - } else { - DCHECK(type->IsPrimitive()); + } else if (type->IsPrimitiveInt()) { os << StringPrintf("%d (0x%x)\n", field->Get32(obj), field->Get32(obj)); + } else if (type->IsPrimitiveChar()) { + os << StringPrintf("%u (0x%x)\n", field->GetChar(obj), field->GetChar(obj)); + } else 
if (type->IsPrimitiveShort()) { + os << StringPrintf("%d (0x%x)\n", field->GetShort(obj), field->GetShort(obj)); + } else if (type->IsPrimitiveBoolean()) { + os << StringPrintf("%s (0x%x)\n", field->GetBoolean(obj)? "true" : "false", + field->GetBoolean(obj)); + } else if (type->IsPrimitiveByte()) { + os << StringPrintf("%d (0x%x)\n", field->GetByte(obj), field->GetByte(obj)); + } else { + LOG(FATAL) << "Unknown type: " << PrettyClass(type); } } else { // Get the value, don't compute the type unless it is non-null as we don't want diff --git a/runtime/arch/arm/entrypoints_init_arm.cc b/runtime/arch/arm/entrypoints_init_arm.cc index 8c6afd6..38a88c5 100644 --- a/runtime/arch/arm/entrypoints_init_arm.cc +++ b/runtime/arch/arm/entrypoints_init_arm.cc @@ -48,12 +48,24 @@ extern "C" void* art_quick_initialize_type_and_verify_access(uint32_t, void*); extern "C" void* art_quick_resolve_string(void*, uint32_t); // Field entrypoints. +extern "C" int art_quick_set8_instance(uint32_t, void*, int8_t); +extern "C" int art_quick_set8_static(uint32_t, int8_t); +extern "C" int art_quick_set16_instance(uint32_t, void*, int16_t); +extern "C" int art_quick_set16_static(uint32_t, int16_t); extern "C" int art_quick_set32_instance(uint32_t, void*, int32_t); extern "C" int art_quick_set32_static(uint32_t, int32_t); extern "C" int art_quick_set64_instance(uint32_t, void*, int64_t); extern "C" int art_quick_set64_static(uint32_t, int64_t); extern "C" int art_quick_set_obj_instance(uint32_t, void*, void*); extern "C" int art_quick_set_obj_static(uint32_t, void*); +extern "C" int8_t art_quick_get_byte_instance(uint32_t, void*); +extern "C" uint8_t art_quick_get_boolean_instance(uint32_t, void*); +extern "C" int8_t art_quick_get_byte_static(uint32_t); +extern "C" uint8_t art_quick_get_boolean_static(uint32_t); +extern "C" int16_t art_quick_get_short_instance(uint32_t, void*); +extern "C" uint16_t art_quick_get_char_instance(uint32_t, void*); +extern "C" int16_t art_quick_get_short_static(uint32_t); +extern "C" uint16_t art_quick_get_char_static(uint32_t); extern "C" int32_t art_quick_get32_instance(uint32_t, void*); extern "C" int32_t art_quick_get32_static(uint32_t); extern "C" int64_t art_quick_get64_instance(uint32_t, void*); @@ -154,15 +166,27 @@ void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints, qpoints->pResolveString = art_quick_resolve_string; // Field + qpoints->pSet8Instance = art_quick_set8_instance; + qpoints->pSet8Static = art_quick_set8_static; + qpoints->pSet16Instance = art_quick_set16_instance; + qpoints->pSet16Static = art_quick_set16_static; qpoints->pSet32Instance = art_quick_set32_instance; qpoints->pSet32Static = art_quick_set32_static; qpoints->pSet64Instance = art_quick_set64_instance; qpoints->pSet64Static = art_quick_set64_static; qpoints->pSetObjInstance = art_quick_set_obj_instance; qpoints->pSetObjStatic = art_quick_set_obj_static; + qpoints->pGetByteInstance = art_quick_get_byte_instance; + qpoints->pGetBooleanInstance = art_quick_get_boolean_instance; + qpoints->pGetShortInstance = art_quick_get_short_instance; + qpoints->pGetCharInstance = art_quick_get_char_instance; qpoints->pGet32Instance = art_quick_get32_instance; qpoints->pGet64Instance = art_quick_get64_instance; qpoints->pGetObjInstance = art_quick_get_obj_instance; + qpoints->pGetByteStatic = art_quick_get_byte_static; + qpoints->pGetBooleanStatic = art_quick_get_boolean_static; + qpoints->pGetShortStatic = art_quick_get_short_static; + qpoints->pGetCharStatic = art_quick_get_char_static; 
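Each architecture's InitEntryPoints wires the new stubs into the entrypoint table, and the oat_test hunk earlier bumps the expected QuickEntryPoints count from 79 to 91, matching the 12 additions (set8/set16 for instance and static, plus boolean/byte/char/short getters for instance and static). Note how the C-level signatures above already encode the extension contract: art_quick_get_byte_* returns int8_t while art_quick_get_boolean_* returns uint8_t. A trimmed, hypothetical mirror of that wiring; the real QuickEntryPoints struct is defined by ART, not here:

    #include <cstdint>

    // Hypothetical fragment modeled on the assignments above; field and stub
    // names mirror the patch, everything else is a stand-in.
    struct FieldEntryPoints {
      int8_t   (*pGetByteStatic)(uint32_t);
      uint8_t  (*pGetBooleanStatic)(uint32_t);
      int16_t  (*pGetShortStatic)(uint32_t);
      uint16_t (*pGetCharStatic)(uint32_t);
    };

    int8_t   StubGetByteStatic(uint32_t)    { return 0; }  // stand-in for art_quick_get_byte_static
    uint8_t  StubGetBooleanStatic(uint32_t) { return 0; }
    int16_t  StubGetShortStatic(uint32_t)   { return 0; }
    uint16_t StubGetCharStatic(uint32_t)    { return 0; }

    int main() {
      FieldEntryPoints qpoints;
      qpoints.pGetByteStatic    = StubGetByteStatic;     // sign contract in the signature
      qpoints.pGetBooleanStatic = StubGetBooleanStatic;  // zero-extend contract
      qpoints.pGetShortStatic   = StubGetShortStatic;
      qpoints.pGetCharStatic    = StubGetCharStatic;
      (void)qpoints;
    }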
qpoints->pGet32Static = art_quick_get32_static; qpoints->pGet64Static = art_quick_get64_static; qpoints->pGetObjStatic = art_quick_get_obj_static; diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S index 1b30c9c..51bcd3c 100644 --- a/runtime/arch/arm/quick_entrypoints_arm.S +++ b/runtime/arch/arm/quick_entrypoints_arm.S @@ -203,6 +203,77 @@ ENTRY \c_name END \c_name .endm +.macro RETURN_OR_DELIVER_PENDING_EXCEPTION_REG reg + ldr \reg, [r9, #THREAD_EXCEPTION_OFFSET] // Get exception field. + cbnz \reg, 1f + bx lr +1: + DELIVER_PENDING_EXCEPTION +.endm + +.macro RETURN_OR_DELIVER_PENDING_EXCEPTION_R1 + RETURN_OR_DELIVER_PENDING_EXCEPTION_REG r1 +.endm + +.macro RETURN_IF_RESULT_IS_ZERO_OR_DELIVER + RETURN_IF_RESULT_IS_ZERO + DELIVER_PENDING_EXCEPTION +.endm + +// Macros taking opportunity of code similarities for downcalls with referrer for non-wide fields. +.macro ONE_ARG_REF_DOWNCALL name, entrypoint, return + .extern \entrypoint +ENTRY \name + SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC + ldr r1, [sp, #32] @ pass referrer + mov r2, r9 @ pass Thread::Current + mov r3, sp @ pass SP + bl \entrypoint @ (uint32_t field_idx, const Method* referrer, Thread*, SP) + RESTORE_REF_ONLY_CALLEE_SAVE_FRAME + \return +END \name +.endm + +.macro TWO_ARG_REF_DOWNCALL name, entrypoint, return + .extern \entrypoint +ENTRY \name + SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC + ldr r2, [sp, #32] @ pass referrer + mov r3, r9 @ pass Thread::Current + mov r12, sp + str r12, [sp, #-16]! @ expand the frame and pass SP + .pad #16 + .cfi_adjust_cfa_offset 16 + bl \entrypoint @ (field_idx, Object*, referrer, Thread*, SP) + add sp, #16 @ strip the extra frame + .cfi_adjust_cfa_offset -16 + RESTORE_REF_ONLY_CALLEE_SAVE_FRAME + \return +END \name +.endm + +.macro THREE_ARG_REF_DOWNCALL name, entrypoint, return + .extern \entrypoint +ENTRY \name + SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC + ldr r3, [sp, #32] @ pass referrer + mov r12, sp @ save SP + sub sp, #8 @ grow frame for alignment with stack args + .pad #8 + .cfi_adjust_cfa_offset 8 + push {r9, r12} @ pass Thread::Current and SP + .save {r9, r12} + .cfi_adjust_cfa_offset 8 + .cfi_rel_offset r9, 0 + .cfi_rel_offset r12, 4 + bl \entrypoint @ (field_idx, Object*, new_val, referrer, Thread*, SP) + add sp, #16 @ release out args + .cfi_adjust_cfa_offset -16 + RESTORE_REF_ONLY_CALLEE_SAVE_FRAME @ TODO: we can clearly save an add here + \return +END \name +.endm + /* * Called by managed code, saves callee saves and then calls artThrowException * that will place a mock Method* at the bottom of the stack. Arg1 holds the exception. @@ -601,23 +672,14 @@ ENTRY art_quick_initialize_type_and_verify_access END art_quick_initialize_type_and_verify_access /* - * Called by managed code to resolve a static field and load a 32-bit primitive value. + * Called by managed code to resolve a static field and load a non-wide value. 
*/ - .extern artGet32StaticFromCode -ENTRY art_quick_get32_static - SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC - ldr r1, [sp, #32] @ pass referrer - mov r2, r9 @ pass Thread::Current - mov r3, sp @ pass SP - bl artGet32StaticFromCode @ (uint32_t field_idx, const Method* referrer, Thread*, SP) - ldr r1, [r9, #THREAD_EXCEPTION_OFFSET] @ load Thread::Current()->exception_ - RESTORE_REF_ONLY_CALLEE_SAVE_FRAME - cbnz r1, 1f @ success if no exception pending - bx lr @ return on success -1: - DELIVER_PENDING_EXCEPTION -END art_quick_get32_static - +ONE_ARG_REF_DOWNCALL art_quick_get_byte_static, artGetByteStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_R1 +ONE_ARG_REF_DOWNCALL art_quick_get_boolean_static, artGetBooleanStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_R1 +ONE_ARG_REF_DOWNCALL art_quick_get_short_static, artGetShortStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_R1 +ONE_ARG_REF_DOWNCALL art_quick_get_char_static, artGetCharStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_R1 +ONE_ARG_REF_DOWNCALL art_quick_get32_static, artGet32StaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_R1 +ONE_ARG_REF_DOWNCALL art_quick_get_obj_static, artGetObjStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_R1 /* * Called by managed code to resolve a static field and load a 64-bit primitive value. */ @@ -637,43 +699,14 @@ ENTRY art_quick_get64_static END art_quick_get64_static /* - * Called by managed code to resolve a static field and load an object reference. + * Called by managed code to resolve an instance field and load a non-wide value. */ - .extern artGetObjStaticFromCode -ENTRY art_quick_get_obj_static - SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC - ldr r1, [sp, #32] @ pass referrer - mov r2, r9 @ pass Thread::Current - mov r3, sp @ pass SP - bl artGetObjStaticFromCode @ (uint32_t field_idx, const Method* referrer, Thread*, SP) - ldr r1, [r9, #THREAD_EXCEPTION_OFFSET] @ load Thread::Current()->exception_ - RESTORE_REF_ONLY_CALLEE_SAVE_FRAME - cbnz r1, 1f @ success if no exception pending - bx lr @ return on success -1: - DELIVER_PENDING_EXCEPTION -END art_quick_get_obj_static - - /* - * Called by managed code to resolve an instance field and load a 32-bit primitive value. - */ - .extern artGet32InstanceFromCode -ENTRY art_quick_get32_instance - SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC - ldr r2, [sp, #32] @ pass referrer - mov r3, r9 @ pass Thread::Current - mov r12, sp - str r12, [sp, #-16]! 
@ expand the frame and pass SP - bl artGet32InstanceFromCode @ (field_idx, Object*, referrer, Thread*, SP) - add sp, #16 @ strip the extra frame - ldr r1, [r9, #THREAD_EXCEPTION_OFFSET] @ load Thread::Current()->exception_ - RESTORE_REF_ONLY_CALLEE_SAVE_FRAME - cbnz r1, 1f @ success if no exception pending - bx lr @ return on success -1: - DELIVER_PENDING_EXCEPTION -END art_quick_get32_instance - +TWO_ARG_REF_DOWNCALL art_quick_get_byte_instance, artGetByteInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_R1 +TWO_ARG_REF_DOWNCALL art_quick_get_boolean_instance, artGetBooleanInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_R1 +TWO_ARG_REF_DOWNCALL art_quick_get_short_instance, artGetShortInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_R1 +TWO_ARG_REF_DOWNCALL art_quick_get_char_instance, artGetCharInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_R1 +TWO_ARG_REF_DOWNCALL art_quick_get32_instance, artGet32InstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_R1 +TWO_ARG_REF_DOWNCALL art_quick_get_obj_instance, artGetObjInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_R1 /* * Called by managed code to resolve an instance field and load a 64-bit primitive value. */ @@ -698,48 +731,12 @@ ENTRY art_quick_get64_instance END art_quick_get64_instance /* - * Called by managed code to resolve an instance field and load an object reference. - */ - .extern artGetObjInstanceFromCode -ENTRY art_quick_get_obj_instance - SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC - ldr r2, [sp, #32] @ pass referrer - mov r3, r9 @ pass Thread::Current - mov r12, sp - str r12, [sp, #-16]! @ expand the frame and pass SP - .pad #16 - .cfi_adjust_cfa_offset 16 - bl artGetObjInstanceFromCode @ (field_idx, Object*, referrer, Thread*, SP) - add sp, #16 @ strip the extra frame - .cfi_adjust_cfa_offset -16 - ldr r1, [r9, #THREAD_EXCEPTION_OFFSET] @ load Thread::Current()->exception_ - RESTORE_REF_ONLY_CALLEE_SAVE_FRAME - cbnz r1, 1f @ success if no exception pending - bx lr @ return on success -1: - DELIVER_PENDING_EXCEPTION -END art_quick_get_obj_instance - - /* - * Called by managed code to resolve a static field and store a 32-bit primitive value. + * Called by managed code to resolve a static field and store a non-wide value. */ - .extern artSet32StaticFromCode -ENTRY art_quick_set32_static - SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC - ldr r2, [sp, #32] @ pass referrer - mov r3, r9 @ pass Thread::Current - mov r12, sp - str r12, [sp, #-16]! @ expand the frame and pass SP - .pad #16 - .cfi_adjust_cfa_offset 16 - bl artSet32StaticFromCode @ (field_idx, new_val, referrer, Thread*, SP) - add sp, #16 @ strip the extra frame - .cfi_adjust_cfa_offset -16 - RESTORE_REF_ONLY_CALLEE_SAVE_FRAME - RETURN_IF_RESULT_IS_ZERO - DELIVER_PENDING_EXCEPTION -END art_quick_set32_static - +TWO_ARG_REF_DOWNCALL art_quick_set8_static, artSet8StaticFromCode, RETURN_IF_RESULT_IS_ZERO_OR_DELIVER +TWO_ARG_REF_DOWNCALL art_quick_set16_static, artSet16StaticFromCode, RETURN_IF_RESULT_IS_ZERO_OR_DELIVER +TWO_ARG_REF_DOWNCALL art_quick_set32_static, artSet32StaticFromCode, RETURN_IF_RESULT_IS_ZERO_OR_DELIVER +TWO_ARG_REF_DOWNCALL art_quick_set_obj_static, artSetObjStaticFromCode, RETURN_IF_RESULT_IS_ZERO_OR_DELIVER /* * Called by managed code to resolve a static field and store a 64-bit primitive value. 
* On entry r0 holds field index, r1:r2 hold new_val @@ -767,53 +764,16 @@ ENTRY art_quick_set64_static END art_quick_set64_static /* - * Called by managed code to resolve a static field and store an object reference. - */ - .extern artSetObjStaticFromCode -ENTRY art_quick_set_obj_static - SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC - ldr r2, [sp, #32] @ pass referrer - mov r3, r9 @ pass Thread::Current - mov r12, sp - str r12, [sp, #-16]! @ expand the frame and pass SP - .pad #16 - .cfi_adjust_cfa_offset 16 - bl artSetObjStaticFromCode @ (field_idx, new_val, referrer, Thread*, SP) - add sp, #16 @ strip the extra frame - .cfi_adjust_cfa_offset -16 - RESTORE_REF_ONLY_CALLEE_SAVE_FRAME - RETURN_IF_RESULT_IS_ZERO - DELIVER_PENDING_EXCEPTION -END art_quick_set_obj_static - - /* - * Called by managed code to resolve an instance field and store a 32-bit primitive value. + * Called by managed code to resolve an instance field and store a non-wide value. */ - .extern artSet32InstanceFromCode -ENTRY art_quick_set32_instance - SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC - ldr r3, [sp, #32] @ pass referrer - mov r12, sp @ save SP - sub sp, #8 @ grow frame for alignment with stack args - .pad #8 - .cfi_adjust_cfa_offset 8 - push {r9, r12} @ pass Thread::Current and SP - .save {r9, r12} - .cfi_adjust_cfa_offset 8 - .cfi_rel_offset r9, 0 - .cfi_rel_offset r12, 4 - bl artSet32InstanceFromCode @ (field_idx, Object*, new_val, referrer, Thread*, SP) - add sp, #16 @ release out args - .cfi_adjust_cfa_offset -16 - RESTORE_REF_ONLY_CALLEE_SAVE_FRAME @ TODO: we can clearly save an add here - RETURN_IF_RESULT_IS_ZERO - DELIVER_PENDING_EXCEPTION -END art_quick_set32_instance - +THREE_ARG_REF_DOWNCALL art_quick_set8_instance, artSet8InstanceFromCode, RETURN_IF_RESULT_IS_ZERO_OR_DELIVER +THREE_ARG_REF_DOWNCALL art_quick_set16_instance, artSet16InstanceFromCode, RETURN_IF_RESULT_IS_ZERO_OR_DELIVER +THREE_ARG_REF_DOWNCALL art_quick_set32_instance, artSet32InstanceFromCode, RETURN_IF_RESULT_IS_ZERO_OR_DELIVER +THREE_ARG_REF_DOWNCALL art_quick_set_obj_instance, artSetObjInstanceFromCode, RETURN_IF_RESULT_IS_ZERO_OR_DELIVER /* * Called by managed code to resolve an instance field and store a 64-bit primitive value. */ - .extern artSet32InstanceFromCode + .extern artSet64InstanceFromCode ENTRY art_quick_set64_instance SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC mov r12, sp @ save SP @@ -833,29 +793,6 @@ ENTRY art_quick_set64_instance END art_quick_set64_instance /* - * Called by managed code to resolve an instance field and store an object reference. - */ - .extern artSetObjInstanceFromCode -ENTRY art_quick_set_obj_instance - SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC - ldr r3, [sp, #32] @ pass referrer - mov r12, sp @ save SP - sub sp, #8 @ grow frame for alignment with stack args - .pad #8 - .cfi_adjust_cfa_offset 8 - push {r9, r12} @ pass Thread::Current and SP - .save {r9, r12} - .cfi_adjust_cfa_offset 8 - .cfi_rel_offset r9, 0 - bl artSetObjInstanceFromCode @ (field_idx, Object*, new_val, referrer, Thread*, SP) - add sp, #16 @ release out args - .cfi_adjust_cfa_offset -16 - RESTORE_REF_ONLY_CALLEE_SAVE_FRAME @ TODO: we can clearly save an add here - RETURN_IF_RESULT_IS_ZERO - DELIVER_PENDING_EXCEPTION -END art_quick_set_obj_instance - - /* * Entry from managed code to resolve a string, this stub will allocate a String and deliver an * exception on error. On success the String is returned. 
R0 holds the referring method, * R1 holds the string index. The fast path check for hit in strings cache has already been diff --git a/runtime/arch/arm64/entrypoints_init_arm64.cc b/runtime/arch/arm64/entrypoints_init_arm64.cc index 0c33d9c..70e93b3 100644 --- a/runtime/arch/arm64/entrypoints_init_arm64.cc +++ b/runtime/arch/arm64/entrypoints_init_arm64.cc @@ -47,12 +47,24 @@ extern "C" void* art_quick_initialize_type_and_verify_access(uint32_t, void*); extern "C" void* art_quick_resolve_string(void*, uint32_t); // Field entrypoints. +extern "C" int art_quick_set8_instance(uint32_t, void*, int8_t); +extern "C" int art_quick_set8_static(uint32_t, int8_t); +extern "C" int art_quick_set16_instance(uint32_t, void*, int16_t); +extern "C" int art_quick_set16_static(uint32_t, int16_t); extern "C" int art_quick_set32_instance(uint32_t, void*, int32_t); extern "C" int art_quick_set32_static(uint32_t, int32_t); extern "C" int art_quick_set64_instance(uint32_t, void*, int64_t); extern "C" int art_quick_set64_static(uint32_t, int64_t); extern "C" int art_quick_set_obj_instance(uint32_t, void*, void*); extern "C" int art_quick_set_obj_static(uint32_t, void*); +extern "C" uint8_t art_quick_get_boolean_instance(uint32_t, void*); +extern "C" int8_t art_quick_get_byte_instance(uint32_t, void*); +extern "C" uint8_t art_quick_get_boolean_static(uint32_t); +extern "C" int8_t art_quick_get_byte_static(uint32_t); +extern "C" uint16_t art_quick_get_char_instance(uint32_t, void*); +extern "C" int16_t art_quick_get_short_instance(uint32_t, void*); +extern "C" uint16_t art_quick_get_char_static(uint32_t); +extern "C" int16_t art_quick_get_short_static(uint32_t); extern "C" int32_t art_quick_get32_instance(uint32_t, void*); extern "C" int32_t art_quick_get32_static(uint32_t); extern "C" int64_t art_quick_get64_instance(uint32_t, void*); @@ -136,15 +148,27 @@ void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints, qpoints->pResolveString = art_quick_resolve_string; // Field + qpoints->pSet8Instance = art_quick_set8_instance; + qpoints->pSet8Static = art_quick_set8_static; + qpoints->pSet16Instance = art_quick_set16_instance; + qpoints->pSet16Static = art_quick_set16_static; qpoints->pSet32Instance = art_quick_set32_instance; qpoints->pSet32Static = art_quick_set32_static; qpoints->pSet64Instance = art_quick_set64_instance; qpoints->pSet64Static = art_quick_set64_static; qpoints->pSetObjInstance = art_quick_set_obj_instance; qpoints->pSetObjStatic = art_quick_set_obj_static; + qpoints->pGetBooleanInstance = art_quick_get_boolean_instance; + qpoints->pGetByteInstance = art_quick_get_byte_instance; + qpoints->pGetCharInstance = art_quick_get_char_instance; + qpoints->pGetShortInstance = art_quick_get_short_instance; qpoints->pGet32Instance = art_quick_get32_instance; qpoints->pGet64Instance = art_quick_get64_instance; qpoints->pGetObjInstance = art_quick_get_obj_instance; + qpoints->pGetBooleanStatic = art_quick_get_boolean_static; + qpoints->pGetByteStatic = art_quick_get_byte_static; + qpoints->pGetCharStatic = art_quick_get_char_static; + qpoints->pGetShortStatic = art_quick_get_short_static; qpoints->pGet32Static = art_quick_get32_static; qpoints->pGet64Static = art_quick_get64_static; qpoints->pGetObjStatic = art_quick_get_obj_static; diff --git a/runtime/arch/arm64/quick_entrypoints_arm64.S b/runtime/arch/arm64/quick_entrypoints_arm64.S index 2a19e27..606816a 100644 --- a/runtime/arch/arm64/quick_entrypoints_arm64.S +++ b/runtime/arch/arm64/quick_entrypoints_arm64.S @@ -1266,17 +1266,29 @@ 
TWO_ARG_DOWNCALL art_quick_initialize_static_storage, artInitializeStaticStorage TWO_ARG_DOWNCALL art_quick_initialize_type, artInitializeTypeFromCode, RETURN_IF_RESULT_IS_NON_ZERO TWO_ARG_DOWNCALL art_quick_initialize_type_and_verify_access, artInitializeTypeAndVerifyAccessFromCode, RETURN_IF_RESULT_IS_NON_ZERO +ONE_ARG_REF_DOWNCALL art_quick_get_boolean_static, artGetBooleanStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1 +ONE_ARG_REF_DOWNCALL art_quick_get_byte_static, artGetByteStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1 +ONE_ARG_REF_DOWNCALL art_quick_get_char_static, artGetCharStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1 +ONE_ARG_REF_DOWNCALL art_quick_get_short_static, artGetShortStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1 ONE_ARG_REF_DOWNCALL art_quick_get32_static, artGet32StaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1 ONE_ARG_REF_DOWNCALL art_quick_get64_static, artGet64StaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1 ONE_ARG_REF_DOWNCALL art_quick_get_obj_static, artGetObjStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1 +TWO_ARG_REF_DOWNCALL art_quick_get_boolean_instance, artGetBooleanInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1 +TWO_ARG_REF_DOWNCALL art_quick_get_byte_instance, artGetByteInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1 +TWO_ARG_REF_DOWNCALL art_quick_get_char_instance, artGetCharInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1 +TWO_ARG_REF_DOWNCALL art_quick_get_short_instance, artGetShortInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1 TWO_ARG_REF_DOWNCALL art_quick_get32_instance, artGet32InstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1 TWO_ARG_REF_DOWNCALL art_quick_get64_instance, artGet64InstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1 TWO_ARG_REF_DOWNCALL art_quick_get_obj_instance, artGetObjInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1 +TWO_ARG_REF_DOWNCALL art_quick_set8_static, artSet8StaticFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER +TWO_ARG_REF_DOWNCALL art_quick_set16_static, artSet16StaticFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER TWO_ARG_REF_DOWNCALL art_quick_set32_static, artSet32StaticFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER TWO_ARG_REF_DOWNCALL art_quick_set_obj_static, artSetObjStaticFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER +THREE_ARG_REF_DOWNCALL art_quick_set8_instance, artSet8InstanceFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER +THREE_ARG_REF_DOWNCALL art_quick_set16_instance, artSet16InstanceFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER THREE_ARG_REF_DOWNCALL art_quick_set32_instance, artSet32InstanceFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER THREE_ARG_DOWNCALL art_quick_set64_instance, artSet64InstanceFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER THREE_ARG_REF_DOWNCALL art_quick_set_obj_instance, artSetObjInstanceFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER diff --git a/runtime/arch/mips/entrypoints_init_mips.cc b/runtime/arch/mips/entrypoints_init_mips.cc index d3e7d5e..25e911d 100644 --- a/runtime/arch/mips/entrypoints_init_mips.cc +++ b/runtime/arch/mips/entrypoints_init_mips.cc @@ -49,12 +49,24 @@ extern "C" void* art_quick_initialize_type_and_verify_access(uint32_t, void*); extern "C" void* art_quick_resolve_string(void*, uint32_t); // Field entrypoints. 
+extern "C" int art_quick_set8_instance(uint32_t, void*, int8_t); +extern "C" int art_quick_set8_static(uint32_t, int8_t); +extern "C" int art_quick_set16_instance(uint32_t, void*, int16_t); +extern "C" int art_quick_set16_static(uint32_t, int16_t); extern "C" int art_quick_set32_instance(uint32_t, void*, int32_t); extern "C" int art_quick_set32_static(uint32_t, int32_t); extern "C" int art_quick_set64_instance(uint32_t, void*, int64_t); extern "C" int art_quick_set64_static(uint32_t, int64_t); extern "C" int art_quick_set_obj_instance(uint32_t, void*, void*); extern "C" int art_quick_set_obj_static(uint32_t, void*); +extern "C" uint8_t art_quick_get_boolean_instance(uint32_t, void*); +extern "C" int8_t art_quick_get_byte_instance(uint32_t, void*); +extern "C" uint8_t art_quick_get_boolean_static(uint32_t); +extern "C" int8_t art_quick_get_byte_static(uint32_t); +extern "C" uint16_t art_quick_get_char_instance(uint32_t, void*); +extern "C" int16_t art_quick_get_short_instance(uint32_t, void*); +extern "C" uint16_t art_quick_get_char_static(uint32_t); +extern "C" int16_t art_quick_get_short_static(uint32_t); extern "C" int32_t art_quick_get32_instance(uint32_t, void*); extern "C" int32_t art_quick_get32_static(uint32_t); extern "C" int64_t art_quick_get64_instance(uint32_t, void*); @@ -159,15 +171,27 @@ void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints, qpoints->pResolveString = art_quick_resolve_string; // Field + qpoints->pSet8Instance = art_quick_set8_instance; + qpoints->pSet8Static = art_quick_set8_static; + qpoints->pSet16Instance = art_quick_set16_instance; + qpoints->pSet16Static = art_quick_set16_static; qpoints->pSet32Instance = art_quick_set32_instance; qpoints->pSet32Static = art_quick_set32_static; qpoints->pSet64Instance = art_quick_set64_instance; qpoints->pSet64Static = art_quick_set64_static; qpoints->pSetObjInstance = art_quick_set_obj_instance; qpoints->pSetObjStatic = art_quick_set_obj_static; + qpoints->pGetBooleanInstance = art_quick_get_boolean_instance; + qpoints->pGetByteInstance = art_quick_get_byte_instance; + qpoints->pGetCharInstance = art_quick_get_char_instance; + qpoints->pGetShortInstance = art_quick_get_short_instance; qpoints->pGet32Instance = art_quick_get32_instance; qpoints->pGet64Instance = art_quick_get64_instance; qpoints->pGetObjInstance = art_quick_get_obj_instance; + qpoints->pGetBooleanStatic = art_quick_get_boolean_static; + qpoints->pGetByteStatic = art_quick_get_byte_static; + qpoints->pGetCharStatic = art_quick_get_char_static; + qpoints->pGetShortStatic = art_quick_get_short_static; qpoints->pGet32Static = art_quick_get32_static; qpoints->pGet64Static = art_quick_get64_static; qpoints->pGetObjStatic = art_quick_get_obj_static; diff --git a/runtime/arch/mips/quick_entrypoints_mips.S b/runtime/arch/mips/quick_entrypoints_mips.S index 8786222..9e9e523 100644 --- a/runtime/arch/mips/quick_entrypoints_mips.S +++ b/runtime/arch/mips/quick_entrypoints_mips.S @@ -739,6 +739,59 @@ ENTRY art_quick_initialize_type_and_verify_access move $a3, $sp # pass $sp RETURN_IF_RESULT_IS_NON_ZERO END art_quick_initialize_type_and_verify_access + /* + * Called by managed code to resolve a static field and load a boolean primitive value. 
+ */ + .extern artGetBooleanStaticFromCode +ENTRY art_quick_get_boolean_static + GENERATE_GLOBAL_POINTER + SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC + lw $a1, 64($sp) # pass referrer's Method* + move $a2, rSELF # pass Thread::Current + jal artGetBooleanStaticFromCode # (uint32_t field_idx, const Method* referrer, Thread*, $sp) + move $a3, $sp # pass $sp + RETURN_IF_NO_EXCEPTION +END art_quick_get_boolean_static + /* + * Called by managed code to resolve a static field and load a byte primitive value. + */ + .extern artGetByteStaticFromCode +ENTRY art_quick_get_byte_static + GENERATE_GLOBAL_POINTER + SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC + lw $a1, 64($sp) # pass referrer's Method* + move $a2, rSELF # pass Thread::Current + jal artGetByteStaticFromCode # (uint32_t field_idx, const Method* referrer, Thread*, $sp) + move $a3, $sp # pass $sp + RETURN_IF_NO_EXCEPTION +END art_quick_get_byte_static + + /* + * Called by managed code to resolve a static field and load a char primitive value. + */ + .extern artGetCharStaticFromCode +ENTRY art_quick_get_char_static + GENERATE_GLOBAL_POINTER + SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC + lw $a1, 64($sp) # pass referrer's Method* + move $a2, rSELF # pass Thread::Current + jal artGetCharStaticFromCode # (uint32_t field_idx, const Method* referrer, Thread*, $sp) + move $a3, $sp # pass $sp + RETURN_IF_NO_EXCEPTION +END art_quick_get_char_static + /* + * Called by managed code to resolve a static field and load a short primitive value. + */ + .extern artGetShortStaticFromCode +ENTRY art_quick_get_short_static + GENERATE_GLOBAL_POINTER + SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC + lw $a1, 64($sp) # pass referrer's Method* + move $a2, rSELF # pass Thread::Current + jal artGetShortStaticFromCode # (uint32_t field_idx, const Method* referrer, Thread*, $sp) + move $a3, $sp # pass $sp + RETURN_IF_NO_EXCEPTION +END art_quick_get_short_static /* * Called by managed code to resolve a static field and load a 32-bit primitive value. @@ -783,6 +836,60 @@ ENTRY art_quick_get_obj_static END art_quick_get_obj_static /* + * Called by managed code to resolve an instance field and load a boolean primitive value. + */ + .extern artGetBooleanInstanceFromCode +ENTRY art_quick_get_boolean_instance + GENERATE_GLOBAL_POINTER + SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC + lw $a2, 64($sp) # pass referrer's Method* + move $a3, rSELF # pass Thread::Current + jal artGetBooleanInstanceFromCode # (field_idx, Object*, referrer, Thread*, $sp) + sw $sp, 16($sp) # pass $sp + RETURN_IF_NO_EXCEPTION +END art_quick_get_boolean_instance + /* + * Called by managed code to resolve an instance field and load a byte primitive value. + */ + .extern artGetByteInstanceFromCode +ENTRY art_quick_get_byte_instance + GENERATE_GLOBAL_POINTER + SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC + lw $a2, 64($sp) # pass referrer's Method* + move $a3, rSELF # pass Thread::Current + jal artGetByteInstanceFromCode # (field_idx, Object*, referrer, Thread*, $sp) + sw $sp, 16($sp) # pass $sp + RETURN_IF_NO_EXCEPTION +END art_quick_get_byte_instance + + /* + * Called by managed code to resolve an instance field and load a char primitive value. 
+ */ + .extern artGetCharInstanceFromCode +ENTRY art_quick_get_char_instance + GENERATE_GLOBAL_POINTER + SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC + lw $a2, 64($sp) # pass referrer's Method* + move $a3, rSELF # pass Thread::Current + jal artGetCharInstanceFromCode # (field_idx, Object*, referrer, Thread*, $sp) + sw $sp, 16($sp) # pass $sp + RETURN_IF_NO_EXCEPTION +END art_quick_get_char_instance + /* + * Called by managed code to resolve an instance field and load a short primitive value. + */ + .extern artGetShortInstanceFromCode +ENTRY art_quick_get_short_instance + GENERATE_GLOBAL_POINTER + SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC + lw $a2, 64($sp) # pass referrer's Method* + move $a3, rSELF # pass Thread::Current + jal artGetShortInstanceFromCode # (field_idx, Object*, referrer, Thread*, $sp) + sw $sp, 16($sp) # pass $sp + RETURN_IF_NO_EXCEPTION +END art_quick_get_short_instance + + /* * Called by managed code to resolve an instance field and load a 32-bit primitive value. */ .extern artGet32InstanceFromCode @@ -825,6 +932,34 @@ ENTRY art_quick_get_obj_instance END art_quick_get_obj_instance /* + * Called by managed code to resolve a static field and store an 8-bit primitive value. + */ + .extern artSet8StaticFromCode +ENTRY art_quick_set8_static + GENERATE_GLOBAL_POINTER + SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC + lw $a2, 64($sp) # pass referrer's Method* + move $a3, rSELF # pass Thread::Current + jal artSet8StaticFromCode # (field_idx, new_val, referrer, Thread*, $sp) + sw $sp, 16($sp) # pass $sp + RETURN_IF_ZERO +END art_quick_set8_static + + /* + * Called by managed code to resolve a static field and store a 16-bit primitive value. + */ + .extern artSet16StaticFromCode +ENTRY art_quick_set16_static + GENERATE_GLOBAL_POINTER + SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC + lw $a2, 64($sp) # pass referrer's Method* + move $a3, rSELF # pass Thread::Current + jal artSet16StaticFromCode # (field_idx, new_val, referrer, Thread*, $sp) + sw $sp, 16($sp) # pass $sp + RETURN_IF_ZERO +END art_quick_set16_static + + /* * Called by managed code to resolve a static field and store a 32-bit primitive value. */ .extern artSet32StaticFromCode @@ -841,7 +976,7 @@ END art_quick_set32_static /* * Called by managed code to resolve a static field and store a 64-bit primitive value. */ - .extern artSet32StaticFromCode + .extern artSet64StaticFromCode ENTRY art_quick_set64_static GENERATE_GLOBAL_POINTER SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC @@ -867,6 +1002,34 @@ ENTRY art_quick_set_obj_static END art_quick_set_obj_static /* + * Called by managed code to resolve an instance field and store an 8-bit primitive value. + */ + .extern artSet8InstanceFromCode +ENTRY art_quick_set8_instance + GENERATE_GLOBAL_POINTER + SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC + lw $a3, 64($sp) # pass referrer's Method* + sw rSELF, 16($sp) # pass Thread::Current + jal artSet8InstanceFromCode # (field_idx, Object*, new_val, referrer, Thread*, $sp) + sw $sp, 20($sp) # pass $sp + RETURN_IF_ZERO +END art_quick_set8_instance + + /* + * Called by managed code to resolve an instance field and store a 16-bit primitive value.
+ */ + .extern artSet16InstanceFromCode +ENTRY art_quick_set16_instance + GENERATE_GLOBAL_POINTER + SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC + lw $a3, 64($sp) # pass referrer's Method* + sw rSELF, 16($sp) # pass Thread::Current + jal artSet16InstanceFromCode # (field_idx, Object*, new_val, referrer, Thread*, $sp) + sw $sp, 20($sp) # pass $sp + RETURN_IF_ZERO +END art_quick_set16_instance + + /* * Called by managed code to resolve an instance field and store a 32-bit primitive value. */ .extern artSet32InstanceFromCode @@ -883,7 +1046,7 @@ END art_quick_set32_instance /* * Called by managed code to resolve an instance field and store a 64-bit primitive value. */ - .extern artSet32InstanceFromCode + .extern artSet64InstanceFromCode ENTRY art_quick_set64_instance GENERATE_GLOBAL_POINTER SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC diff --git a/runtime/arch/stub_test.cc b/runtime/arch/stub_test.cc index 864e3f7..1215d8b 100644 --- a/runtime/arch/stub_test.cc +++ b/runtime/arch/stub_test.cc @@ -1306,6 +1306,288 @@ TEST_F(StubTest, StringCompareTo) { } + +#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__)) +extern "C" void art_quick_set8_static(void); +extern "C" void art_quick_get_byte_static(void); +extern "C" void art_quick_get_boolean_static(void); +#endif + +static void GetSetBooleanStatic(Handle<mirror::Object>* obj, Handle<mirror::ArtField>* f, Thread* self, + mirror::ArtMethod* referrer, StubTest* test) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { +#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__)) + constexpr size_t num_values = 5; + uint8_t values[num_values] = { 0, 1, 2, 128, 0xFF }; + + for (size_t i = 0; i < num_values; ++i) { + test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()), + static_cast<size_t>(values[i]), + 0U, + StubTest::GetEntrypoint(self, kQuickSet8Static), + self, + referrer); + + size_t res = test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()), + 0U, 0U, + StubTest::GetEntrypoint(self, kQuickGetBooleanStatic), + self, + referrer); + // Boolean currently stores bools as uint8_t, so be more zealous about asserting correct writes/gets. + EXPECT_EQ(values[i], static_cast<uint8_t>(res)) << "Iteration " << i; + } +#else + LOG(INFO) << "Skipping set_boolean_static as I don't know how to do that on " << kRuntimeISA; + // Force-print to std::cout so it's also outside the logcat.
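A note on the value tables used by these test helpers: because the 8-bit setter stores the raw byte, the boolean test deliberately includes non-0/1 values (2, 128, 0xFF) and asserts a bit-exact round trip rather than any normalization. A minimal sketch of that property, with SetU8/GetU8 as hypothetical stand-ins for the kQuickSet8Static/kQuickGetBooleanStatic pair; not ART code:

#include <cassert>
#include <cstdint>

static uint8_t slot;  // stands in for the field's 1-byte storage

void SetU8(uint8_t v) { slot = v; }   // hypothetical stand-in for the 8-bit set entrypoint
uint8_t GetU8() { return slot; }      // hypothetical stand-in for the boolean get entrypoint

int main() {
  const uint8_t values[] = { 0, 1, 2, 128, 0xFF };
  for (uint8_t v : values) {
    SetU8(v);
    assert(GetU8() == v);  // bit-exact, even for non-0/1 "booleans"
  }
  return 0;
}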
+ std::cout << "Skipping set_boolean_static as I don't know how to do that on " << kRuntimeISA << std::endl; +#endif +} +static void GetSetByteStatic(Handle<mirror::Object>* obj, Handle<mirror::ArtField>* f, Thread* self, + mirror::ArtMethod* referrer, StubTest* test) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { +#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__)) + constexpr size_t num_values = 5; + int8_t values[num_values] = { -128, -64, 0, 64, 127 }; + + for (size_t i = 0; i < num_values; ++i) { + test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()), + static_cast<size_t>(values[i]), + 0U, + StubTest::GetEntrypoint(self, kQuickSet8Static), + self, + referrer); + + size_t res = test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()), + 0U, 0U, + StubTest::GetEntrypoint(self, kQuickGetByteStatic), + self, + referrer); + EXPECT_EQ(values[i], static_cast<int8_t>(res)) << "Iteration " << i; + } +#else + LOG(INFO) << "Skipping set_byte_static as I don't know how to do that on " << kRuntimeISA; + // Force-print to std::cout so it's also outside the logcat. + std::cout << "Skipping set_byte_static as I don't know how to do that on " << kRuntimeISA << std::endl; +#endif +} + + +#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__)) +extern "C" void art_quick_set8_instance(void); +extern "C" void art_quick_get_byte_instance(void); +extern "C" void art_quick_get_boolean_instance(void); +#endif + +static void GetSetBooleanInstance(Handle<mirror::Object>* obj, Handle<mirror::ArtField>* f, + Thread* self, mirror::ArtMethod* referrer, StubTest* test) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { +#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__)) + constexpr size_t num_values = 5; + uint8_t values[num_values] = { 0, true, 2, 128, 0xFF }; + + for (size_t i = 0; i < num_values; ++i) { + test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()), + reinterpret_cast<size_t>(obj->Get()), + static_cast<size_t>(values[i]), + StubTest::GetEntrypoint(self, kQuickSet8Instance), + self, + referrer); + + uint8_t res = f->Get()->GetBoolean(obj->Get()); + EXPECT_EQ(values[i], res) << "Iteration " << i; + + f->Get()->SetBoolean<false>(obj->Get(), res); + + size_t res2 = test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()), + reinterpret_cast<size_t>(obj->Get()), + 0U, + StubTest::GetEntrypoint(self, kQuickGetBooleanInstance), + self, + referrer); + EXPECT_EQ(res, static_cast<uint8_t>(res2)); + } +#else + LOG(INFO) << "Skipping set_boolean_instance as I don't know how to do that on " << kRuntimeISA; + // Force-print to std::cout so it's also outside the logcat. 
+ std::cout << "Skipping set_boolean_instance as I don't know how to do that on " << kRuntimeISA << std::endl; +#endif +} +static void GetSetByteInstance(Handle<mirror::Object>* obj, Handle<mirror::ArtField>* f, + Thread* self, mirror::ArtMethod* referrer, StubTest* test) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { +#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__)) + constexpr size_t num_values = 5; + int8_t values[num_values] = { -128, -64, 0, 64, 127 }; + + for (size_t i = 0; i < num_values; ++i) { + test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()), + reinterpret_cast<size_t>(obj->Get()), + static_cast<size_t>(values[i]), + StubTest::GetEntrypoint(self, kQuickSet8Instance), + self, + referrer); + + int8_t res = f->Get()->GetByte(obj->Get()); + EXPECT_EQ(res, values[i]) << "Iteration " << i; + f->Get()->SetByte<false>(obj->Get(), ++res); + + size_t res2 = test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()), + reinterpret_cast<size_t>(obj->Get()), + 0U, + StubTest::GetEntrypoint(self, kQuickGetByteInstance), + self, + referrer); + EXPECT_EQ(res, static_cast<int8_t>(res2)); + } +#else + LOG(INFO) << "Skipping set_byte_instance as I don't know how to do that on " << kRuntimeISA; + // Force-print to std::cout so it's also outside the logcat. + std::cout << "Skipping set_byte_instance as I don't know how to do that on " << kRuntimeISA << std::endl; +#endif +} + +#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__)) +extern "C" void art_quick_set16_static(void); +extern "C" void art_quick_get_short_static(void); +extern "C" void art_quick_get_char_static(void); +#endif + +static void GetSetCharStatic(Handle<mirror::Object>* obj, Handle<mirror::ArtField>* f, Thread* self, + mirror::ArtMethod* referrer, StubTest* test) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { +#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__)) + constexpr size_t num_values = 6; + uint16_t values[num_values] = { 0, 1, 2, 255, 32768, 0xFFFF }; + + for (size_t i = 0; i < num_values; ++i) { + test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()), + static_cast<size_t>(values[i]), + 0U, + StubTest::GetEntrypoint(self, kQuickSet16Static), + self, + referrer); + + size_t res = test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()), + 0U, 0U, + StubTest::GetEntrypoint(self, kQuickGetCharStatic), + self, + referrer); + + EXPECT_EQ(values[i], static_cast<uint16_t>(res)) << "Iteration " << i; + } +#else + LOG(INFO) << "Skipping set_char_static as I don't know how to do that on " << kRuntimeISA; + // Force-print to std::cout so it's also outside the logcat. 
+ std::cout << "Skipping set_char_static as I don't know how to do that on " << kRuntimeISA << std::endl; +#endif +} +static void GetSetShortStatic(Handle<mirror::Object>* obj, Handle<mirror::ArtField>* f, Thread* self, + mirror::ArtMethod* referrer, StubTest* test) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { +#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__)) + constexpr size_t num_values = 6; + int16_t values[num_values] = { -0x7FFF, -32768, 0, 255, 32767, 0x7FFE }; + + for (size_t i = 0; i < num_values; ++i) { + test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()), + static_cast<size_t>(values[i]), + 0U, + StubTest::GetEntrypoint(self, kQuickSet16Static), + self, + referrer); + + size_t res = test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()), + 0U, 0U, + StubTest::GetEntrypoint(self, kQuickGetShortStatic), + self, + referrer); + + EXPECT_EQ(static_cast<int16_t>(res), values[i]) << "Iteration " << i; + } +#else + LOG(INFO) << "Skipping set_short_static as I don't know how to do that on " << kRuntimeISA; + // Force-print to std::cout so it's also outside the logcat. + std::cout << "Skipping set_short_static as I don't know how to do that on " << kRuntimeISA << std::endl; +#endif +} + +#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__)) +extern "C" void art_quick_set16_instance(void); +extern "C" void art_quick_get_short_instance(void); +extern "C" void art_quick_get_char_instance(void); +#endif + +static void GetSetCharInstance(Handle<mirror::Object>* obj, Handle<mirror::ArtField>* f, + Thread* self, mirror::ArtMethod* referrer, StubTest* test) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { +#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__)) + constexpr size_t num_values = 6; + uint16_t values[num_values] = { 0, 1, 2, 255, 32768, 0xFFFF }; + + for (size_t i = 0; i < num_values; ++i) { + test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()), + reinterpret_cast<size_t>(obj->Get()), + static_cast<size_t>(values[i]), + StubTest::GetEntrypoint(self, kQuickSet16Instance), + self, + referrer); + + uint16_t res = f->Get()->GetChar(obj->Get()); + EXPECT_EQ(res, values[i]) << "Iteration " << i; + f->Get()->SetChar<false>(obj->Get(), ++res); + + size_t res2 = test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()), + reinterpret_cast<size_t>(obj->Get()), + 0U, + StubTest::GetEntrypoint(self, kQuickGetCharInstance), + self, + referrer); + EXPECT_EQ(res, static_cast<uint16_t>(res2)); + } +#else + LOG(INFO) << "Skipping set_char_instance as I don't know how to do that on " << kRuntimeISA; + // Force-print to std::cout so it's also outside the logcat. 
+ std::cout << "Skipping set_char_instance as I don't know how to do that on " << kRuntimeISA << std::endl; +#endif +} +static void GetSetShortInstance(Handle<mirror::Object>* obj, Handle<mirror::ArtField>* f, + Thread* self, mirror::ArtMethod* referrer, StubTest* test) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { +#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__)) + constexpr size_t num_values = 6; + int16_t values[num_values] = { -0x7FFF, -32768, 0, 255, 32767, 0x7FFE }; + + for (size_t i = 0; i < num_values; ++i) { + test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()), + reinterpret_cast<size_t>(obj->Get()), + static_cast<size_t>(values[i]), + StubTest::GetEntrypoint(self, kQuickSet16Instance), + self, + referrer); + + int16_t res = f->Get()->GetShort(obj->Get()); + EXPECT_EQ(res, values[i]) << "Iteration " << i; + f->Get()->SetShort<false>(obj->Get(), ++res); + + size_t res2 = test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()), + reinterpret_cast<size_t>(obj->Get()), + 0U, + StubTest::GetEntrypoint(self, kQuickGetShortInstance), + self, + referrer); + EXPECT_EQ(res, static_cast<int16_t>(res2)); + } +#else + LOG(INFO) << "Skipping set_short_instance as I don't know how to do that on " << kRuntimeISA; + // Force-print to std::cout so it's also outside the logcat. + std::cout << "Skipping set_short_instance as I don't know how to do that on " << kRuntimeISA << std::endl; +#endif +} + +#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__)) +extern "C" void art_quick_set32_static(void); +extern "C" void art_quick_get32_static(void); +#endif + static void GetSet32Static(Handle<mirror::Object>* obj, Handle<mirror::ArtField>* f, Thread* self, mirror::ArtMethod* referrer, StubTest* test) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { @@ -1555,6 +1837,26 @@ static void TestFields(Thread* self, StubTest* test, Primitive::Type test_type) Primitive::Type type = f->GetTypeAsPrimitiveType(); switch (type) { + case Primitive::Type::kPrimBoolean: + if (test_type == type) { + GetSetBooleanStatic(&obj, &f, self, m.Get(), test); + } + break; + case Primitive::Type::kPrimByte: + if (test_type == type) { + GetSetByteStatic(&obj, &f, self, m.Get(), test); + } + break; + case Primitive::Type::kPrimChar: + if (test_type == type) { + GetSetCharStatic(&obj, &f, self, m.Get(), test); + } + break; + case Primitive::Type::kPrimShort: + if (test_type == type) { + GetSetShortStatic(&obj, &f, self, m.Get(), test); + } + break; case Primitive::Type::kPrimInt: if (test_type == type) { GetSet32Static(&obj, &f, self, m.Get(), test); @@ -1590,6 +1892,26 @@ static void TestFields(Thread* self, StubTest* test, Primitive::Type test_type) Primitive::Type type = f->GetTypeAsPrimitiveType(); switch (type) { + case Primitive::Type::kPrimBoolean: + if (test_type == type) { + GetSetBooleanInstance(&obj, &f, self, m.Get(), test); + } + break; + case Primitive::Type::kPrimByte: + if (test_type == type) { + GetSetByteInstance(&obj, &f, self, m.Get(), test); + } + break; + case Primitive::Type::kPrimChar: + if (test_type == type) { + GetSetCharInstance(&obj, &f, self, m.Get(), test); + } + break; + case Primitive::Type::kPrimShort: + if (test_type == type) { + GetSetShortInstance(&obj, &f, self, m.Get(), test); + } + break; case Primitive::Type::kPrimInt: if (test_type == type) { GetSet32Instance(&obj, &f, self, m.Get(), test); @@ -1618,6 +1940,33 @@ static void 
TestFields(Thread* self, StubTest* test, Primitive::Type test_type) // TODO: Deallocate things. } +TEST_F(StubTest, Fields8) { + TEST_DISABLED_FOR_HEAP_REFERENCE_POISONING(); + + Thread* self = Thread::Current(); + + self->TransitionFromSuspendedToRunnable(); + LoadDex("AllFields"); + bool started = runtime_->Start(); + CHECK(started); + + TestFields(self, this, Primitive::Type::kPrimBoolean); + TestFields(self, this, Primitive::Type::kPrimByte); +} + +TEST_F(StubTest, Fields16) { + TEST_DISABLED_FOR_HEAP_REFERENCE_POISONING(); + + Thread* self = Thread::Current(); + + self->TransitionFromSuspendedToRunnable(); + LoadDex("AllFields"); + bool started = runtime_->Start(); + CHECK(started); + + TestFields(self, this, Primitive::Type::kPrimChar); + TestFields(self, this, Primitive::Type::kPrimShort); +} TEST_F(StubTest, Fields32) { TEST_DISABLED_FOR_HEAP_REFERENCE_POISONING(); diff --git a/runtime/arch/x86/entrypoints_init_x86.cc b/runtime/arch/x86/entrypoints_init_x86.cc index a072996..682c502 100644 --- a/runtime/arch/x86/entrypoints_init_x86.cc +++ b/runtime/arch/x86/entrypoints_init_x86.cc @@ -47,12 +47,24 @@ extern "C" void* art_quick_initialize_type_and_verify_access(uint32_t, void*); extern "C" void* art_quick_resolve_string(void*, uint32_t); // Field entrypoints. +extern "C" int art_quick_set8_instance(uint32_t, void*, int8_t); +extern "C" int art_quick_set8_static(uint32_t, int8_t); +extern "C" int art_quick_set16_instance(uint32_t, void*, int16_t); +extern "C" int art_quick_set16_static(uint32_t, int16_t); extern "C" int art_quick_set32_instance(uint32_t, void*, int32_t); extern "C" int art_quick_set32_static(uint32_t, int32_t); extern "C" int art_quick_set64_instance(uint32_t, void*, int64_t); extern "C" int art_quick_set64_static(uint32_t, int64_t); extern "C" int art_quick_set_obj_instance(uint32_t, void*, void*); extern "C" int art_quick_set_obj_static(uint32_t, void*); +extern "C" int8_t art_quick_get_byte_instance(uint32_t, void*); +extern "C" uint8_t art_quick_get_boolean_instance(uint32_t, void*); +extern "C" int8_t art_quick_get_byte_static(uint32_t); +extern "C" uint8_t art_quick_get_boolean_static(uint32_t); +extern "C" int16_t art_quick_get_short_instance(uint32_t, void*); +extern "C" uint16_t art_quick_get_char_instance(uint32_t, void*); +extern "C" int16_t art_quick_get_short_static(uint32_t); +extern "C" uint16_t art_quick_get_char_static(uint32_t); extern "C" int32_t art_quick_get32_instance(uint32_t, void*); extern "C" int32_t art_quick_get32_static(uint32_t); extern "C" int64_t art_quick_get64_instance(uint32_t, void*); @@ -137,15 +149,27 @@ void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints, qpoints->pResolveString = art_quick_resolve_string; // Field + qpoints->pSet8Instance = art_quick_set8_instance; + qpoints->pSet8Static = art_quick_set8_static; + qpoints->pSet16Instance = art_quick_set16_instance; + qpoints->pSet16Static = art_quick_set16_static; qpoints->pSet32Instance = art_quick_set32_instance; qpoints->pSet32Static = art_quick_set32_static; qpoints->pSet64Instance = art_quick_set64_instance; qpoints->pSet64Static = art_quick_set64_static; qpoints->pSetObjInstance = art_quick_set_obj_instance; qpoints->pSetObjStatic = art_quick_set_obj_static; + qpoints->pGetByteInstance = art_quick_get_byte_instance; + qpoints->pGetBooleanInstance = art_quick_get_boolean_instance; + qpoints->pGetShortInstance = art_quick_get_short_instance; + qpoints->pGetCharInstance = art_quick_get_char_instance; qpoints->pGet32Instance = 
art_quick_get32_instance; qpoints->pGet64Instance = art_quick_get64_instance; qpoints->pGetObjInstance = art_quick_get_obj_instance; + qpoints->pGetByteStatic = art_quick_get_byte_static; + qpoints->pGetBooleanStatic = art_quick_get_boolean_static; + qpoints->pGetShortStatic = art_quick_get_short_static; + qpoints->pGetCharStatic = art_quick_get_char_static; qpoints->pGet32Static = art_quick_get32_static; qpoints->pGet64Static = art_quick_get64_static; qpoints->pGetObjStatic = art_quick_get_obj_static; diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S index c6e704a..6166cb5 100644 --- a/runtime/arch/x86/quick_entrypoints_x86.S +++ b/runtime/arch/x86/quick_entrypoints_x86.S @@ -384,6 +384,48 @@ MACRO3(THREE_ARG_DOWNCALL, c_name, cxx_name, return_macro) END_FUNCTION RAW_VAR(c_name, 0) END_MACRO +MACRO3(ONE_ARG_REF_DOWNCALL, c_name, cxx_name, return_macro) + DEFINE_FUNCTION RAW_VAR(c_name, 0) + SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC + mov %esp, %edx // remember SP + mov 32(%esp), %ecx // get referrer + // Outgoing argument set up + PUSH edx // pass SP + pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current() + CFI_ADJUST_CFA_OFFSET(4) + PUSH ecx // pass referrer + PUSH eax // pass arg1 + call VAR(cxx_name, 1) // cxx_name(arg1, referrer, Thread*, SP) + addl MACRO_LITERAL(16), %esp // pop arguments + CFI_ADJUST_CFA_OFFSET(-16) + RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address + CALL_MACRO(return_macro, 2) // return or deliver exception + END_FUNCTION RAW_VAR(c_name, 0) +END_MACRO + +MACRO3(TWO_ARG_REF_DOWNCALL, c_name, cxx_name, return_macro) + DEFINE_FUNCTION RAW_VAR(c_name, 0) + SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC + mov %esp, %ebx // remember SP + mov 32(%esp), %edx // get referrer + subl LITERAL(12), %esp // alignment padding + CFI_ADJUST_CFA_OFFSET(12) + PUSH ebx // pass SP + pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current() + CFI_ADJUST_CFA_OFFSET(4) + // Outgoing argument set up + PUSH edx // pass referrer + PUSH ecx // pass arg2 + PUSH eax // pass arg1 + call VAR(cxx_name, 1) // cxx_name(arg1, arg2, referrer, Thread*, SP) + addl LITERAL(32), %esp // pop arguments + CFI_ADJUST_CFA_OFFSET(-32) + RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address + CALL_MACRO(return_macro, 2) // return or deliver exception + END_FUNCTION RAW_VAR(c_name, 0) +END_MACRO + + MACRO0(RETURN_IF_RESULT_IS_NON_ZERO) testl %eax, %eax // eax == 0 ? 
jz 1f // if eax == 0 goto 1 @@ -814,6 +856,46 @@ DEFINE_FUNCTION art_quick_lushr ret END_FUNCTION art_quick_lushr +DEFINE_FUNCTION art_quick_set8_instance + SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC + mov %esp, %ebx // remember SP + subl LITERAL(8), %esp // alignment padding + CFI_ADJUST_CFA_OFFSET(8) + PUSH ebx // pass SP + pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current() + CFI_ADJUST_CFA_OFFSET(4) + mov 32(%ebx), %ebx // get referrer + PUSH ebx // pass referrer + PUSH edx // pass new_val + PUSH ecx // pass object + PUSH eax // pass field_idx + call PLT_SYMBOL(artSet8InstanceFromCode) // (field_idx, Object*, new_val, referrer, Thread*, SP) + addl LITERAL(32), %esp // pop arguments + CFI_ADJUST_CFA_OFFSET(-32) + RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address + RETURN_IF_EAX_ZERO // return or deliver exception +END_FUNCTION art_quick_set8_instance + +DEFINE_FUNCTION art_quick_set16_instance + SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC + mov %esp, %ebx // remember SP + subl LITERAL(8), %esp // alignment padding + CFI_ADJUST_CFA_OFFSET(8) + PUSH ebx // pass SP + pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current() + CFI_ADJUST_CFA_OFFSET(4) + mov 32(%ebx), %ebx // get referrer + PUSH ebx // pass referrer + PUSH edx // pass new_val + PUSH ecx // pass object + PUSH eax // pass field_idx + call PLT_SYMBOL(artSet16InstanceFromCode) // (field_idx, Object*, new_val, referrer, Thread*, SP) + addl LITERAL(32), %esp // pop arguments + CFI_ADJUST_CFA_OFFSET(-32) + RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address + RETURN_IF_EAX_ZERO // return or deliver exception +END_FUNCTION art_quick_set16_instance + DEFINE_FUNCTION art_quick_set32_instance SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC mov %esp, %ebx // remember SP @@ -873,8 +955,15 @@ DEFINE_FUNCTION art_quick_set_obj_instance RETURN_IF_EAX_ZERO // return or deliver exception END_FUNCTION art_quick_set_obj_instance -DEFINE_FUNCTION art_quick_get32_instance - SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC +TWO_ARG_REF_DOWNCALL art_quick_get_byte_instance, artGetByteInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION +TWO_ARG_REF_DOWNCALL art_quick_get_boolean_instance, artGetBooleanInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION +TWO_ARG_REF_DOWNCALL art_quick_get_short_instance, artGetShortInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION +TWO_ARG_REF_DOWNCALL art_quick_get_char_instance, artGetCharInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION +TWO_ARG_REF_DOWNCALL art_quick_get32_instance, artGet32InstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION +TWO_ARG_REF_DOWNCALL art_quick_get_obj_instance, artGetObjInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION + +DEFINE_FUNCTION art_quick_get64_instance + SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC mov %esp, %ebx // remember SP mov 32(%esp), %edx // get referrer subl LITERAL(12), %esp // alignment padding @@ -885,14 +974,14 @@ DEFINE_FUNCTION art_quick_get32_instance PUSH edx // pass referrer PUSH ecx // pass object PUSH eax // pass field_idx - call SYMBOL(artGet32InstanceFromCode) // (field_idx, Object*, referrer, Thread*, SP) + call SYMBOL(artGet64InstanceFromCode) // (field_idx, Object*, referrer, Thread*, SP) addl LITERAL(32), %esp // pop arguments CFI_ADJUST_CFA_OFFSET(-32) RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address 
RETURN_OR_DELIVER_PENDING_EXCEPTION // return or deliver exception -END_FUNCTION art_quick_get32_instance +END_FUNCTION art_quick_get64_instance -DEFINE_FUNCTION art_quick_get64_instance +DEFINE_FUNCTION art_quick_set8_static SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC mov %esp, %ebx // remember SP mov 32(%esp), %edx // get referrer @@ -902,16 +991,16 @@ DEFINE_FUNCTION art_quick_get64_instance pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current() CFI_ADJUST_CFA_OFFSET(4) PUSH edx // pass referrer - PUSH ecx // pass object + PUSH ecx // pass new_val PUSH eax // pass field_idx - call SYMBOL(artGet64InstanceFromCode) // (field_idx, Object*, referrer, Thread*, SP) + call SYMBOL(artSet8StaticFromCode) // (field_idx, new_val, referrer, Thread*, SP) addl LITERAL(32), %esp // pop arguments CFI_ADJUST_CFA_OFFSET(-32) RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address - RETURN_OR_DELIVER_PENDING_EXCEPTION // return or deliver exception -END_FUNCTION art_quick_get64_instance + RETURN_IF_EAX_ZERO // return or deliver exception +END_FUNCTION art_quick_set8_static -DEFINE_FUNCTION art_quick_get_obj_instance +DEFINE_FUNCTION art_quick_set16_static SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC mov %esp, %ebx // remember SP mov 32(%esp), %edx // get referrer @@ -921,14 +1010,14 @@ DEFINE_FUNCTION art_quick_get_obj_instance pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current() CFI_ADJUST_CFA_OFFSET(4) PUSH edx // pass referrer - PUSH ecx // pass object + PUSH ecx // pass new_val PUSH eax // pass field_idx - call SYMBOL(artGetObjInstanceFromCode) // (field_idx, Object*, referrer, Thread*, SP) + call SYMBOL(artSet16StaticFromCode) // (field_idx, new_val, referrer, Thread*, SP) addl LITERAL(32), %esp // pop arguments CFI_ADJUST_CFA_OFFSET(-32) RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address - RETURN_OR_DELIVER_PENDING_EXCEPTION // return or deliver exception -END_FUNCTION art_quick_get_obj_instance + RETURN_IF_EAX_ZERO // return or deliver exception +END_FUNCTION art_quick_set16_static DEFINE_FUNCTION art_quick_set32_static SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC @@ -987,53 +1076,13 @@ DEFINE_FUNCTION art_quick_set_obj_static RETURN_IF_EAX_ZERO // return or deliver exception END_FUNCTION art_quick_set_obj_static -DEFINE_FUNCTION art_quick_get32_static - SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC - mov %esp, %edx // remember SP - mov 32(%esp), %ecx // get referrer - PUSH edx // pass SP - pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current() - CFI_ADJUST_CFA_OFFSET(4) - PUSH ecx // pass referrer - PUSH eax // pass field_idx - call SYMBOL(artGet32StaticFromCode) // (field_idx, referrer, Thread*, SP) - addl LITERAL(16), %esp // pop arguments - CFI_ADJUST_CFA_OFFSET(-16) - RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address - RETURN_OR_DELIVER_PENDING_EXCEPTION // return or deliver exception -END_FUNCTION art_quick_get32_static - -DEFINE_FUNCTION art_quick_get64_static - SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC - mov %esp, %edx // remember SP - mov 32(%esp), %ecx // get referrer - PUSH edx // pass SP - pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current() - CFI_ADJUST_CFA_OFFSET(4) - PUSH ecx // pass referrer - PUSH eax // pass field_idx - call SYMBOL(artGet64StaticFromCode) // (field_idx, referrer, Thread*, SP) - addl LITERAL(16), %esp // pop arguments - CFI_ADJUST_CFA_OFFSET(-16) - 
RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address - RETURN_OR_DELIVER_PENDING_EXCEPTION // return or deliver exception -END_FUNCTION art_quick_get64_static - -DEFINE_FUNCTION art_quick_get_obj_static - SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC - mov %esp, %edx // remember SP - mov 32(%esp), %ecx // get referrer - PUSH edx // pass SP - pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current() - CFI_ADJUST_CFA_OFFSET(4) - PUSH ecx // pass referrer - PUSH eax // pass field_idx - call SYMBOL(artGetObjStaticFromCode) // (field_idx, referrer, Thread*, SP) - addl LITERAL(16), %esp // pop arguments - CFI_ADJUST_CFA_OFFSET(-16) - RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address - RETURN_OR_DELIVER_PENDING_EXCEPTION // return or deliver exception -END_FUNCTION art_quick_get_obj_static +ONE_ARG_REF_DOWNCALL art_quick_get_byte_static, artGetByteStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION +ONE_ARG_REF_DOWNCALL art_quick_get_boolean_static, artGetBooleanStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION +ONE_ARG_REF_DOWNCALL art_quick_get_short_static, artGetShortStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION +ONE_ARG_REF_DOWNCALL art_quick_get_char_static, artGetCharStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION +ONE_ARG_REF_DOWNCALL art_quick_get32_static, artGet32StaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION +ONE_ARG_REF_DOWNCALL art_quick_get64_static, artGet64StaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION +ONE_ARG_REF_DOWNCALL art_quick_get_obj_static, artGetObjStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION DEFINE_FUNCTION art_quick_proxy_invoke_handler SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME // save frame and Method* diff --git a/runtime/arch/x86_64/entrypoints_init_x86_64.cc b/runtime/arch/x86_64/entrypoints_init_x86_64.cc index 35a0cf4..c9028e1 100644 --- a/runtime/arch/x86_64/entrypoints_init_x86_64.cc +++ b/runtime/arch/x86_64/entrypoints_init_x86_64.cc @@ -48,12 +48,24 @@ extern "C" void* art_quick_initialize_type_and_verify_access(uint32_t, void*); extern "C" void* art_quick_resolve_string(void*, uint32_t); // Field entrypoints.
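A note on the x86 hunk above: the hand-written static-getter bodies are replaced by ONE_ARG_REF_DOWNCALL invocations, so each thunk's symbol, C entrypoint, and return handling are stamped out together instead of copy-pasted; the stale .extern artSet32StaticFromCode comments fixed earlier in this same patch are exactly the kind of drift this avoids. A rough C++ illustration of the stamping pattern, not the real assembler macro:

#include <cstdint>
#include <cstdio>

// Stand-ins for the artGet*StaticFromCode C entrypoints.
extern "C" int32_t Get32Impl(uint32_t idx) { return static_cast<int32_t>(idx); }
extern "C" int64_t Get64Impl(uint32_t idx) { return idx; }

// One macro stamps out each thunk; the real stubs also save and restore
// the ref-only callee-save frame around the call.
#define ONE_ARG_DOWNCALL(name, target, ret_type) \
  extern "C" ret_type name(uint32_t idx) { return target(idx); }

ONE_ARG_DOWNCALL(quick_get32_static, Get32Impl, int32_t)
ONE_ARG_DOWNCALL(quick_get64_static, Get64Impl, int64_t)

int main() {
  std::printf("%d %lld\n", quick_get32_static(7),
              static_cast<long long>(quick_get64_static(7)));
  return 0;
}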
+extern "C" int art_quick_set8_instance(uint32_t, void*, int8_t); +extern "C" int art_quick_set8_static(uint32_t, int8_t); +extern "C" int art_quick_set16_instance(uint32_t, void*, int16_t); +extern "C" int art_quick_set16_static(uint32_t, int16_t); extern "C" int art_quick_set32_instance(uint32_t, void*, int32_t); extern "C" int art_quick_set32_static(uint32_t, int32_t); extern "C" int art_quick_set64_instance(uint32_t, void*, int64_t); extern "C" int art_quick_set64_static(uint32_t, int64_t); extern "C" int art_quick_set_obj_instance(uint32_t, void*, void*); extern "C" int art_quick_set_obj_static(uint32_t, void*); +extern "C" int8_t art_quick_get_byte_instance(uint32_t, void*); +extern "C" uint8_t art_quick_get_boolean_instance(uint32_t, void*); +extern "C" int8_t art_quick_get_byte_static(uint32_t); +extern "C" uint8_t art_quick_get_boolean_static(uint32_t); +extern "C" int16_t art_quick_get_short_instance(uint32_t, void*); +extern "C" uint16_t art_quick_get_char_instance(uint32_t, void*); +extern "C" int16_t art_quick_get_short_static(uint32_t); +extern "C" uint16_t art_quick_get_char_static(uint32_t); extern "C" int32_t art_quick_get32_instance(uint32_t, void*); extern "C" int32_t art_quick_get32_static(uint32_t); extern "C" int64_t art_quick_get64_instance(uint32_t, void*); @@ -141,15 +153,27 @@ void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints, qpoints->pResolveString = art_quick_resolve_string; // Field + qpoints->pSet8Instance = art_quick_set8_instance; + qpoints->pSet8Static = art_quick_set8_static; + qpoints->pSet16Instance = art_quick_set16_instance; + qpoints->pSet16Static = art_quick_set16_static; qpoints->pSet32Instance = art_quick_set32_instance; qpoints->pSet32Static = art_quick_set32_static; qpoints->pSet64Instance = art_quick_set64_instance; qpoints->pSet64Static = art_quick_set64_static; qpoints->pSetObjInstance = art_quick_set_obj_instance; qpoints->pSetObjStatic = art_quick_set_obj_static; + qpoints->pGetByteInstance = art_quick_get_byte_instance; + qpoints->pGetBooleanInstance = art_quick_get_boolean_instance; + qpoints->pGetShortInstance = art_quick_get_short_instance; + qpoints->pGetCharInstance = art_quick_get_char_instance; qpoints->pGet32Instance = art_quick_get32_instance; qpoints->pGet64Instance = art_quick_get64_instance; qpoints->pGetObjInstance = art_quick_get_obj_instance; + qpoints->pGetByteStatic = art_quick_get_byte_static; + qpoints->pGetBooleanStatic = art_quick_get_boolean_static; + qpoints->pGetShortStatic = art_quick_get_short_static; + qpoints->pGetCharStatic = art_quick_get_char_static; qpoints->pGet32Static = art_quick_get32_static; qpoints->pGet64Static = art_quick_get64_static; qpoints->pGetObjStatic = art_quick_get_obj_static; diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S index f95bd22..e9b5a72 100644 --- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S +++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S @@ -1076,17 +1076,29 @@ UNIMPLEMENTED art_quick_lshl UNIMPLEMENTED art_quick_lshr UNIMPLEMENTED art_quick_lushr +THREE_ARG_REF_DOWNCALL art_quick_set8_instance, artSet8InstanceFromCode, RETURN_IF_EAX_ZERO +THREE_ARG_REF_DOWNCALL art_quick_set16_instance, artSet16InstanceFromCode, RETURN_IF_EAX_ZERO THREE_ARG_REF_DOWNCALL art_quick_set32_instance, artSet32InstanceFromCode, RETURN_IF_EAX_ZERO THREE_ARG_DOWNCALL art_quick_set64_instance, artSet64InstanceFromCode, RETURN_IF_EAX_ZERO THREE_ARG_REF_DOWNCALL art_quick_set_obj_instance, 
artSetObjInstanceFromCode, RETURN_IF_EAX_ZERO +TWO_ARG_REF_DOWNCALL art_quick_get_byte_instance, artGetByteInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION +TWO_ARG_REF_DOWNCALL art_quick_get_boolean_instance, artGetBooleanInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION +TWO_ARG_REF_DOWNCALL art_quick_get_short_instance, artGetShortInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION +TWO_ARG_REF_DOWNCALL art_quick_get_char_instance, artGetCharInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION TWO_ARG_REF_DOWNCALL art_quick_get32_instance, artGet32InstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION TWO_ARG_REF_DOWNCALL art_quick_get64_instance, artGet64InstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION TWO_ARG_REF_DOWNCALL art_quick_get_obj_instance, artGetObjInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION +TWO_ARG_REF_DOWNCALL art_quick_set8_static, artSet8StaticFromCode, RETURN_IF_EAX_ZERO +TWO_ARG_REF_DOWNCALL art_quick_set16_static, artSet16StaticFromCode, RETURN_IF_EAX_ZERO TWO_ARG_REF_DOWNCALL art_quick_set32_static, artSet32StaticFromCode, RETURN_IF_EAX_ZERO TWO_ARG_REF_DOWNCALL art_quick_set_obj_static, artSetObjStaticFromCode, RETURN_IF_EAX_ZERO +ONE_ARG_REF_DOWNCALL art_quick_get_byte_static, artGetByteStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION +ONE_ARG_REF_DOWNCALL art_quick_get_boolean_static, artGetBooleanStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION +ONE_ARG_REF_DOWNCALL art_quick_get_short_static, artGetShortStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION +ONE_ARG_REF_DOWNCALL art_quick_get_char_static, artGetCharStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION ONE_ARG_REF_DOWNCALL art_quick_get32_static, artGet32StaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION ONE_ARG_REF_DOWNCALL art_quick_get64_static, artGet64StaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION ONE_ARG_REF_DOWNCALL art_quick_get_obj_static, artGetObjStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc index dcf8f5f..9d85fa6 100644 --- a/runtime/class_linker.cc +++ b/runtime/class_linker.cc @@ -2078,6 +2078,8 @@ uint32_t ClassLinker::SizeOfClassWithoutEmbeddedTables(const DexFile& dex_file, const DexFile::ClassDef& dex_class_def) { const byte* class_data = dex_file.GetClassData(dex_class_def); size_t num_ref = 0; + size_t num_8 = 0; + size_t num_16 = 0; size_t num_32 = 0; size_t num_64 = 0; if (class_data != NULL) { @@ -2085,16 +2087,33 @@ uint32_t ClassLinker::SizeOfClassWithoutEmbeddedTables(const DexFile& dex_file, const DexFile::FieldId& field_id = dex_file.GetFieldId(it.GetMemberIndex()); const char* descriptor = dex_file.GetFieldTypeDescriptor(field_id); char c = descriptor[0]; - if (c == 'L' || c == '[') { - num_ref++; - } else if (c == 'J' || c == 'D') { - num_64++; - } else { - num_32++; + switch (c) { + case 'L': + case '[': + num_ref++; + break; + case 'J': + case 'D': + num_64++; + break; + case 'I': + case 'F': + num_32++; + break; + case 'S': + case 'C': + num_16++; + break; + case 'B': + case 'Z': + num_8++; + break; + default: + LOG(FATAL) << "Unknown descriptor: " << c; } } } - return mirror::Class::ComputeClassSize(false, 0, num_32, num_64, num_ref); + return mirror::Class::ComputeClassSize(false, 0, num_8, num_16, num_32, num_64, num_ref); } OatFile::OatClass ClassLinker::FindOatClass(const DexFile& dex_file, uint16_t class_def_idx, @@ -2455,6 +2474,89 @@ void ClassLinker::LinkCode(ConstHandle<mirror::ArtMethod> method, have_portable_code); } +template<int n> +void 
ClassLinker::AlignFields(size_t& current_field, const size_t num_fields, + MemberOffset& field_offset, + mirror::ObjectArray<mirror::ArtField>* fields, + std::deque<mirror::ArtField*>& grouped_and_sorted_fields) { + if (current_field != num_fields && !IsAligned<n>(field_offset.Uint32Value())) { + size_t gap = (n - (field_offset.Uint32Value() & (n - 1))); + // Avoid padding unless a field that requires alignment actually exists. + bool needs_padding = false; + for (size_t i = 0; i < grouped_and_sorted_fields.size(); ++i) { + mirror::ArtField* field = grouped_and_sorted_fields[i]; + Primitive::Type type = field->GetTypeAsPrimitiveType(); + CHECK(type != Primitive::kPrimNot) << PrettyField(field); // should be primitive types + // Too big to fill the gap. + if (Primitive::ComponentSize(type) >= n) { + needs_padding = true; + continue; + } + if (needs_padding) { + // Shift as many fields as possible to fill the gaps. + size_t cursor = i; + mirror::ArtField* shift_field; + Primitive::Type shift_type; + while (cursor < grouped_and_sorted_fields.size() && gap > 0) { + // Find a field that can fit in the current gap. + do { + DCHECK_LT(cursor, grouped_and_sorted_fields.size()) << "Cursor overran fields."; + shift_field = grouped_and_sorted_fields[cursor]; + shift_type = shift_field->GetTypeAsPrimitiveType(); + CHECK(shift_type != Primitive::kPrimNot) << PrettyField(shift_field); + // Can fit. + if (Primitive::ComponentSize(shift_type) <= gap) { + break; + } + ++cursor; + } while (cursor < grouped_and_sorted_fields.size()); + + if (cursor < grouped_and_sorted_fields.size()) { + fields->Set<false>(current_field++, shift_field); + shift_field->SetOffset(field_offset); + field_offset = MemberOffset(field_offset.Uint32Value() + + Primitive::ComponentSize(shift_type)); + gap -= Primitive::ComponentSize(shift_type); + grouped_and_sorted_fields.erase(grouped_and_sorted_fields.begin() + cursor); + } + } + } + break; + } + // No further shuffling available, pad whatever space is left. + if (needs_padding) { + field_offset = MemberOffset(field_offset.Uint32Value() + gap); + } + DCHECK(!needs_padding || IsAligned<n>(field_offset.Uint32Value())) << "Needed " << + n << " byte alignment, but not aligned after align with offset: " << + field_offset.Uint32Value(); + } +} + +template<int n> +void ClassLinker::ShuffleForward(size_t& current_field, const size_t num_fields, + MemberOffset& field_offset, + mirror::ObjectArray<mirror::ArtField>* fields, + std::deque<mirror::ArtField*>& grouped_and_sorted_fields) { + while (!grouped_and_sorted_fields.empty() && current_field != num_fields) { + mirror::ArtField* field = grouped_and_sorted_fields.front(); + Primitive::Type type = field->GetTypeAsPrimitiveType(); + CHECK(type != Primitive::kPrimNot) << PrettyField(field); // should be primitive types + if (Primitive::ComponentSize(type) != n) { + DCHECK_LT(Primitive::ComponentSize(type), static_cast<unsigned int>(n)) << + "Encountered a larger field (" << Primitive::ComponentSize(type) << ") " << + "while shuffling fields of size: " << n; + // We should've shuffled all fields of size n forward by this point.
+ break; + } + DCHECK(IsAligned<n>(field_offset.Uint32Value())); + grouped_and_sorted_fields.pop_front(); + fields->Set<false>(current_field++, field); + field->SetOffset(field_offset); + field_offset = MemberOffset(field_offset.Uint32Value() + n); + } +} + void ClassLinker::LoadClass(const DexFile& dex_file, const DexFile::ClassDef& dex_class_def, ConstHandle<mirror::Class> klass, @@ -4674,20 +4776,20 @@ struct LinkFieldsComparator { // No thread safety analysis as will be called from STL. Checked lock held in constructor. bool operator()(mirror::ArtField* field1, mirror::ArtField* field2) NO_THREAD_SAFETY_ANALYSIS { - // First come reference fields, then 64-bit, and finally 32-bit + // First come reference fields, then 64-bit, then 32-bit, and then 16-bit, then finally 8-bit. Primitive::Type type1 = field1->GetTypeAsPrimitiveType(); Primitive::Type type2 = field2->GetTypeAsPrimitiveType(); if (type1 != type2) { bool is_primitive1 = type1 != Primitive::kPrimNot; bool is_primitive2 = type2 != Primitive::kPrimNot; - bool is64bit1 = is_primitive1 && (type1 == Primitive::kPrimLong || - type1 == Primitive::kPrimDouble); - bool is64bit2 = is_primitive2 && (type2 == Primitive::kPrimLong || - type2 == Primitive::kPrimDouble); - int order1 = !is_primitive1 ? 0 : (is64bit1 ? 1 : 2); - int order2 = !is_primitive2 ? 0 : (is64bit2 ? 1 : 2); - if (order1 != order2) { - return order1 < order2; + if (type1 != type2) { + if (is_primitive1 && is_primitive2) { + // Larger primitive types go first. + return Primitive::ComponentSize(type1) > Primitive::ComponentSize(type2); + } else { + // Reference always goes first. + return !is_primitive1; + } } } // same basic group? then sort by string. @@ -4709,7 +4811,7 @@ bool ClassLinker::LinkFields(ConstHandle<mirror::Class> klass, bool is_static, s if (klass->ShouldHaveEmbeddedImtAndVTable()) { // Static fields come after the embedded tables. base = mirror::Class::ComputeClassSize(true, klass->GetVTableDuringLinking()->GetLength(), - 0, 0, 0); + 0, 0, 0, 0, 0); } field_offset = MemberOffset(base); } else { @@ -4726,6 +4828,8 @@ bool ClassLinker::LinkFields(ConstHandle<mirror::Class> klass, bool is_static, s // we want a relatively stable order so that adding new fields // minimizes disruption of C++ version such as Class and Method. std::deque<mirror::ArtField*> grouped_and_sorted_fields; + const char* old_no_suspend_cause = Thread::Current()->StartAssertNoThreadSuspension( + "Naked ArtField references in deque"); for (size_t i = 0; i < num_fields; i++) { mirror::ArtField* f = fields->Get(i); CHECK(f != NULL) << PrettyClass(klass.Get()); @@ -4734,7 +4838,7 @@ bool ClassLinker::LinkFields(ConstHandle<mirror::Class> klass, bool is_static, s std::sort(grouped_and_sorted_fields.begin(), grouped_and_sorted_fields.end(), LinkFieldsComparator()); - // References should be at the front. + // References should be at the front, unless we need to pad. size_t current_field = 0; size_t num_reference_fields = 0; for (; current_field < num_fields; current_field++) { @@ -4751,44 +4855,21 @@ bool ClassLinker::LinkFields(ConstHandle<mirror::Class> klass, bool is_static, s field_offset = MemberOffset(field_offset.Uint32Value() + sizeof(uint32_t)); } - // Now we want to pack all of the double-wide fields together. If - // we're not aligned, though, we want to shuffle one 32-bit field - // into place. If we can't find one, we'll have to pad it. 
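To make the replacement packing scheme concrete before the old hand-rolled 8-byte case is removed below: fields are sorted references-first and then by decreasing primitive size, AlignFields<n> fills any alignment gap by pulling a smaller field forward (padding only as a last resort), and the ShuffleForward<n> cascade then lays out each size class in turn. A toy simulation under invented field names; the runtime works on ArtField objects, not this struct:

#include <algorithm>
#include <cstdio>
#include <deque>

// Invented example fields; sizes in bytes (8 = long/double, 4 = int/float
// or a 32-bit reference slot, 2 = short/char, 1 = byte/boolean).
struct Field { const char* name; unsigned size; bool is_ref; };

int main() {
  std::deque<Field> fields = {
    {"id", 8, false}, {"count", 4, false}, {"ch", 2, false},
    {"flag", 1, false}, {"next", 4, true}, {"prev", 4, true},
  };
  // The comparator from the patch: references first, larger primitives next.
  std::stable_sort(fields.begin(), fields.end(), [](const Field& a, const Field& b) {
    if (a.is_ref != b.is_ref) return a.is_ref;
    return a.size > b.size;
  });
  unsigned offset = 4;  // pretend the object header ends 4-byte aligned
  while (!fields.empty()) {
    unsigned need = fields.front().size;
    if (offset % need != 0) {
      unsigned gap = need - offset % need;
      // AlignFields<n>-style gap fill: pull forward the first later field
      // small enough to sit in the gap; pad only if nothing fits.
      auto it = std::find_if(fields.begin(), fields.end(),
                             [gap](const Field& f) { return f.size <= gap; });
      if (it == fields.end()) {
        offset += gap;
      } else {
        std::rotate(fields.begin(), it, it + 1);
        continue;  // re-check alignment for the pulled-forward field
      }
    }
    const Field f = fields.front();
    fields.pop_front();
    std::printf("%-5s size %u at offset %u\n", f.name, f.size, offset);
    offset += f.size;
  }
  return 0;  // "count" lands at offset 12 so the 8-byte "id" starts aligned at 16
}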
- if (current_field != num_fields && !IsAligned<8>(field_offset.Uint32Value())) { - for (size_t i = 0; i < grouped_and_sorted_fields.size(); i++) { - mirror::ArtField* field = grouped_and_sorted_fields[i]; - Primitive::Type type = field->GetTypeAsPrimitiveType(); - CHECK(type != Primitive::kPrimNot) << PrettyField(field); // should be primitive types - if (type == Primitive::kPrimLong || type == Primitive::kPrimDouble) { - continue; - } - fields->Set<false>(current_field++, field); - field->SetOffset(field_offset); - // drop the consumed field - grouped_and_sorted_fields.erase(grouped_and_sorted_fields.begin() + i); - break; - } - // whether we found a 32-bit field for padding or not, we advance - field_offset = MemberOffset(field_offset.Uint32Value() + sizeof(uint32_t)); - } + AlignFields<8>(current_field, num_fields, field_offset, fields, grouped_and_sorted_fields); + ShuffleForward<8>(current_field, num_fields, field_offset, fields, grouped_and_sorted_fields); + // No need for further alignment, start of object is 4-byte aligned. + ShuffleForward<4>(current_field, num_fields, field_offset, fields, grouped_and_sorted_fields); + ShuffleForward<2>(current_field, num_fields, field_offset, fields, grouped_and_sorted_fields); + ShuffleForward<1>(current_field, num_fields, field_offset, fields, grouped_and_sorted_fields); + CHECK(grouped_and_sorted_fields.empty()) << "Missed " << grouped_and_sorted_fields.size() << + " fields."; - // Alignment is good, shuffle any double-wide fields forward, and - // finish assigning field offsets to all fields. - DCHECK(current_field == num_fields || IsAligned<8>(field_offset.Uint32Value())) - << PrettyClass(klass.Get()); - while (!grouped_and_sorted_fields.empty()) { - mirror::ArtField* field = grouped_and_sorted_fields.front(); - grouped_and_sorted_fields.pop_front(); - Primitive::Type type = field->GetTypeAsPrimitiveType(); - CHECK(type != Primitive::kPrimNot) << PrettyField(field); // should be primitive types - fields->Set<false>(current_field, field); - field->SetOffset(field_offset); - field_offset = MemberOffset(field_offset.Uint32Value() + - ((type == Primitive::kPrimLong || type == Primitive::kPrimDouble) - ? sizeof(uint64_t) - : sizeof(uint32_t))); - current_field++; + // Subclass expects superclass to be 4 byte aligned at end. + if (!IsAligned<4>(field_offset.Uint32Value())) { + field_offset = MemberOffset(RoundUp(field_offset.Uint32Value(), 4)); } + CHECK(IsAligned<4>(field_offset.Uint32Value())); + Thread::Current()->EndAssertNoThreadSuspension(old_no_suspend_cause); // We lie to the GC about the java.lang.ref.Reference.referent field, so it doesn't scan it. 
if (!is_static && klass->DescriptorEquals("Ljava/lang/ref/Reference;")) { diff --git a/runtime/class_linker.h b/runtime/class_linker.h index 14a9e4a..67a7b23 100644 --- a/runtime/class_linker.h +++ b/runtime/class_linker.h @@ -17,6 +17,7 @@ #ifndef ART_RUNTIME_CLASS_LINKER_H_ #define ART_RUNTIME_CLASS_LINKER_H_ +#include <deque> #include <string> #include <utility> #include <vector> @@ -531,6 +532,18 @@ class ClassLinker { void LinkCode(ConstHandle<mirror::ArtMethod> method, const OatFile::OatClass* oat_class, const DexFile& dex_file, uint32_t dex_method_index, uint32_t method_index) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + template<int n> + void AlignFields(size_t& current_field, const size_t num_fields, + MemberOffset& field_offset, + mirror::ObjectArray<mirror::ArtField>* fields, + std::deque<mirror::ArtField*>& grouped_and_sorted_fields) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + template<int n> + void ShuffleForward(size_t& current_field, const size_t num_fields, + MemberOffset& field_offset, + mirror::ObjectArray<mirror::ArtField>* fields, + std::deque<mirror::ArtField*>& grouped_and_sorted_fields) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void CreateReferenceInstanceOffsets(ConstHandle<mirror::Class> klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); diff --git a/runtime/common_throws.cc b/runtime/common_throws.cc index bb48be3..846216c 100644 --- a/runtime/common_throws.cc +++ b/runtime/common_throws.cc @@ -449,6 +449,10 @@ void ThrowNullPointerExceptionFromDexPC(const ThrowLocation& throw_location) { break; } case Instruction::IPUT_QUICK: + case Instruction::IPUT_BOOLEAN_QUICK: + case Instruction::IPUT_BYTE_QUICK: + case Instruction::IPUT_CHAR_QUICK: + case Instruction::IPUT_SHORT_QUICK: case Instruction::IPUT_WIDE_QUICK: case Instruction::IPUT_OBJECT_QUICK: { // Since we replaced the field index, we ask the verifier to tell us which diff --git a/runtime/dex_instruction_list.h b/runtime/dex_instruction_list.h index 103b0d7..64c9185 100644 --- a/runtime/dex_instruction_list.h +++ b/runtime/dex_instruction_list.h @@ -253,10 +253,10 @@ V(0xE8, IPUT_OBJECT_QUICK, "iput-object-quick", k22c, false, kFieldRef, kContinue | kThrow | kStore | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRuntimeOnly) \ V(0xE9, INVOKE_VIRTUAL_QUICK, "invoke-virtual-quick", k35c, false, kMethodRef, kContinue | kThrow | kInvoke, kVerifyVarArgNonZero | kVerifyRuntimeOnly) \ V(0xEA, INVOKE_VIRTUAL_RANGE_QUICK, "invoke-virtual/range-quick", k3rc, false, kMethodRef, kContinue | kThrow | kInvoke, kVerifyVarArgRangeNonZero | kVerifyRuntimeOnly) \ - V(0xEB, UNUSED_EB, "unused-eb", k10x, false, kUnknown, 0, kVerifyError) \ - V(0xEC, UNUSED_EC, "unused-ec", k10x, false, kUnknown, 0, kVerifyError) \ - V(0xED, UNUSED_ED, "unused-ed", k10x, false, kUnknown, 0, kVerifyError) \ - V(0xEE, UNUSED_EE, "unused-ee", k10x, false, kUnknown, 0, kVerifyError) \ + V(0xEB, IPUT_BOOLEAN_QUICK, "iput-boolean-quick", k22c, false, kFieldRef, kContinue | kThrow | kStore | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRuntimeOnly) \ + V(0xEC, IPUT_BYTE_QUICK, "iput-byte-quick", k22c, false, kFieldRef, kContinue | kThrow | kStore | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRuntimeOnly) \ + V(0xED, IPUT_CHAR_QUICK, "iput-char-quick", k22c, false, kFieldRef, kContinue | kThrow | kStore | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRuntimeOnly) \ + V(0xEE, IPUT_SHORT_QUICK, "iput-short-quick", k22c, false, kFieldRef, kContinue | kThrow | kStore | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRuntimeOnly) \ V(0xEF, UNUSED_EF, "unused-ef",
k10x, false, kUnknown, 0, kVerifyError) \ V(0xF0, UNUSED_F0, "unused-f0", k10x, false, kUnknown, 0, kVerifyError) \ V(0xF1, UNUSED_F1, "unused-f1", k10x, false, kUnknown, 0, kVerifyError) \ diff --git a/runtime/entrypoints/quick/quick_entrypoints_list.h b/runtime/entrypoints/quick/quick_entrypoints_list.h index f858743..fbc7913 100644 --- a/runtime/entrypoints/quick/quick_entrypoints_list.h +++ b/runtime/entrypoints/quick/quick_entrypoints_list.h @@ -38,12 +38,24 @@ V(InitializeType, void*, uint32_t, void*) \ V(ResolveString, void*, void*, uint32_t) \ \ + V(Set8Instance, int, uint32_t, void*, int8_t) \ + V(Set8Static, int, uint32_t, int8_t) \ + V(Set16Instance, int, uint32_t, void*, int16_t) \ + V(Set16Static, int, uint32_t, int16_t) \ V(Set32Instance, int, uint32_t, void*, int32_t) \ V(Set32Static, int, uint32_t, int32_t) \ V(Set64Instance, int, uint32_t, void*, int64_t) \ V(Set64Static, int, uint32_t, int64_t) \ V(SetObjInstance, int, uint32_t, void*, void*) \ V(SetObjStatic, int, uint32_t, void*) \ + V(GetByteInstance, int8_t, uint32_t, void*) \ + V(GetBooleanInstance, uint8_t, uint32_t, void*) \ + V(GetByteStatic, int8_t, uint32_t) \ + V(GetBooleanStatic, uint8_t, uint32_t) \ + V(GetShortInstance, int16_t, uint32_t, void*) \ + V(GetCharInstance, uint16_t, uint32_t, void*) \ + V(GetShortStatic, int16_t, uint32_t) \ + V(GetCharStatic, uint16_t, uint32_t) \ V(Get32Instance, int32_t, uint32_t, void*) \ V(Get32Static, int32_t, uint32_t) \ V(Get64Instance, int64_t, uint32_t, void*) \ diff --git a/runtime/entrypoints/quick/quick_field_entrypoints.cc b/runtime/entrypoints/quick/quick_field_entrypoints.cc index cd1e247..b89c015 100644 --- a/runtime/entrypoints/quick/quick_field_entrypoints.cc +++ b/runtime/entrypoints/quick/quick_field_entrypoints.cc @@ -25,6 +25,74 @@ namespace art { +extern "C" int8_t artGetByteStaticFromCode(uint32_t field_idx, + mirror::ArtMethod* referrer, + Thread* self, StackReference<mirror::ArtMethod>* sp) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, + sizeof(int8_t)); + if (LIKELY(field != NULL)) { + return field->GetByte(field->GetDeclaringClass()); + } + FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); + field = FindFieldFromCode<StaticPrimitiveRead, true>(field_idx, referrer, self, sizeof(int8_t)); + if (LIKELY(field != NULL)) { + return field->GetByte(field->GetDeclaringClass()); + } + return 0; // Will throw exception by checking with Thread::Current +} + +extern "C" uint8_t artGetBooleanStaticFromCode(uint32_t field_idx, + mirror::ArtMethod* referrer, + Thread* self, StackReference<mirror::ArtMethod>* sp) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, + sizeof(int8_t)); + if (LIKELY(field != NULL)) { + return field->GetBoolean(field->GetDeclaringClass()); + } + FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); + field = FindFieldFromCode<StaticPrimitiveRead, true>(field_idx, referrer, self, sizeof(int8_t)); + if (LIKELY(field != NULL)) { + return field->GetBoolean(field->GetDeclaringClass()); + } + return 0; // Will throw exception by checking with Thread::Current +} + +extern "C" int16_t artGetShortStaticFromCode(uint32_t field_idx, + mirror::ArtMethod* referrer, + Thread* self, StackReference<mirror::ArtMethod>* sp) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, + sizeof(int16_t)); + if 
(LIKELY(field != NULL)) { + return field->GetShort(field->GetDeclaringClass()); + } + FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); + field = FindFieldFromCode<StaticPrimitiveRead, true>(field_idx, referrer, self, sizeof(int16_t)); + if (LIKELY(field != NULL)) { + return field->GetShort(field->GetDeclaringClass()); + } + return 0; // Will throw exception by checking with Thread::Current +} + +extern "C" uint16_t artGetCharStaticFromCode(uint32_t field_idx, + mirror::ArtMethod* referrer, + Thread* self, StackReference<mirror::ArtMethod>* sp) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, + sizeof(int16_t)); + if (LIKELY(field != NULL)) { + return field->GetChar(field->GetDeclaringClass()); + } + FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); + field = FindFieldFromCode<StaticPrimitiveRead, true>(field_idx, referrer, self, sizeof(int16_t)); + if (LIKELY(field != NULL)) { + return field->GetChar(field->GetDeclaringClass()); + } + return 0; // Will throw exception by checking with Thread::Current +} + extern "C" uint32_t artGet32StaticFromCode(uint32_t field_idx, mirror::ArtMethod* referrer, Thread* self, StackReference<mirror::ArtMethod>* sp) @@ -78,6 +146,97 @@ extern "C" mirror::Object* artGetObjStaticFromCode(uint32_t field_idx, return NULL; // Will throw exception by checking with Thread::Current } +extern "C" int8_t artGetByteInstanceFromCode(uint32_t field_idx, mirror::Object* obj, + mirror::ArtMethod* referrer, Thread* self, + StackReference<mirror::ArtMethod>* sp) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, + sizeof(int8_t)); + if (LIKELY(field != NULL && obj != NULL)) { + return field->GetByte(obj); + } + FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); + field = FindFieldFromCode<InstancePrimitiveRead, true>(field_idx, referrer, self, + sizeof(int8_t)); + if (LIKELY(field != NULL)) { + if (UNLIKELY(obj == NULL)) { + ThrowLocation throw_location = self->GetCurrentLocationForThrow(); + ThrowNullPointerExceptionForFieldAccess(throw_location, field, true); + } else { + return field->GetByte(obj); + } + } + return 0; // Will throw exception by checking with Thread::Current +} + +extern "C" uint8_t artGetBooleanInstanceFromCode(uint32_t field_idx, mirror::Object* obj, + mirror::ArtMethod* referrer, Thread* self, + StackReference<mirror::ArtMethod>* sp) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, + sizeof(int8_t)); + if (LIKELY(field != NULL && obj != NULL)) { + return field->GetBoolean(obj); + } + FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); + field = FindFieldFromCode<InstancePrimitiveRead, true>(field_idx, referrer, self, + sizeof(int8_t)); + if (LIKELY(field != NULL)) { + if (UNLIKELY(obj == NULL)) { + ThrowLocation throw_location = self->GetCurrentLocationForThrow(); + ThrowNullPointerExceptionForFieldAccess(throw_location, field, true); + } else { + return field->GetBoolean(obj); + } + } + return 0; // Will throw exception by checking with Thread::Current +} +extern "C" int16_t artGetShortInstanceFromCode(uint32_t field_idx, mirror::Object* obj, + mirror::ArtMethod* referrer, Thread* self, + StackReference<mirror::ArtMethod>* sp) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, + 
sizeof(int16_t)); + if (LIKELY(field != NULL && obj != NULL)) { + return field->GetShort(obj); + } + FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); + field = FindFieldFromCode<InstancePrimitiveRead, true>(field_idx, referrer, self, + sizeof(int16_t)); + if (LIKELY(field != NULL)) { + if (UNLIKELY(obj == NULL)) { + ThrowLocation throw_location = self->GetCurrentLocationForThrow(); + ThrowNullPointerExceptionForFieldAccess(throw_location, field, true); + } else { + return field->GetShort(obj); + } + } + return 0; // Will throw exception by checking with Thread::Current +} + +extern "C" uint16_t artGetCharInstanceFromCode(uint32_t field_idx, mirror::Object* obj, + mirror::ArtMethod* referrer, Thread* self, + StackReference<mirror::ArtMethod>* sp) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, + sizeof(int16_t)); + if (LIKELY(field != NULL && obj != NULL)) { + return field->GetChar(obj); + } + FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); + field = FindFieldFromCode<InstancePrimitiveRead, true>(field_idx, referrer, self, + sizeof(int16_t)); + if (LIKELY(field != NULL)) { + if (UNLIKELY(obj == NULL)) { + ThrowLocation throw_location = self->GetCurrentLocationForThrow(); + ThrowNullPointerExceptionForFieldAccess(throw_location, field, true); + } else { + return field->GetChar(obj); + } + } + return 0; // Will throw exception by checking with Thread::Current +} + extern "C" uint32_t artGet32InstanceFromCode(uint32_t field_idx, mirror::Object* obj, mirror::ArtMethod* referrer, Thread* self, StackReference<mirror::ArtMethod>* sp) @@ -148,6 +307,72 @@ extern "C" mirror::Object* artGetObjInstanceFromCode(uint32_t field_idx, mirror: return NULL; // Will throw exception by checking with Thread::Current } +extern "C" int artSet8StaticFromCode(uint32_t field_idx, uint32_t new_value, + mirror::ArtMethod* referrer, Thread* self, + StackReference<mirror::ArtMethod>* sp) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite, + sizeof(int8_t)); + if (LIKELY(field != NULL)) { + Primitive::Type type = field->GetTypeAsPrimitiveType(); + // Compiled code can't use transactional mode. + if (type == Primitive::kPrimBoolean) { + field->SetBoolean<false>(field->GetDeclaringClass(), new_value); + } else { + DCHECK_EQ(Primitive::kPrimByte, type); + field->SetByte<false>(field->GetDeclaringClass(), new_value); + } + return 0; // success + } + FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); + field = FindFieldFromCode<StaticPrimitiveWrite, true>(field_idx, referrer, self, sizeof(int8_t)); + if (LIKELY(field != NULL)) { + Primitive::Type type = field->GetTypeAsPrimitiveType(); + // Compiled code can't use transactional mode. 
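+ // Note: iput-boolean and iput-byte share this single 8-bit entrypoint (the compiler maps both kSignedByte and kUnsignedByte stores to kQuickSet8Static), so the resolved field's declared primitive type is what selects the correctly-typed store below.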
+ if (type == Primitive::kPrimBoolean) { + field->SetBoolean<false>(field->GetDeclaringClass(), new_value); + } else { + DCHECK_EQ(Primitive::kPrimByte, type); + field->SetByte<false>(field->GetDeclaringClass(), new_value); + } + return 0; // success + } + return -1; // failure +} + +extern "C" int artSet16StaticFromCode(uint32_t field_idx, uint16_t new_value, + mirror::ArtMethod* referrer, Thread* self, + StackReference<mirror::ArtMethod>* sp) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite, + sizeof(int16_t)); + if (LIKELY(field != NULL)) { + Primitive::Type type = field->GetTypeAsPrimitiveType(); + // Compiled code can't use transactional mode. + if (type == Primitive::kPrimChar) { + field->SetChar<false>(field->GetDeclaringClass(), new_value); + } else { + DCHECK_EQ(Primitive::kPrimShort, type); + field->SetShort<false>(field->GetDeclaringClass(), new_value); + } + return 0; // success + } + FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); + field = FindFieldFromCode<StaticPrimitiveWrite, true>(field_idx, referrer, self, sizeof(int16_t)); + if (LIKELY(field != NULL)) { + Primitive::Type type = field->GetTypeAsPrimitiveType(); + // Compiled code can't use transactional mode. + if (type == Primitive::kPrimChar) { + field->SetChar<false>(field->GetDeclaringClass(), new_value); + } else { + DCHECK_EQ(Primitive::kPrimShort, type); + field->SetShort<false>(field->GetDeclaringClass(), new_value); + } + return 0; // success + } + return -1; // failure +} + extern "C" int artSet32StaticFromCode(uint32_t field_idx, uint32_t new_value, mirror::ArtMethod* referrer, Thread* self, StackReference<mirror::ArtMethod>* sp) @@ -214,6 +439,91 @@ extern "C" int artSetObjStaticFromCode(uint32_t field_idx, mirror::Object* new_v return -1; // failure } +extern "C" int artSet8InstanceFromCode(uint32_t field_idx, mirror::Object* obj, uint8_t new_value, + mirror::ArtMethod* referrer, Thread* self, + StackReference<mirror::ArtMethod>* sp) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite, + sizeof(int8_t)); + if (LIKELY(field != NULL && obj != NULL)) { + Primitive::Type type = field->GetTypeAsPrimitiveType(); + // Compiled code can't use transactional mode. + if (type == Primitive::kPrimBoolean) { + field->SetBoolean<false>(obj, new_value); + } else { + DCHECK_EQ(Primitive::kPrimByte, type); + field->SetByte<false>(obj, new_value); + } + return 0; // success + } + FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); + { + StackHandleScope<1> hs(self); + HandleWrapper<mirror::Object> h_obj(hs.NewHandleWrapper(&obj)); + field = FindFieldFromCode<InstancePrimitiveWrite, true>(field_idx, referrer, self, + sizeof(int8_t)); + } + if (LIKELY(field != NULL)) { + if (UNLIKELY(obj == NULL)) { + ThrowLocation throw_location = self->GetCurrentLocationForThrow(); + ThrowNullPointerExceptionForFieldAccess(throw_location, field, false); + } else { + Primitive::Type type = field->GetTypeAsPrimitiveType(); + // Compiled code can't use transactional mode. 
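+ // Same boolean/byte dispatch as the static path above; at this point the field has been resolved and obj is known to be non-null, so the store itself cannot fault.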
+ if (type == Primitive::kPrimBoolean) { + field->SetBoolean<false>(obj, new_value); + } else { + field->SetByte<false>(obj, new_value); + } + return 0; // success + } + } + return -1; // failure +} + +extern "C" int artSet16InstanceFromCode(uint32_t field_idx, mirror::Object* obj, uint16_t new_value, + mirror::ArtMethod* referrer, Thread* self, + StackReference<mirror::ArtMethod>* sp) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite, + sizeof(int16_t)); + if (LIKELY(field != NULL && obj != NULL)) { + Primitive::Type type = field->GetTypeAsPrimitiveType(); + // Compiled code can't use transactional mode. + if (type == Primitive::kPrimChar) { + field->SetChar<false>(obj, new_value); + } else { + DCHECK_EQ(Primitive::kPrimShort, type); + field->SetShort<false>(obj, new_value); + } + return 0; // success + } + FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); + { + StackHandleScope<1> hs(self); + HandleWrapper<mirror::Object> h_obj(hs.NewHandleWrapper(&obj)); + field = FindFieldFromCode<InstancePrimitiveWrite, true>(field_idx, referrer, self, + sizeof(int16_t)); + } + if (LIKELY(field != NULL)) { + if (UNLIKELY(obj == NULL)) { + ThrowLocation throw_location = self->GetCurrentLocationForThrow(); + ThrowNullPointerExceptionForFieldAccess(throw_location, field, false); + } else { + Primitive::Type type = field->GetTypeAsPrimitiveType(); + // Compiled code can't use transactional mode. + if (type == Primitive::kPrimChar) { + field->SetChar<false>(obj, new_value); + } else { + DCHECK_EQ(Primitive::kPrimShort, type); + field->SetShort<false>(obj, new_value); + } + return 0; // success + } + } + return -1; // failure +} + extern "C" int artSet32InstanceFromCode(uint32_t field_idx, mirror::Object* obj, uint32_t new_value, mirror::ArtMethod* referrer, Thread* self, StackReference<mirror::ArtMethod>* sp) diff --git a/runtime/entrypoints_order_test.cc b/runtime/entrypoints_order_test.cc index f572d27..a3014b3 100644 --- a/runtime/entrypoints_order_test.cc +++ b/runtime/entrypoints_order_test.cc @@ -185,13 +185,25 @@ class EntrypointsOrderTest : public CommonRuntimeTest { EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pInitializeTypeAndVerifyAccess, pInitializeType, kPointerSize); EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pInitializeType, pResolveString, kPointerSize); - EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pResolveString, pSet32Instance, kPointerSize); + EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pResolveString, pSet8Instance, kPointerSize); + EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pSet8Instance, pSet8Static, kPointerSize); + EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pSet8Static, pSet16Instance, kPointerSize); + EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pSet16Instance, pSet16Static, kPointerSize); + EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pSet16Static, pSet32Instance, kPointerSize); EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pSet32Instance, pSet32Static, kPointerSize); EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pSet32Static, pSet64Instance, kPointerSize); EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pSet64Instance, pSet64Static, kPointerSize); EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pSet64Static, pSetObjInstance, kPointerSize); EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pSetObjInstance, pSetObjStatic, kPointerSize); - EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pSetObjStatic, pGet32Instance, kPointerSize); + EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pSetObjStatic, pGetByteInstance, kPointerSize); + EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pGetByteInstance, 
pGetBooleanInstance, kPointerSize); + EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pGetBooleanInstance, pGetByteStatic, kPointerSize); + EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pGetByteStatic, pGetBooleanStatic, kPointerSize); + EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pGetBooleanStatic, pGetShortInstance, kPointerSize); + EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pGetShortInstance, pGetCharInstance, kPointerSize); + EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pGetCharInstance, pGetShortStatic, kPointerSize); + EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pGetShortStatic, pGetCharStatic, kPointerSize); + EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pGetCharStatic, pGet32Instance, kPointerSize); EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pGet32Instance, pGet32Static, kPointerSize); EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pGet32Static, pGet64Instance, kPointerSize); EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pGet64Instance, pGet64Static, kPointerSize); diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc index 74fa981..5724e35 100644 --- a/runtime/interpreter/interpreter_common.cc +++ b/runtime/interpreter/interpreter_common.cc @@ -32,7 +32,7 @@ bool DoFieldGet(Thread* self, ShadowFrame& shadow_frame, const Instruction* inst const bool is_static = (find_type == StaticObjectRead) || (find_type == StaticPrimitiveRead); const uint32_t field_idx = is_static ? inst->VRegB_21c() : inst->VRegC_22c(); ArtField* f = FindFieldFromCode<find_type, do_access_check>(field_idx, shadow_frame.GetMethod(), self, - Primitive::FieldSize(field_type)); + Primitive::ComponentSize(field_type)); if (UNLIKELY(f == nullptr)) { CHECK(self->IsExceptionPending()); return false; @@ -208,7 +208,7 @@ bool DoFieldPut(Thread* self, const ShadowFrame& shadow_frame, const Instruction bool is_static = (find_type == StaticObjectWrite) || (find_type == StaticPrimitiveWrite); uint32_t field_idx = is_static ? inst->VRegB_21c() : inst->VRegC_22c(); ArtField* f = FindFieldFromCode<find_type, do_access_check>(field_idx, shadow_frame.GetMethod(), self, - Primitive::FieldSize(field_type)); + Primitive::ComponentSize(field_type)); if (UNLIKELY(f == nullptr)) { CHECK(self->IsExceptionPending()); return false; @@ -346,6 +346,18 @@ bool DoIPutQuick(const ShadowFrame& shadow_frame, const Instruction* inst, uint1 } // Note: iput-x-quick instructions are only for non-volatile fields. switch (field_type) { + case Primitive::kPrimBoolean: + obj->SetFieldBoolean<transaction_active>(field_offset, shadow_frame.GetVReg(vregA)); + break; + case Primitive::kPrimByte: + obj->SetFieldByte<transaction_active>(field_offset, shadow_frame.GetVReg(vregA)); + break; + case Primitive::kPrimChar: + obj->SetFieldChar<transaction_active>(field_offset, shadow_frame.GetVReg(vregA)); + break; + case Primitive::kPrimShort: + obj->SetFieldShort<transaction_active>(field_offset, shadow_frame.GetVReg(vregA)); + break; case Primitive::kPrimInt: obj->SetField32<transaction_active>(field_offset, shadow_frame.GetVReg(vregA)); break; @@ -371,9 +383,13 @@ bool DoIPutQuick(const ShadowFrame& shadow_frame, const Instruction* inst, uint1 EXPLICIT_DO_IPUT_QUICK_TEMPLATE_DECL(_field_type, false); \ EXPLICIT_DO_IPUT_QUICK_TEMPLATE_DECL(_field_type, true); -EXPLICIT_DO_IPUT_QUICK_ALL_TEMPLATE_DECL(Primitive::kPrimInt); // iget-quick. -EXPLICIT_DO_IPUT_QUICK_ALL_TEMPLATE_DECL(Primitive::kPrimLong); // iget-wide-quick. -EXPLICIT_DO_IPUT_QUICK_ALL_TEMPLATE_DECL(Primitive::kPrimNot); // iget-object-quick. 
+EXPLICIT_DO_IPUT_QUICK_ALL_TEMPLATE_DECL(Primitive::kPrimInt); // iput-quick. +EXPLICIT_DO_IPUT_QUICK_ALL_TEMPLATE_DECL(Primitive::kPrimBoolean); // iput-boolean-quick. +EXPLICIT_DO_IPUT_QUICK_ALL_TEMPLATE_DECL(Primitive::kPrimByte); // iput-byte-quick. +EXPLICIT_DO_IPUT_QUICK_ALL_TEMPLATE_DECL(Primitive::kPrimChar); // iput-char-quick. +EXPLICIT_DO_IPUT_QUICK_ALL_TEMPLATE_DECL(Primitive::kPrimShort); // iput-short-quick. +EXPLICIT_DO_IPUT_QUICK_ALL_TEMPLATE_DECL(Primitive::kPrimLong); // iput-wide-quick. +EXPLICIT_DO_IPUT_QUICK_ALL_TEMPLATE_DECL(Primitive::kPrimNot); // iput-object-quick. #undef EXPLICIT_DO_IPUT_QUICK_ALL_TEMPLATE_DECL #undef EXPLICIT_DO_IPUT_QUICK_TEMPLATE_DECL diff --git a/runtime/interpreter/interpreter_goto_table_impl.cc b/runtime/interpreter/interpreter_goto_table_impl.cc index e098ac8..755e1ed 100644 --- a/runtime/interpreter/interpreter_goto_table_impl.cc +++ b/runtime/interpreter/interpreter_goto_table_impl.cc @@ -1369,6 +1369,30 @@ JValue ExecuteGotoImpl(Thread* self, MethodHelper& mh, const DexFile::CodeItem* } HANDLE_INSTRUCTION_END(); + HANDLE_INSTRUCTION_START(IPUT_BOOLEAN_QUICK) { + bool success = DoIPutQuick<Primitive::kPrimBoolean, transaction_active>(shadow_frame, inst, inst_data); + POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2); + } + HANDLE_INSTRUCTION_END(); + + HANDLE_INSTRUCTION_START(IPUT_BYTE_QUICK) { + bool success = DoIPutQuick<Primitive::kPrimByte, transaction_active>(shadow_frame, inst, inst_data); + POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2); + } + HANDLE_INSTRUCTION_END(); + + HANDLE_INSTRUCTION_START(IPUT_CHAR_QUICK) { + bool success = DoIPutQuick<Primitive::kPrimChar, transaction_active>(shadow_frame, inst, inst_data); + POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2); + } + HANDLE_INSTRUCTION_END(); + + HANDLE_INSTRUCTION_START(IPUT_SHORT_QUICK) { + bool success = DoIPutQuick<Primitive::kPrimShort, transaction_active>(shadow_frame, inst, inst_data); + POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2); + } + HANDLE_INSTRUCTION_END(); + HANDLE_INSTRUCTION_START(IPUT_WIDE_QUICK) { bool success = DoIPutQuick<Primitive::kPrimLong, transaction_active>(shadow_frame, inst, inst_data); POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2); @@ -2304,22 +2328,6 @@ JValue ExecuteGotoImpl(Thread* self, MethodHelper& mh, const DexFile::CodeItem* UnexpectedOpcode(inst, mh); HANDLE_INSTRUCTION_END(); - HANDLE_INSTRUCTION_START(UNUSED_EB) - UnexpectedOpcode(inst, mh); - HANDLE_INSTRUCTION_END(); - - HANDLE_INSTRUCTION_START(UNUSED_EC) - UnexpectedOpcode(inst, mh); - HANDLE_INSTRUCTION_END(); - - HANDLE_INSTRUCTION_START(UNUSED_ED) - UnexpectedOpcode(inst, mh); - HANDLE_INSTRUCTION_END(); - - HANDLE_INSTRUCTION_START(UNUSED_EE) - UnexpectedOpcode(inst, mh); - HANDLE_INSTRUCTION_END(); - HANDLE_INSTRUCTION_START(UNUSED_EF) UnexpectedOpcode(inst, mh); HANDLE_INSTRUCTION_END(); diff --git a/runtime/interpreter/interpreter_switch_impl.cc b/runtime/interpreter/interpreter_switch_impl.cc index 5401495..6054a25 100644 --- a/runtime/interpreter/interpreter_switch_impl.cc +++ b/runtime/interpreter/interpreter_switch_impl.cc @@ -1266,6 +1266,30 @@ JValue ExecuteSwitchImpl(Thread* self, MethodHelper& mh, const DexFile::CodeItem POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx); break; } + case Instruction::IPUT_BOOLEAN_QUICK: { + PREAMBLE(); + bool success = DoIPutQuick<Primitive::kPrimBoolean, transaction_active>(shadow_frame, inst, inst_data); + POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx); + break; + } + case Instruction::IPUT_BYTE_QUICK: { + PREAMBLE(); + 
bool success = DoIPutQuick<Primitive::kPrimByte, transaction_active>(shadow_frame, inst, inst_data); + POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx); + break; + } + case Instruction::IPUT_CHAR_QUICK: { + PREAMBLE(); + bool success = DoIPutQuick<Primitive::kPrimChar, transaction_active>(shadow_frame, inst, inst_data); + POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx); + break; + } + case Instruction::IPUT_SHORT_QUICK: { + PREAMBLE(); + bool success = DoIPutQuick<Primitive::kPrimShort, transaction_active>(shadow_frame, inst, inst_data); + POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx); + break; + } case Instruction::IPUT_WIDE_QUICK: { PREAMBLE(); bool success = DoIPutQuick<Primitive::kPrimLong, transaction_active>(shadow_frame, inst, inst_data); @@ -2164,7 +2188,7 @@ JValue ExecuteSwitchImpl(Thread* self, MethodHelper& mh, const DexFile::CodeItem inst = inst->Next_2xx(); break; case Instruction::UNUSED_3E ... Instruction::UNUSED_43: - case Instruction::UNUSED_EB ... Instruction::UNUSED_FF: + case Instruction::UNUSED_EF ... Instruction::UNUSED_FF: case Instruction::UNUSED_79: case Instruction::UNUSED_7A: UnexpectedOpcode(inst, mh); diff --git a/runtime/mirror/array-inl.h b/runtime/mirror/array-inl.h index 2c0ea36..213dbc2 100644 --- a/runtime/mirror/array-inl.h +++ b/runtime/mirror/array-inl.h @@ -29,7 +29,7 @@ namespace mirror { inline uint32_t Array::ClassSize() { uint32_t vtable_entries = Object::kVTableLength; - return Class::ComputeClassSize(true, vtable_entries, 0, 0, 0); + return Class::ComputeClassSize(true, vtable_entries, 0, 0, 0, 0, 0); } template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption> diff --git a/runtime/mirror/art_field-inl.h b/runtime/mirror/art_field-inl.h index 00bed92..d37fa41 100644 --- a/runtime/mirror/art_field-inl.h +++ b/runtime/mirror/art_field-inl.h @@ -31,7 +31,7 @@ namespace mirror { inline uint32_t ArtField::ClassSize() { uint32_t vtable_entries = Object::kVTableLength + 6; - return Class::ComputeClassSize(true, vtable_entries, 0, 0, 0); + return Class::ComputeClassSize(true, vtable_entries, 0, 0, 0, 0, 0); } inline Class* ArtField::GetDeclaringClass() { @@ -120,50 +120,64 @@ inline void ArtField::SetObj(Object* object, Object* new_value) { } } -inline bool ArtField::GetBoolean(Object* object) { - DCHECK_EQ(Primitive::kPrimBoolean, GetTypeAsPrimitiveType()) << PrettyField(this); - return Get32(object); +#define FIELD_GET(object, type) \ + DCHECK_EQ(Primitive::kPrim ## type, GetTypeAsPrimitiveType()) << PrettyField(this); \ + DCHECK(object != nullptr) << PrettyField(this); \ + DCHECK(!IsStatic() || (object == GetDeclaringClass()) || !Runtime::Current()->IsStarted()); \ + if (UNLIKELY(IsVolatile())) { \ + return object->GetField ## type ## Volatile(GetOffset()); \ + } \ + return object->GetField ## type(GetOffset()); + +#define FIELD_SET(object, type, value) \ + DCHECK_EQ(Primitive::kPrim ## type, GetTypeAsPrimitiveType()) << PrettyField(this); \ + DCHECK(object != nullptr) << PrettyField(this); \ + DCHECK(!IsStatic() || (object == GetDeclaringClass()) || !Runtime::Current()->IsStarted()); \ + if (UNLIKELY(IsVolatile())) { \ + object->SetField ## type ## Volatile<kTransactionActive>(GetOffset(), value); \ + } else { \ + object->SetField ## type<kTransactionActive>(GetOffset(), value); \ + } + +inline uint8_t ArtField::GetBoolean(Object* object) { + FIELD_GET(object, Boolean); } template<bool kTransactionActive> -inline void ArtField::SetBoolean(Object* object, bool z) { - DCHECK_EQ(Primitive::kPrimBoolean, 
GetTypeAsPrimitiveType()) << PrettyField(this); - Set32<kTransactionActive>(object, z); +inline void ArtField::SetBoolean(Object* object, uint8_t z) { + FIELD_SET(object, Boolean, z); } inline int8_t ArtField::GetByte(Object* object) { - DCHECK_EQ(Primitive::kPrimByte, GetTypeAsPrimitiveType()) << PrettyField(this); - return Get32(object); + FIELD_GET(object, Byte); } template<bool kTransactionActive> inline void ArtField::SetByte(Object* object, int8_t b) { - DCHECK_EQ(Primitive::kPrimByte, GetTypeAsPrimitiveType()) << PrettyField(this); - Set32<kTransactionActive>(object, b); + FIELD_SET(object, Byte, b); } inline uint16_t ArtField::GetChar(Object* object) { - DCHECK_EQ(Primitive::kPrimChar, GetTypeAsPrimitiveType()) << PrettyField(this); - return Get32(object); + FIELD_GET(object, Char); } template<bool kTransactionActive> inline void ArtField::SetChar(Object* object, uint16_t c) { - DCHECK_EQ(Primitive::kPrimChar, GetTypeAsPrimitiveType()) << PrettyField(this); - Set32<kTransactionActive>(object, c); + FIELD_SET(object, Char, c); } inline int16_t ArtField::GetShort(Object* object) { - DCHECK_EQ(Primitive::kPrimShort, GetTypeAsPrimitiveType()) << PrettyField(this); - return Get32(object); + FIELD_GET(object, Short); } template<bool kTransactionActive> inline void ArtField::SetShort(Object* object, int16_t s) { - DCHECK_EQ(Primitive::kPrimShort, GetTypeAsPrimitiveType()) << PrettyField(this); - Set32<kTransactionActive>(object, s); + FIELD_SET(object, Short, s); } +#undef FIELD_GET +#undef FIELD_SET + inline int32_t ArtField::GetInt(Object* object) { if (kIsDebugBuild) { Primitive::Type type = GetTypeAsPrimitiveType(); @@ -273,7 +287,7 @@ inline bool ArtField::IsPrimitiveType() SHARED_LOCKS_REQUIRED(Locks::mutator_loc } inline size_t ArtField::FieldSize() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return Primitive::FieldSize(GetTypeAsPrimitiveType()); + return Primitive::ComponentSize(GetTypeAsPrimitiveType()); } inline mirror::DexCache* ArtField::GetDexCache() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { diff --git a/runtime/mirror/art_field.h b/runtime/mirror/art_field.h index f3dfa15..885bcb0 100644 --- a/runtime/mirror/art_field.h +++ b/runtime/mirror/art_field.h @@ -95,9 +95,9 @@ class MANAGED ArtField FINAL : public Object { void SetOffset(MemberOffset num_bytes) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // field access, null object for static fields - bool GetBoolean(Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + uint8_t GetBoolean(Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); template<bool kTransactionActive> - void SetBoolean(Object* object, bool z) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void SetBoolean(Object* object, uint8_t z) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); int8_t GetByte(Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); template<bool kTransactionActive> void SetByte(Object* object, int8_t b) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); diff --git a/runtime/mirror/art_method-inl.h b/runtime/mirror/art_method-inl.h index 06700e6..98ca64b 100644 --- a/runtime/mirror/art_method-inl.h +++ b/runtime/mirror/art_method-inl.h @@ -38,7 +38,7 @@ namespace mirror { inline uint32_t ArtMethod::ClassSize() { uint32_t vtable_entries = Object::kVTableLength + 8; - return Class::ComputeClassSize(true, vtable_entries, 0, 0, 0); + return Class::ComputeClassSize(true, vtable_entries, 0, 0, 0, 0, 0); } template<ReadBarrierOption kReadBarrierOption> diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h index 
b0ff7ea..52dd0ee 100644 --- a/runtime/mirror/class-inl.h +++ b/runtime/mirror/class-inl.h @@ -556,6 +556,8 @@ inline Object* Class::AllocNonMovableObject(Thread* self) { inline uint32_t Class::ComputeClassSize(bool has_embedded_tables, uint32_t num_vtable_entries, + uint32_t num_8bit_static_fields, + uint32_t num_16bit_static_fields, uint32_t num_32bit_static_fields, uint32_t num_64bit_static_fields, uint32_t num_ref_static_fields) { @@ -569,19 +571,39 @@ inline uint32_t Class::ComputeClassSize(bool has_embedded_tables, sizeof(int32_t) /* vtable len */ + embedded_vtable_size; } + // Space used by reference statics. size += num_ref_static_fields * sizeof(HeapReference<Object>); - // Possible pad for alignment. - if (((size & 7) != 0) && (num_64bit_static_fields > 0)) { - size += sizeof(uint32_t); - if (num_32bit_static_fields != 0) { - // Shuffle one 32 bit static field forward. - num_32bit_static_fields--; + if (!IsAligned<8>(size) && num_64bit_static_fields > 0) { + uint32_t gap = 8 - (size & 0x7); + size += gap; // will be padded + // e.g. size == 20 -> gap == 4, which one shuffled 32-bit field (or two 16-bit fields, etc.) can fill. + // Shuffle 4-byte fields forward. + while (gap >= sizeof(uint32_t) && num_32bit_static_fields != 0) { + --num_32bit_static_fields; + gap -= sizeof(uint32_t); + } + // Shuffle 2-byte fields forward. + while (gap >= sizeof(uint16_t) && num_16bit_static_fields != 0) { + --num_16bit_static_fields; + gap -= sizeof(uint16_t); + } + // Shuffle byte fields forward. + while (gap >= sizeof(uint8_t) && num_8bit_static_fields != 0) { + --num_8bit_static_fields; + gap -= sizeof(uint8_t); + } } + // Guaranteed to be at least 4-byte aligned. No need for further alignment. // Space used for primitive static fields. - size += (num_32bit_static_fields * sizeof(uint32_t)) + + size += (num_8bit_static_fields * sizeof(uint8_t)) + + (num_16bit_static_fields * sizeof(uint16_t)) + + (num_32bit_static_fields * sizeof(uint32_t)) + (num_64bit_static_fields * sizeof(uint64_t)); + // For now, the start of a subclass is expected to be 4-byte aligned, so pad the end of the + // object to ensure alignment. + if (!IsAligned<4>(size)) { + size = RoundUp(size, 4); + } return size; } @@ -705,11 +727,11 @@ inline MemberOffset Class::GetSlowPathFlagOffset() { } inline bool Class::GetSlowPathEnabled() { - return GetField32(GetSlowPathFlagOffset()); + return GetFieldBoolean(GetSlowPathFlagOffset()); } inline void Class::SetSlowPath(bool enabled) { - SetField32<false>(GetSlowPathFlagOffset(), enabled); + SetFieldBoolean<false>(GetSlowPathFlagOffset(), enabled); } inline void Class::InitializeClassVisitor::operator()( diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h index 81fbcab..0d30bc6 100644 --- a/runtime/mirror/class.h +++ b/runtime/mirror/class.h @@ -502,6 +502,8 @@ class MANAGED Class FINAL : public Object { // Compute how many bytes would be used by a class with the given elements. static uint32_t ComputeClassSize(bool has_embedded_tables, uint32_t num_vtable_entries, + uint32_t num_8bit_static_fields, + uint32_t num_16bit_static_fields, uint32_t num_32bit_static_fields, uint32_t num_64bit_static_fields, uint32_t num_ref_static_fields); @@ -510,12 +512,12 @@ class MANAGED Class FINAL : public Object { static uint32_t ClassClassSize() { // The number of vtable entries in java.lang.Class. uint32_t vtable_entries = Object::kVTableLength + 64; - return ComputeClassSize(true, vtable_entries, 0, 1, 0); + return ComputeClassSize(true, vtable_entries, 0, 0, 0, 1, 0); } // The size of a java.lang.Class representing a primitive such as int.class.
static uint32_t PrimitiveClassSize() { - return ComputeClassSize(false, 0, 0, 0, 0); + return ComputeClassSize(false, 0, 0, 0, 0, 0, 0); } template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, diff --git a/runtime/mirror/dex_cache-inl.h b/runtime/mirror/dex_cache-inl.h index d3fcb55..288e88e 100644 --- a/runtime/mirror/dex_cache-inl.h +++ b/runtime/mirror/dex_cache-inl.h @@ -28,7 +28,7 @@ namespace mirror { inline uint32_t DexCache::ClassSize() { uint32_t vtable_entries = Object::kVTableLength + 1; - return Class::ComputeClassSize(true, vtable_entries, 0, 0, 0); + return Class::ComputeClassSize(true, vtable_entries, 0, 0, 0, 0, 0); } inline ArtMethod* DexCache::GetResolvedMethod(uint32_t method_idx) diff --git a/runtime/mirror/object-inl.h b/runtime/mirror/object-inl.h index 9dbfb56..8c1dc7d 100644 --- a/runtime/mirror/object-inl.h +++ b/runtime/mirror/object-inl.h @@ -37,7 +37,7 @@ namespace mirror { inline uint32_t Object::ClassSize() { uint32_t vtable_entries = kVTableLength; - return Class::ComputeClassSize(true, vtable_entries, 0, 0, 0); + return Class::ComputeClassSize(true, vtable_entries, 0, 0, 0, 0, 0); } template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption> @@ -408,17 +408,157 @@ inline size_t Object::SizeOf() { } template<VerifyObjectFlags kVerifyFlags, bool kIsVolatile> -inline int32_t Object::GetField32(MemberOffset field_offset) { +inline uint8_t Object::GetFieldBoolean(MemberOffset field_offset) { if (kVerifyFlags & kVerifyThis) { VerifyObject(this); } - const byte* raw_addr = reinterpret_cast<const byte*>(this) + field_offset.Int32Value(); - const int32_t* word_addr = reinterpret_cast<const int32_t*>(raw_addr); - if (UNLIKELY(kIsVolatile)) { - return reinterpret_cast<const Atomic<int32_t>*>(word_addr)->LoadSequentiallyConsistent(); - } else { - return reinterpret_cast<const Atomic<int32_t>*>(word_addr)->LoadJavaData(); + return GetField<uint8_t, kIsVolatile>(field_offset); +} + +template<VerifyObjectFlags kVerifyFlags, bool kIsVolatile> +inline int8_t Object::GetFieldByte(MemberOffset field_offset) { + if (kVerifyFlags & kVerifyThis) { + VerifyObject(this); + } + return GetField<int8_t, kIsVolatile>(field_offset); +} + +template<VerifyObjectFlags kVerifyFlags> +inline uint8_t Object::GetFieldBooleanVolatile(MemberOffset field_offset) { + return GetFieldBoolean<kVerifyFlags, true>(field_offset); +} + +template<VerifyObjectFlags kVerifyFlags> +inline int8_t Object::GetFieldByteVolatile(MemberOffset field_offset) { + return GetFieldByte<kVerifyFlags, true>(field_offset); +} + +template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags, + bool kIsVolatile> +inline void Object::SetFieldBoolean(MemberOffset field_offset, uint8_t new_value) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + if (kCheckTransaction) { + DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction()); + } + if (kTransactionActive) { + Runtime::Current()->RecordWriteFieldBoolean(this, field_offset, + GetFieldBoolean<kVerifyFlags, kIsVolatile>(field_offset), + kIsVolatile); + } + if (kVerifyFlags & kVerifyThis) { + VerifyObject(this); + } + SetField<uint8_t, kIsVolatile>(field_offset, new_value); +} + +template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags, + bool kIsVolatile> +inline void Object::SetFieldByte(MemberOffset field_offset, int8_t new_value) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + if (kCheckTransaction) { + DCHECK_EQ(kTransactionActive, 
Runtime::Current()->IsActiveTransaction()); + } + if (kTransactionActive) { + Runtime::Current()->RecordWriteFieldByte(this, field_offset, + GetFieldByte<kVerifyFlags, kIsVolatile>(field_offset), + kIsVolatile); + } + if (kVerifyFlags & kVerifyThis) { + VerifyObject(this); + } + SetField<int8_t, kIsVolatile>(field_offset, new_value); +} + +template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags> +inline void Object::SetFieldBooleanVolatile(MemberOffset field_offset, uint8_t new_value) { + return SetFieldBoolean<kTransactionActive, kCheckTransaction, kVerifyFlags, true>( + field_offset, new_value); +} + +template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags> +inline void Object::SetFieldByteVolatile(MemberOffset field_offset, int8_t new_value) { + return SetFieldByte<kTransactionActive, kCheckTransaction, kVerifyFlags, true>( + field_offset, new_value); +} + +template<VerifyObjectFlags kVerifyFlags, bool kIsVolatile> +inline uint16_t Object::GetFieldChar(MemberOffset field_offset) { + if (kVerifyFlags & kVerifyThis) { + VerifyObject(this); + } + return GetField<uint16_t, kIsVolatile>(field_offset); +} + +template<VerifyObjectFlags kVerifyFlags, bool kIsVolatile> +inline int16_t Object::GetFieldShort(MemberOffset field_offset) { + if (kVerifyFlags & kVerifyThis) { + VerifyObject(this); } + return GetField<int16_t, kIsVolatile>(field_offset); +} + +template<VerifyObjectFlags kVerifyFlags> +inline uint16_t Object::GetFieldCharVolatile(MemberOffset field_offset) { + return GetFieldChar<kVerifyFlags, true>(field_offset); +} + +template<VerifyObjectFlags kVerifyFlags> +inline int16_t Object::GetFieldShortVolatile(MemberOffset field_offset) { + return GetFieldShort<kVerifyFlags, true>(field_offset); +} + +template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags, + bool kIsVolatile> +inline void Object::SetFieldChar(MemberOffset field_offset, uint16_t new_value) { + if (kCheckTransaction) { + DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction()); + } + if (kTransactionActive) { + Runtime::Current()->RecordWriteFieldChar(this, field_offset, + GetFieldChar<kVerifyFlags, kIsVolatile>(field_offset), + kIsVolatile); + } + if (kVerifyFlags & kVerifyThis) { + VerifyObject(this); + } + SetField<uint16_t, kIsVolatile>(field_offset, new_value); +} + +template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags, + bool kIsVolatile> +inline void Object::SetFieldShort(MemberOffset field_offset, int16_t new_value) { + if (kCheckTransaction) { + DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction()); + } + if (kTransactionActive) { + Runtime::Current()->RecordWriteFieldShort(this, field_offset, + GetFieldShort<kVerifyFlags, kIsVolatile>(field_offset), + kIsVolatile); + } + if (kVerifyFlags & kVerifyThis) { + VerifyObject(this); + } + SetField<int16_t, kIsVolatile>(field_offset, new_value); +} + +template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags> +inline void Object::SetFieldCharVolatile(MemberOffset field_offset, uint16_t new_value) { + return SetFieldChar<kTransactionActive, kCheckTransaction, kVerifyFlags, true>( + field_offset, new_value); +} + +template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags> +inline void Object::SetFieldShortVolatile(MemberOffset field_offset, int16_t new_value) { + return SetFieldShort<kTransactionActive, kCheckTransaction, kVerifyFlags, true>( +
field_offset, new_value); +} + +template<VerifyObjectFlags kVerifyFlags, bool kIsVolatile> +inline int32_t Object::GetField32(MemberOffset field_offset) { + if (kVerifyFlags & kVerifyThis) { + VerifyObject(this); + } + return GetField<int32_t, kIsVolatile>(field_offset); } template<VerifyObjectFlags kVerifyFlags> @@ -440,13 +580,7 @@ inline void Object::SetField32(MemberOffset field_offset, int32_t new_value) { if (kVerifyFlags & kVerifyThis) { VerifyObject(this); } - byte* raw_addr = reinterpret_cast<byte*>(this) + field_offset.Int32Value(); - int32_t* word_addr = reinterpret_cast<int32_t*>(raw_addr); - if (kIsVolatile) { - reinterpret_cast<Atomic<int32_t>*>(word_addr)->StoreSequentiallyConsistent(new_value); - } else { - reinterpret_cast<Atomic<int32_t>*>(word_addr)->StoreJavaData(new_value); - } + SetField<int32_t, kIsVolatile>(field_offset, new_value); } template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags> @@ -515,13 +649,7 @@ inline int64_t Object::GetField64(MemberOffset field_offset) { if (kVerifyFlags & kVerifyThis) { VerifyObject(this); } - const byte* raw_addr = reinterpret_cast<const byte*>(this) + field_offset.Int32Value(); - const int64_t* addr = reinterpret_cast<const int64_t*>(raw_addr); - if (kIsVolatile) { - return reinterpret_cast<const Atomic<int64_t>*>(addr)->LoadSequentiallyConsistent(); - } else { - return reinterpret_cast<const Atomic<int64_t>*>(addr)->LoadJavaData(); - } + return GetField<int64_t, kIsVolatile>(field_offset); } template<VerifyObjectFlags kVerifyFlags> @@ -543,13 +671,7 @@ inline void Object::SetField64(MemberOffset field_offset, int64_t new_value) { if (kVerifyFlags & kVerifyThis) { VerifyObject(this); } - byte* raw_addr = reinterpret_cast<byte*>(this) + field_offset.Int32Value(); - int64_t* addr = reinterpret_cast<int64_t*>(raw_addr); - if (kIsVolatile) { - reinterpret_cast<Atomic<int64_t>*>(addr)->StoreSequentiallyConsistent(new_value); - } else { - reinterpret_cast<Atomic<int64_t>*>(addr)->StoreJavaData(new_value); - } + SetField<int64_t, kIsVolatile>(field_offset, new_value); } template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags> @@ -558,6 +680,28 @@ inline void Object::SetField64Volatile(MemberOffset field_offset, int64_t new_va new_value); } +template<typename kSize, bool kIsVolatile> +inline void Object::SetField(MemberOffset field_offset, kSize new_value) { + byte* raw_addr = reinterpret_cast<byte*>(this) + field_offset.Int32Value(); + kSize* addr = reinterpret_cast<kSize*>(raw_addr); + if (kIsVolatile) { + reinterpret_cast<Atomic<kSize>*>(addr)->StoreSequentiallyConsistent(new_value); + } else { + reinterpret_cast<Atomic<kSize>*>(addr)->StoreJavaData(new_value); + } +} + +template<typename kSize, bool kIsVolatile> +inline kSize Object::GetField(MemberOffset field_offset) { + const byte* raw_addr = reinterpret_cast<const byte*>(this) + field_offset.Int32Value(); + const kSize* addr = reinterpret_cast<const kSize*>(raw_addr); + if (kIsVolatile) { + return reinterpret_cast<const Atomic<kSize>*>(addr)->LoadSequentiallyConsistent(); + } else { + return reinterpret_cast<const Atomic<kSize>*>(addr)->LoadJavaData(); + } +} + template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags> inline bool Object::CasFieldWeakSequentiallyConsistent64(MemberOffset field_offset, int64_t old_value, int64_t new_value) { diff --git a/runtime/mirror/object.h b/runtime/mirror/object.h index a6b6227..6cd230b 100644 --- a/runtime/mirror/object.h +++ 
b/runtime/mirror/object.h @@ -19,6 +19,7 @@ #include "object_reference.h" #include "offsets.h" +#include "runtime.h" #include "verify_object.h" namespace art { @@ -247,6 +248,78 @@ class MANAGED LOCKABLE Object { HeapReference<Object>* GetFieldObjectReferenceAddr(MemberOffset field_offset); template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false> + ALWAYS_INLINE uint8_t GetFieldBoolean(MemberOffset field_offset) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false> + ALWAYS_INLINE int8_t GetFieldByte(MemberOffset field_offset) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> + ALWAYS_INLINE uint8_t GetFieldBooleanVolatile(MemberOffset field_offset) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> + ALWAYS_INLINE int8_t GetFieldByteVolatile(MemberOffset field_offset) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + template<bool kTransactionActive, bool kCheckTransaction = true, + VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false> + ALWAYS_INLINE void SetFieldBoolean(MemberOffset field_offset, uint8_t new_value) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + template<bool kTransactionActive, bool kCheckTransaction = true, + VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false> + ALWAYS_INLINE void SetFieldByte(MemberOffset field_offset, int8_t new_value) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + template<bool kTransactionActive, bool kCheckTransaction = true, + VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> + ALWAYS_INLINE void SetFieldBooleanVolatile(MemberOffset field_offset, uint8_t new_value) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + template<bool kTransactionActive, bool kCheckTransaction = true, + VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> + ALWAYS_INLINE void SetFieldByteVolatile(MemberOffset field_offset, int8_t new_value) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false> + ALWAYS_INLINE uint16_t GetFieldChar(MemberOffset field_offset) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false> + ALWAYS_INLINE int16_t GetFieldShort(MemberOffset field_offset) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> + ALWAYS_INLINE uint16_t GetFieldCharVolatile(MemberOffset field_offset) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> + ALWAYS_INLINE int16_t GetFieldShortVolatile(MemberOffset field_offset) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + template<bool kTransactionActive, bool kCheckTransaction = true, + VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false> + ALWAYS_INLINE void SetFieldChar(MemberOffset field_offset, uint16_t new_value) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + template<bool kTransactionActive, bool kCheckTransaction = true, + VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false> + ALWAYS_INLINE void SetFieldShort(MemberOffset field_offset, int16_t new_value) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + template<bool kTransactionActive, bool kCheckTransaction 
= true, + VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> + ALWAYS_INLINE void SetFieldCharVolatile(MemberOffset field_offset, uint16_t new_value) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + template<bool kTransactionActive, bool kCheckTransaction = true, + VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> + ALWAYS_INLINE void SetFieldShortVolatile(MemberOffset field_offset, int16_t new_value) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false> ALWAYS_INLINE int32_t GetField32(MemberOffset field_offset) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); @@ -356,6 +429,13 @@ class MANAGED LOCKABLE Object { SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); private: + template<typename kSize, bool kIsVolatile> + ALWAYS_INLINE void SetField(MemberOffset field_offset, kSize new_value) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + template<typename kSize, bool kIsVolatile> + ALWAYS_INLINE kSize GetField(MemberOffset field_offset) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + // Verify the type correctness of stores to fields. // TODO: This can cause thread suspension and isn't moving GC safe. void CheckFieldAssignmentImpl(MemberOffset field_offset, Object* new_value) diff --git a/runtime/mirror/reference-inl.h b/runtime/mirror/reference-inl.h index b353402..d1d2a3a 100644 --- a/runtime/mirror/reference-inl.h +++ b/runtime/mirror/reference-inl.h @@ -24,7 +24,7 @@ namespace mirror { inline uint32_t Reference::ClassSize() { uint32_t vtable_entries = Object::kVTableLength + 5; - return Class::ComputeClassSize(false, vtable_entries, 2, 0, 0); + return Class::ComputeClassSize(false, vtable_entries, 2, 0, 0, 0, 0); } inline bool Reference::IsEnqueuable() { diff --git a/runtime/mirror/string-inl.h b/runtime/mirror/string-inl.h index 6736497..f98407b 100644 --- a/runtime/mirror/string-inl.h +++ b/runtime/mirror/string-inl.h @@ -29,7 +29,7 @@ namespace mirror { inline uint32_t String::ClassSize() { uint32_t vtable_entries = Object::kVTableLength + 51; - return Class::ComputeClassSize(true, vtable_entries, 1, 1, 2); + return Class::ComputeClassSize(true, vtable_entries, 0, 1, 0, 1, 2); } inline CharArray* String::GetCharArray() { diff --git a/runtime/oat.cc b/runtime/oat.cc index ede108c..43173ca 100644 --- a/runtime/oat.cc +++ b/runtime/oat.cc @@ -23,7 +23,7 @@ namespace art { const uint8_t OatHeader::kOatMagic[] = { 'o', 'a', 't', '\n' }; -const uint8_t OatHeader::kOatVersion[] = { '0', '3', '9', '\0' }; +const uint8_t OatHeader::kOatVersion[] = { '0', '4', '0', '\0' }; static size_t ComputeOatHeaderSize(const SafeMap<std::string, std::string>* variable_data) { size_t estimate = 0U; diff --git a/runtime/primitive.h b/runtime/primitive.h index a36e9cb..36ad662 100644 --- a/runtime/primitive.h +++ b/runtime/primitive.h @@ -84,10 +84,6 @@ class Primitive { } } - static size_t FieldSize(Type type) { - return ComponentSize(type) <= 4 ? 
4 : 8; - } - static const char* Descriptor(Type type) { switch (type) { case kPrimBoolean: diff --git a/runtime/runtime.cc b/runtime/runtime.cc index 3e03c1a..3fc6ad5 100644 --- a/runtime/runtime.cc +++ b/runtime/runtime.cc @@ -1207,6 +1207,34 @@ void Runtime::ExitTransactionMode() { preinitialization_transaction_ = nullptr; } +void Runtime::RecordWriteFieldBoolean(mirror::Object* obj, MemberOffset field_offset, + uint8_t value, bool is_volatile) const { + DCHECK(IsCompiler()); + DCHECK(IsActiveTransaction()); + preinitialization_transaction_->RecordWriteFieldBoolean(obj, field_offset, value, is_volatile); +} + +void Runtime::RecordWriteFieldByte(mirror::Object* obj, MemberOffset field_offset, + int8_t value, bool is_volatile) const { + DCHECK(IsCompiler()); + DCHECK(IsActiveTransaction()); + preinitialization_transaction_->RecordWriteFieldByte(obj, field_offset, value, is_volatile); +} + +void Runtime::RecordWriteFieldChar(mirror::Object* obj, MemberOffset field_offset, + uint16_t value, bool is_volatile) const { + DCHECK(IsCompiler()); + DCHECK(IsActiveTransaction()); + preinitialization_transaction_->RecordWriteFieldChar(obj, field_offset, value, is_volatile); +} + +void Runtime::RecordWriteFieldShort(mirror::Object* obj, MemberOffset field_offset, + int16_t value, bool is_volatile) const { + DCHECK(IsCompiler()); + DCHECK(IsActiveTransaction()); + preinitialization_transaction_->RecordWriteFieldShort(obj, field_offset, value, is_volatile); +} + void Runtime::RecordWriteField32(mirror::Object* obj, MemberOffset field_offset, uint32_t value, bool is_volatile) const { DCHECK(IsCompiler()); diff --git a/runtime/runtime.h b/runtime/runtime.h index b0a88d5..fc93d16 100644 --- a/runtime/runtime.h +++ b/runtime/runtime.h @@ -415,6 +415,14 @@ class Runtime { } void EnterTransactionMode(Transaction* transaction); void ExitTransactionMode(); + void RecordWriteFieldBoolean(mirror::Object* obj, MemberOffset field_offset, uint8_t value, + bool is_volatile) const; + void RecordWriteFieldByte(mirror::Object* obj, MemberOffset field_offset, int8_t value, + bool is_volatile) const; + void RecordWriteFieldChar(mirror::Object* obj, MemberOffset field_offset, uint16_t value, + bool is_volatile) const; + void RecordWriteFieldShort(mirror::Object* obj, MemberOffset field_offset, int16_t value, + bool is_volatile) const; void RecordWriteField32(mirror::Object* obj, MemberOffset field_offset, uint32_t value, bool is_volatile) const; void RecordWriteField64(mirror::Object* obj, MemberOffset field_offset, uint64_t value, diff --git a/runtime/thread.cc b/runtime/thread.cc index 7ac685b..44b0ab4 100644 --- a/runtime/thread.cc +++ b/runtime/thread.cc @@ -1860,12 +1860,24 @@ void Thread::DumpThreadOffset(std::ostream& os, uint32_t offset) { QUICK_ENTRY_POINT_INFO(pInitializeTypeAndVerifyAccess) QUICK_ENTRY_POINT_INFO(pInitializeType) QUICK_ENTRY_POINT_INFO(pResolveString) + QUICK_ENTRY_POINT_INFO(pSet8Instance) + QUICK_ENTRY_POINT_INFO(pSet8Static) + QUICK_ENTRY_POINT_INFO(pSet16Instance) + QUICK_ENTRY_POINT_INFO(pSet16Static) QUICK_ENTRY_POINT_INFO(pSet32Instance) QUICK_ENTRY_POINT_INFO(pSet32Static) QUICK_ENTRY_POINT_INFO(pSet64Instance) QUICK_ENTRY_POINT_INFO(pSet64Static) QUICK_ENTRY_POINT_INFO(pSetObjInstance) QUICK_ENTRY_POINT_INFO(pSetObjStatic) + QUICK_ENTRY_POINT_INFO(pGetByteInstance) + QUICK_ENTRY_POINT_INFO(pGetBooleanInstance) + QUICK_ENTRY_POINT_INFO(pGetByteStatic) + QUICK_ENTRY_POINT_INFO(pGetBooleanStatic) + QUICK_ENTRY_POINT_INFO(pGetShortInstance) + QUICK_ENTRY_POINT_INFO(pGetCharInstance) + 
QUICK_ENTRY_POINT_INFO(pGetShortStatic) + QUICK_ENTRY_POINT_INFO(pGetCharStatic) QUICK_ENTRY_POINT_INFO(pGet32Instance) QUICK_ENTRY_POINT_INFO(pGet32Static) QUICK_ENTRY_POINT_INFO(pGet64Instance) diff --git a/runtime/transaction.cc b/runtime/transaction.cc index cc02a8d..0cfdfc5 100644 --- a/runtime/transaction.cc +++ b/runtime/transaction.cc @@ -57,6 +57,40 @@ Transaction::~Transaction() { } } +void Transaction::RecordWriteFieldBoolean(mirror::Object* obj, MemberOffset field_offset, + uint8_t value, bool is_volatile) { + DCHECK(obj != nullptr); + MutexLock mu(Thread::Current(), log_lock_); + ObjectLog& object_log = object_logs_[obj]; + object_log.LogBooleanValue(field_offset, value, is_volatile); +} + +void Transaction::RecordWriteFieldByte(mirror::Object* obj, MemberOffset field_offset, + int8_t value, bool is_volatile) { + DCHECK(obj != nullptr); + MutexLock mu(Thread::Current(), log_lock_); + ObjectLog& object_log = object_logs_[obj]; + object_log.LogByteValue(field_offset, value, is_volatile); +} + +void Transaction::RecordWriteFieldChar(mirror::Object* obj, MemberOffset field_offset, + uint16_t value, bool is_volatile) { + DCHECK(obj != nullptr); + MutexLock mu(Thread::Current(), log_lock_); + ObjectLog& object_log = object_logs_[obj]; + object_log.LogCharValue(field_offset, value, is_volatile); +} + + +void Transaction::RecordWriteFieldShort(mirror::Object* obj, MemberOffset field_offset, + int16_t value, bool is_volatile) { + DCHECK(obj != nullptr); + MutexLock mu(Thread::Current(), log_lock_); + ObjectLog& object_log = object_logs_[obj]; + object_log.LogShortValue(field_offset, value, is_volatile); +} + + void Transaction::RecordWriteField32(mirror::Object* obj, MemberOffset field_offset, uint32_t value, bool is_volatile) { DCHECK(obj != nullptr); @@ -223,35 +257,42 @@ void Transaction::VisitStringLogs(RootCallback* callback, void* arg) { } } +void Transaction::ObjectLog::LogBooleanValue(MemberOffset offset, uint8_t value, bool is_volatile) { + LogValue(ObjectLog::kBoolean, offset, value, is_volatile); +} + +void Transaction::ObjectLog::LogByteValue(MemberOffset offset, int8_t value, bool is_volatile) { + LogValue(ObjectLog::kByte, offset, value, is_volatile); +} + +void Transaction::ObjectLog::LogCharValue(MemberOffset offset, uint16_t value, bool is_volatile) { + LogValue(ObjectLog::kChar, offset, value, is_volatile); +} + +void Transaction::ObjectLog::LogShortValue(MemberOffset offset, int16_t value, bool is_volatile) { + LogValue(ObjectLog::kShort, offset, value, is_volatile); +} + void Transaction::ObjectLog::Log32BitsValue(MemberOffset offset, uint32_t value, bool is_volatile) { - auto it = field_values_.find(offset.Uint32Value()); - if (it == field_values_.end()) { - ObjectLog::FieldValue field_value; - field_value.value = value; - field_value.is_volatile = is_volatile; - field_value.kind = ObjectLog::k32Bits; - field_values_.insert(std::make_pair(offset.Uint32Value(), field_value)); - } + LogValue(ObjectLog::k32Bits, offset, value, is_volatile); } void Transaction::ObjectLog::Log64BitsValue(MemberOffset offset, uint64_t value, bool is_volatile) { - auto it = field_values_.find(offset.Uint32Value()); - if (it == field_values_.end()) { - ObjectLog::FieldValue field_value; - field_value.value = value; - field_value.is_volatile = is_volatile; - field_value.kind = ObjectLog::k64Bits; - field_values_.insert(std::make_pair(offset.Uint32Value(), field_value)); - } + LogValue(ObjectLog::k64Bits, offset, value, is_volatile); } void 
Transaction::ObjectLog::LogReferenceValue(MemberOffset offset, mirror::Object* obj, bool is_volatile) { + LogValue(ObjectLog::kReference, offset, reinterpret_cast<uintptr_t>(obj), is_volatile); +} + +void Transaction::ObjectLog::LogValue(ObjectLog::FieldValueKind kind, + MemberOffset offset, uint64_t value, bool is_volatile) { auto it = field_values_.find(offset.Uint32Value()); if (it == field_values_.end()) { ObjectLog::FieldValue field_value; - field_value.value = reinterpret_cast<uintptr_t>(obj); + field_value.value = value; field_value.is_volatile = is_volatile; - field_value.kind = ObjectLog::kReference; + field_value.kind = kind; field_values_.insert(std::make_pair(offset.Uint32Value(), field_value)); } } @@ -281,6 +322,42 @@ void Transaction::ObjectLog::UndoFieldWrite(mirror::Object* obj, MemberOffset fi // we'd need to disable the check. constexpr bool kCheckTransaction = true; switch (field_value.kind) { + case kBoolean: + if (UNLIKELY(field_value.is_volatile)) { + obj->SetFieldBooleanVolatile<false, kCheckTransaction>(field_offset, + static_cast<bool>(field_value.value)); + } else { + obj->SetFieldBoolean<false, kCheckTransaction>(field_offset, + static_cast<bool>(field_value.value)); + } + break; + case kByte: + if (UNLIKELY(field_value.is_volatile)) { + obj->SetFieldByteVolatile<false, kCheckTransaction>(field_offset, + static_cast<int8_t>(field_value.value)); + } else { + obj->SetFieldByte<false, kCheckTransaction>(field_offset, + static_cast<int8_t>(field_value.value)); + } + break; + case kChar: + if (UNLIKELY(field_value.is_volatile)) { + obj->SetFieldCharVolatile<false, kCheckTransaction>(field_offset, + static_cast<uint16_t>(field_value.value)); + } else { + obj->SetFieldChar<false, kCheckTransaction>(field_offset, + static_cast<uint16_t>(field_value.value)); + } + break; + case kShort: + if (UNLIKELY(field_value.is_volatile)) { + obj->SetFieldShortVolatile<false, kCheckTransaction>(field_offset, + static_cast<int16_t>(field_value.value)); + } else { + obj->SetFieldShort<false, kCheckTransaction>(field_offset, + static_cast<int16_t>(field_value.value)); + } + break; case k32Bits: if (UNLIKELY(field_value.is_volatile)) { obj->SetField32Volatile<false, kCheckTransaction>(field_offset, diff --git a/runtime/transaction.h b/runtime/transaction.h index 7859126..6390049 100644 --- a/runtime/transaction.h +++ b/runtime/transaction.h @@ -41,6 +41,18 @@ class Transaction { ~Transaction(); // Record object field changes. 
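+ // One recording hook per field width: ObjectLog tags each saved old value with its kind (kBoolean, kByte, kChar, kShort, k32Bits, k64Bits, kReference) so that UndoFieldWrite can restore it with a store of the matching width. For example, a short field written inside a transaction is logged via RecordWriteFieldShort() and rolled back with SetFieldShort<false>().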
+ void RecordWriteFieldBoolean(mirror::Object* obj, MemberOffset field_offset, uint8_t value, + bool is_volatile) + LOCKS_EXCLUDED(log_lock_); + void RecordWriteFieldByte(mirror::Object* obj, MemberOffset field_offset, int8_t value, + bool is_volatile) + LOCKS_EXCLUDED(log_lock_); + void RecordWriteFieldChar(mirror::Object* obj, MemberOffset field_offset, uint16_t value, + bool is_volatile) + LOCKS_EXCLUDED(log_lock_); + void RecordWriteFieldShort(mirror::Object* obj, MemberOffset field_offset, int16_t value, + bool is_volatile) + LOCKS_EXCLUDED(log_lock_); void RecordWriteField32(mirror::Object* obj, MemberOffset field_offset, uint32_t value, bool is_volatile) LOCKS_EXCLUDED(log_lock_); @@ -82,6 +94,10 @@ class Transaction { private: class ObjectLog { public: + void LogBooleanValue(MemberOffset offset, uint8_t value, bool is_volatile); + void LogByteValue(MemberOffset offset, int8_t value, bool is_volatile); + void LogCharValue(MemberOffset offset, uint16_t value, bool is_volatile); + void LogShortValue(MemberOffset offset, int16_t value, bool is_volatile); void Log32BitsValue(MemberOffset offset, uint32_t value, bool is_volatile); void Log64BitsValue(MemberOffset offset, uint64_t value, bool is_volatile); void LogReferenceValue(MemberOffset offset, mirror::Object* obj, bool is_volatile); @@ -95,6 +111,10 @@ class Transaction { private: enum FieldValueKind { + kBoolean, + kByte, + kChar, + kShort, k32Bits, k64Bits, kReference @@ -106,6 +126,7 @@ class Transaction { bool is_volatile; }; + void LogValue(FieldValueKind kind, MemberOffset offset, uint64_t value, bool is_volatile); void UndoFieldWrite(mirror::Object* obj, MemberOffset field_offset, const FieldValue& field_value) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc index c01c692..6f9680f 100644 --- a/runtime/verifier/method_verifier.cc +++ b/runtime/verifier/method_verifier.cc @@ -2629,6 +2629,18 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) { case Instruction::IPUT_QUICK: VerifyIPutQuick(inst, reg_types_.Integer(), true); break; + case Instruction::IPUT_BOOLEAN_QUICK: + VerifyIPutQuick(inst, reg_types_.Boolean(), true); + break; + case Instruction::IPUT_BYTE_QUICK: + VerifyIPutQuick(inst, reg_types_.Byte(), true); + break; + case Instruction::IPUT_CHAR_QUICK: + VerifyIPutQuick(inst, reg_types_.Char(), true); + break; + case Instruction::IPUT_SHORT_QUICK: + VerifyIPutQuick(inst, reg_types_.Short(), true); + break; case Instruction::IPUT_WIDE_QUICK: VerifyIPutQuick(inst, reg_types_.LongLo(), true); break; @@ -2661,10 +2673,6 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) { case Instruction::UNUSED_43: case Instruction::UNUSED_79: case Instruction::UNUSED_7A: - case Instruction::UNUSED_EB: - case Instruction::UNUSED_EC: - case Instruction::UNUSED_ED: - case Instruction::UNUSED_EE: case Instruction::UNUSED_EF: case Instruction::UNUSED_F0: case Instruction::UNUSED_F1: |