author     Andreas Gampe <agampe@google.com>  2014-07-23 22:07:30 +0000
committer  Gerrit Code Review <noreply-gerritcodereview@google.com>  2014-07-23 18:51:55 +0000
commit     75c99e9dd8512a44d97d9818a74a69065ad82df0 (patch)
tree       3a24bfb0303ea76c30c30e2e2f6c614e4739ecb1
parent     0346b6bef939517b45216ee0eff4cf7a0c0667f4 (diff)
parent     9ee4519afd97121f893f82d41d23164fc6c9ed34 (diff)
Merge "x86: GenSelect utility update"
-rw-r--r--  compiler/dex/quick/arm/utility_arm.cc    2
-rw-r--r--  compiler/dex/quick/gen_common.cc        38
-rw-r--r--  compiler/dex/quick/mir_to_lir.h         38
-rw-r--r--  compiler/dex/quick/ralloc_util.cc       61
-rw-r--r--  compiler/dex/quick/x86/codegen_x86.h    19
-rwxr-xr-x  compiler/dex/quick/x86/int_x86.cc      176
6 files changed, 130 insertions, 204 deletions
diff --git a/compiler/dex/quick/arm/utility_arm.cc b/compiler/dex/quick/arm/utility_arm.cc
index 9cbf7b8..9bb9dda 100644
--- a/compiler/dex/quick/arm/utility_arm.cc
+++ b/compiler/dex/quick/arm/utility_arm.cc
@@ -1107,7 +1107,7 @@ LIR* ArmMir2Lir::StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r
// take 4, we can't directly allocate 2 more for LDREXD temps. In that case clobber r_ptr
// in LDREXD and recalculate it from r_base.
RegStorage r_temp = AllocTemp();
- RegStorage r_temp_high = AllocFreeTemp(); // We may not have another temp.
+ RegStorage r_temp_high = AllocTemp(false); // We may not have another temp.
if (r_temp_high.Valid()) {
NewLIR3(kThumb2Ldrexd, r_temp.GetReg(), r_temp_high.GetReg(), r_ptr.GetReg());
FreeTemp(r_temp_high);
diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc
index 279e8ae..adc228c 100644
--- a/compiler/dex/quick/gen_common.cc
+++ b/compiler/dex/quick/gen_common.cc
@@ -1191,15 +1191,14 @@ void Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_know
bool can_assume_type_is_in_dex_cache,
uint32_t type_idx, RegLocation rl_dest,
RegLocation rl_src) {
- // X86 has its own implementation.
- DCHECK(cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64);
-
FlushAllRegs();
// May generate a call - use explicit registers
LockCallTemps();
RegStorage method_reg = TargetReg(kArg1, kRef);
LoadCurrMethodDirect(method_reg); // kArg1 <= current Method*
RegStorage class_reg = TargetReg(kArg2, kRef); // kArg2 will hold the Class*
+ RegStorage ref_reg = TargetReg(kArg0, kRef); // kArg0 will hold the ref.
+ RegStorage ret_reg = GetReturn(kRefReg).reg;
if (needs_access_check) {
// Check we have access to type_idx and if not throw IllegalAccessError,
// returns Class* in kArg0
@@ -1210,16 +1209,16 @@ void Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_know
CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(4, pInitializeTypeAndVerifyAccess),
type_idx, true);
}
- OpRegCopy(class_reg, TargetReg(kRet0, kRef)); // Align usage with fast path
- LoadValueDirectFixed(rl_src, TargetReg(kArg0, kRef)); // kArg0 <= ref
+ OpRegCopy(class_reg, ret_reg); // Align usage with fast path
+ LoadValueDirectFixed(rl_src, ref_reg); // kArg0 <= ref
} else if (use_declaring_class) {
- LoadValueDirectFixed(rl_src, TargetReg(kArg0, kRef)); // kArg0 <= ref
+ LoadValueDirectFixed(rl_src, ref_reg); // kArg0 <= ref
LoadRefDisp(method_reg, mirror::ArtMethod::DeclaringClassOffset().Int32Value(),
class_reg, kNotVolatile);
} else {
if (can_assume_type_is_in_dex_cache) {
// Conditionally, as in the other case we will also load it.
- LoadValueDirectFixed(rl_src, TargetReg(kArg0, kRef)); // kArg0 <= ref
+ LoadValueDirectFixed(rl_src, ref_reg); // kArg0 <= ref
}
// Load dex cache entry into class_reg (kArg2)
@@ -1232,7 +1231,7 @@ void Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_know
LIR* slow_path_target = NewLIR0(kPseudoTargetLabel);
// Should load value here.
- LoadValueDirectFixed(rl_src, TargetReg(kArg0, kRef)); // kArg0 <= ref
+ LoadValueDirectFixed(rl_src, ref_reg); // kArg0 <= ref
class InitTypeSlowPath : public Mir2Lir::LIRSlowPath {
public:
@@ -1269,21 +1268,22 @@ void Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_know
}
/* kArg0 is ref, kArg2 is class. If ref==null, use directly as bool result */
RegLocation rl_result = GetReturn(kCoreReg);
- if (cu_->instruction_set == kMips) {
- // On MIPS rArg0 != rl_result, place false in result if branch is taken.
+ if (!IsSameReg(rl_result.reg, ref_reg)) {
+ // On MIPS and x86_64 rArg0 != rl_result, place false in result if branch is taken.
LoadConstant(rl_result.reg, 0);
}
- LIR* branch1 = OpCmpImmBranch(kCondEq, TargetReg(kArg0, kRef), 0, NULL);
+ LIR* branch1 = OpCmpImmBranch(kCondEq, ref_reg, 0, NULL);
/* load object->klass_ */
+ RegStorage ref_class_reg = TargetReg(kArg1, kRef); // kArg1 will hold the Class* of ref.
DCHECK_EQ(mirror::Object::ClassOffset().Int32Value(), 0);
- LoadRefDisp(TargetReg(kArg0, kRef), mirror::Object::ClassOffset().Int32Value(),
- TargetReg(kArg1, kRef), kNotVolatile);
+ LoadRefDisp(ref_reg, mirror::Object::ClassOffset().Int32Value(),
+ ref_class_reg, kNotVolatile);
/* kArg0 is ref, kArg1 is ref->klass_, kArg2 is class */
LIR* branchover = NULL;
if (type_known_final) {
- // rl_result == ref == null == 0.
- GenSelectConst32(TargetReg(kArg1, kRef), TargetReg(kArg2, kRef), kCondEq, 1, 0, rl_result.reg,
+ // rl_result == ref == class.
+ GenSelectConst32(ref_class_reg, class_reg, kCondEq, 1, 0, rl_result.reg,
kCoreReg);
} else {
if (cu_->instruction_set == kThumb2) {
@@ -1293,11 +1293,11 @@ void Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_know
LIR* it = nullptr;
if (!type_known_abstract) {
/* Uses conditional nullification */
- OpRegReg(kOpCmp, TargetReg(kArg1, kRef), TargetReg(kArg2, kRef)); // Same?
+ OpRegReg(kOpCmp, ref_class_reg, class_reg); // Same?
it = OpIT(kCondEq, "EE"); // if-convert the test
- LoadConstant(TargetReg(kArg0, kNotWide), 1); // .eq case - load true
+ LoadConstant(rl_result.reg, 1); // .eq case - load true
}
- OpRegCopy(TargetReg(kArg0, kRef), TargetReg(kArg2, kRef)); // .ne case - arg0 <= class
+ OpRegCopy(ref_reg, class_reg); // .ne case - arg0 <= class
OpReg(kOpBlx, r_tgt); // .ne case: helper(class, ref->class)
if (it != nullptr) {
OpEndIT(it);
@@ -1310,7 +1310,7 @@ void Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_know
branchover = OpCmpBranch(kCondEq, TargetReg(kArg1, kRef), TargetReg(kArg2, kRef), NULL);
}
- OpRegCopy(TargetReg(kArg0, kRef), TargetReg(kArg2, kRef)); // .ne case - arg0 <= class
+ OpRegCopy(TargetReg(kArg0, kRef), class_reg); // .ne case - arg0 <= class
if (cu_->target64) {
CallRuntimeHelper(QUICK_ENTRYPOINT_OFFSET(8, pInstanceofNonTrivial), false);
} else {
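
For context: with the x86-specific DCHECK removed, this generic Mir2Lir::GenInstanceofCallingHelper now serves x86 and x86-64 as well, keying the "preload false" step off IsSameReg(rl_result.reg, ref_reg) instead of a MIPS-only check and routing the known-final case through the generalized GenSelectConst32. Below is a minimal self-contained sketch of the value that known-final fast path computes, using hypothetical MockObject/MockClass stand-ins rather than ART types:

    #include <cstdint>

    struct MockClass {};
    struct MockObject { MockClass* klass_; };  // klass_ sits at offset 0, matching the DCHECK above

    // A null ref yields 0; otherwise an exact class match suffices for a final type.
    int32_t InstanceofFinalResult(const MockObject* ref, const MockClass* klass) {
      if (ref == nullptr) {
        return 0;  // "If ref==null, use directly as bool result"
      }
      // GenSelectConst32(ref_class_reg, class_reg, kCondEq, 1, 0, ...) materializes this compare.
      return (ref->klass_ == klass) ? 1 : 0;
    }
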
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
index 6f4a965..f183dc9 100644
--- a/compiler/dex/quick/mir_to_lir.h
+++ b/compiler/dex/quick/mir_to_lir.h
@@ -716,14 +716,13 @@ class Mir2Lir : public Backend {
virtual RegStorage AllocPreservedSingle(int s_reg);
virtual RegStorage AllocPreservedDouble(int s_reg);
RegStorage AllocTempBody(GrowableArray<RegisterInfo*> &regs, int* next_temp, bool required);
- virtual RegStorage AllocFreeTemp();
- virtual RegStorage AllocTemp();
- virtual RegStorage AllocTempWide();
- virtual RegStorage AllocTempRef();
- virtual RegStorage AllocTempSingle();
- virtual RegStorage AllocTempDouble();
- virtual RegStorage AllocTypedTemp(bool fp_hint, int reg_class);
- virtual RegStorage AllocTypedTempWide(bool fp_hint, int reg_class);
+ virtual RegStorage AllocTemp(bool required = true);
+ virtual RegStorage AllocTempWide(bool required = true);
+ virtual RegStorage AllocTempRef(bool required = true);
+ virtual RegStorage AllocTempSingle(bool required = true);
+ virtual RegStorage AllocTempDouble(bool required = true);
+ virtual RegStorage AllocTypedTemp(bool fp_hint, int reg_class, bool required = true);
+ virtual RegStorage AllocTypedTempWide(bool fp_hint, int reg_class, bool required = true);
void FlushReg(RegStorage reg);
void FlushRegWide(RegStorage reg);
RegStorage AllocLiveReg(int s_reg, int reg_class, bool wide);
@@ -1353,7 +1352,6 @@ class Mir2Lir : public Backend {
/**
* @brief Generates code to select one of the given constants depending on the given opcode.
- * @note Will neither call EvalLoc nor StoreValue for rl_dest.
*/
virtual void GenSelectConst32(RegStorage left_op, RegStorage right_op, ConditionCode code,
int32_t true_val, int32_t false_val, RegStorage rs_dest,
@@ -1515,11 +1513,23 @@ class Mir2Lir : public Backend {
void AddSlowPath(LIRSlowPath* slowpath);
- virtual void GenInstanceofCallingHelper(bool needs_access_check, bool type_known_final,
- bool type_known_abstract, bool use_declaring_class,
- bool can_assume_type_is_in_dex_cache,
- uint32_t type_idx, RegLocation rl_dest,
- RegLocation rl_src);
+ /*
+ *
+   * @brief Set up an instanceof check against a class.
+ * @param needs_access_check 'true' if we must check the access.
+ * @param type_known_final 'true' if the type is known to be a final class.
+ * @param type_known_abstract 'true' if the type is known to be an abstract class.
+ * @param use_declaring_class 'true' if the type can be loaded off the current Method*.
+ * @param can_assume_type_is_in_dex_cache 'true' if the type is known to be in the cache.
+ * @param type_idx Type index to use if use_declaring_class is 'false'.
+ * @param rl_dest Result to be set to 0 or 1.
+ * @param rl_src Object to be tested.
+ */
+ void GenInstanceofCallingHelper(bool needs_access_check, bool type_known_final,
+ bool type_known_abstract, bool use_declaring_class,
+ bool can_assume_type_is_in_dex_cache,
+ uint32_t type_idx, RegLocation rl_dest,
+ RegLocation rl_src);
/*
* @brief Generate the debug_frame FDE information if possible.
* @returns pointer to vector containg CFE information, or NULL.
diff --git a/compiler/dex/quick/ralloc_util.cc b/compiler/dex/quick/ralloc_util.cc
index fa1c36e..45244e1 100644
--- a/compiler/dex/quick/ralloc_util.cc
+++ b/compiler/dex/quick/ralloc_util.cc
@@ -408,64 +408,67 @@ RegStorage Mir2Lir::AllocTempBody(GrowableArray<RegisterInfo*> &regs, int* next_
return RegStorage::InvalidReg(); // No register available
}
-/* Return a temp if one is available, -1 otherwise */
-RegStorage Mir2Lir::AllocFreeTemp() {
- return AllocTempBody(reg_pool_->core_regs_, &reg_pool_->next_core_reg_, false);
+RegStorage Mir2Lir::AllocTemp(bool required) {
+ return AllocTempBody(reg_pool_->core_regs_, &reg_pool_->next_core_reg_, required);
}
-RegStorage Mir2Lir::AllocTemp() {
- return AllocTempBody(reg_pool_->core_regs_, &reg_pool_->next_core_reg_, true);
-}
-
-RegStorage Mir2Lir::AllocTempWide() {
+RegStorage Mir2Lir::AllocTempWide(bool required) {
RegStorage res;
if (reg_pool_->core64_regs_.Size() != 0) {
- res = AllocTempBody(reg_pool_->core64_regs_, &reg_pool_->next_core64_reg_, true);
+ res = AllocTempBody(reg_pool_->core64_regs_, &reg_pool_->next_core64_reg_, required);
} else {
RegStorage low_reg = AllocTemp();
RegStorage high_reg = AllocTemp();
res = RegStorage::MakeRegPair(low_reg, high_reg);
}
- CheckRegStorage(res, WidenessCheck::kCheckWide, RefCheck::kIgnoreRef, FPCheck::kCheckNotFP);
+ if (required) {
+ CheckRegStorage(res, WidenessCheck::kCheckWide, RefCheck::kIgnoreRef, FPCheck::kCheckNotFP);
+ }
return res;
}
-RegStorage Mir2Lir::AllocTempRef() {
- RegStorage res = AllocTempBody(*reg_pool_->ref_regs_, reg_pool_->next_ref_reg_, true);
- DCHECK(!res.IsPair());
- CheckRegStorage(res, WidenessCheck::kCheckNotWide, RefCheck::kCheckRef, FPCheck::kCheckNotFP);
+RegStorage Mir2Lir::AllocTempRef(bool required) {
+ RegStorage res = AllocTempBody(*reg_pool_->ref_regs_, reg_pool_->next_ref_reg_, required);
+ if (required) {
+ DCHECK(!res.IsPair());
+ CheckRegStorage(res, WidenessCheck::kCheckNotWide, RefCheck::kCheckRef, FPCheck::kCheckNotFP);
+ }
return res;
}
-RegStorage Mir2Lir::AllocTempSingle() {
- RegStorage res = AllocTempBody(reg_pool_->sp_regs_, &reg_pool_->next_sp_reg_, true);
- DCHECK(res.IsSingle()) << "Reg: 0x" << std::hex << res.GetRawBits();
- CheckRegStorage(res, WidenessCheck::kCheckNotWide, RefCheck::kCheckNotRef, FPCheck::kIgnoreFP);
+RegStorage Mir2Lir::AllocTempSingle(bool required) {
+ RegStorage res = AllocTempBody(reg_pool_->sp_regs_, &reg_pool_->next_sp_reg_, required);
+ if (required) {
+ DCHECK(res.IsSingle()) << "Reg: 0x" << std::hex << res.GetRawBits();
+ CheckRegStorage(res, WidenessCheck::kCheckNotWide, RefCheck::kCheckNotRef, FPCheck::kIgnoreFP);
+ }
return res;
}
-RegStorage Mir2Lir::AllocTempDouble() {
- RegStorage res = AllocTempBody(reg_pool_->dp_regs_, &reg_pool_->next_dp_reg_, true);
- DCHECK(res.IsDouble()) << "Reg: 0x" << std::hex << res.GetRawBits();
- CheckRegStorage(res, WidenessCheck::kCheckWide, RefCheck::kCheckNotRef, FPCheck::kIgnoreFP);
+RegStorage Mir2Lir::AllocTempDouble(bool required) {
+ RegStorage res = AllocTempBody(reg_pool_->dp_regs_, &reg_pool_->next_dp_reg_, required);
+ if (required) {
+ DCHECK(res.IsDouble()) << "Reg: 0x" << std::hex << res.GetRawBits();
+ CheckRegStorage(res, WidenessCheck::kCheckWide, RefCheck::kCheckNotRef, FPCheck::kIgnoreFP);
+ }
return res;
}
-RegStorage Mir2Lir::AllocTypedTempWide(bool fp_hint, int reg_class) {
+RegStorage Mir2Lir::AllocTypedTempWide(bool fp_hint, int reg_class, bool required) {
DCHECK_NE(reg_class, kRefReg); // NOTE: the Dalvik width of a reference is always 32 bits.
if (((reg_class == kAnyReg) && fp_hint) || (reg_class == kFPReg)) {
- return AllocTempDouble();
+ return AllocTempDouble(required);
}
- return AllocTempWide();
+ return AllocTempWide(required);
}
-RegStorage Mir2Lir::AllocTypedTemp(bool fp_hint, int reg_class) {
+RegStorage Mir2Lir::AllocTypedTemp(bool fp_hint, int reg_class, bool required) {
if (((reg_class == kAnyReg) && fp_hint) || (reg_class == kFPReg)) {
- return AllocTempSingle();
+ return AllocTempSingle(required);
} else if (reg_class == kRefReg) {
- return AllocTempRef();
+ return AllocTempRef(required);
}
- return AllocTemp();
+ return AllocTemp(required);
}
RegStorage Mir2Lir::FindLiveReg(GrowableArray<RegisterInfo*> &regs, int s_reg) {
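
For context: the former AllocFreeTemp() is folded into AllocTemp() and friends via the new required parameter. With required == true (the default) the allocator must hand back a register, while required == false may return RegStorage::InvalidReg() for the caller to screen with Valid(), as the utility_arm.cc hunk above does. A minimal illustrative mock of that contract (hypothetical MockReg/MockPool types, not ART code):

    #include <cassert>
    #include <vector>

    struct MockReg {
      int id = -1;
      bool Valid() const { return id >= 0; }
    };

    struct MockPool {
      std::vector<int> free_regs;

      // required == true: must succeed (asserted here, DCHECK-style).
      // required == false: may return an invalid register; the caller checks Valid().
      MockReg AllocTemp(bool required = true) {
        if (free_regs.empty()) {
          assert(!required && "a required temp allocation must not fail");
          return MockReg();  // mirrors RegStorage::InvalidReg()
        }
        MockReg r;
        r.id = free_regs.back();
        free_regs.pop_back();
        return r;
      }
    };

A caller that can tolerate failure writes MockReg r = pool.AllocTemp(false); and only uses r when r.Valid() reports true, the same pattern the ARM StoreBaseDisp change adopts.
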
diff --git a/compiler/dex/quick/x86/codegen_x86.h b/compiler/dex/quick/x86/codegen_x86.h
index d3982be..49c0a03 100644
--- a/compiler/dex/quick/x86/codegen_x86.h
+++ b/compiler/dex/quick/x86/codegen_x86.h
@@ -206,9 +206,6 @@ class X86Mir2Lir : public Mir2Lir {
void GenSelectConst32(RegStorage left_op, RegStorage right_op, ConditionCode code,
int32_t true_val, int32_t false_val, RegStorage rs_dest,
int dest_reg_class) OVERRIDE;
- // Optimized version for selection of 0 and 1.
- void GenSelectConst01(RegStorage left_op, RegStorage right_op, ConditionCode code, bool true_val,
- RegStorage rs_dest);
bool GenMemBarrier(MemBarrierKind barrier_kind);
void GenMoveException(RegLocation rl_dest);
void GenMultiplyByTwoBitMultiplier(RegLocation rl_src, RegLocation rl_result, int lit,
@@ -274,22 +271,6 @@ class X86Mir2Lir : public Mir2Lir {
*/
void GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx, RegLocation rl_dest,
RegLocation rl_src);
- /*
- *
- * @brief Implement Set up instanceof a class with x86 specific code.
- * @param needs_access_check 'true' if we must check the access.
- * @param type_known_final 'true' if the type is known to be a final class.
- * @param type_known_abstract 'true' if the type is known to be an abstract class.
- * @param use_declaring_class 'true' if the type can be loaded off the current Method*.
- * @param can_assume_type_is_in_dex_cache 'true' if the type is known to be in the cache.
- * @param type_idx Type index to use if use_declaring_class is 'false'.
- * @param rl_dest Result to be set to 0 or 1.
- * @param rl_src Object to be tested.
- */
- void GenInstanceofCallingHelper(bool needs_access_check, bool type_known_final,
- bool type_known_abstract, bool use_declaring_class,
- bool can_assume_type_is_in_dex_cache,
- uint32_t type_idx, RegLocation rl_dest, RegLocation rl_src);
void GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest,
RegLocation rl_src1, RegLocation rl_shift);
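
For context: GenSelectConst01 disappears because its 0/1 special case now lives inside the generalized X86Mir2Lir::GenSelectConst32 (next file), which tests IsByteRegister(rs_dest) rather than rejecting non-byte registers outright; on 32-bit x86 only EAX, ECX, EDX, and EBX expose an 8-bit sub-register that set<cc> can write, and the new cmov/branch paths cover every other destination. A minimal sketch of what the set<cc> path computes for true_val == 1 and false_val == 0 (illustrative C++ only, with kCondLt chosen arbitrarily as the condition):

    #include <cstdint>

    // Models: cmp left, right; setl dest_lo; movzx dest, dest_lo. A branch-free 0/1 result.
    int32_t SelectOneIfLess(int32_t left, int32_t right) {
      return static_cast<int32_t>(left < right);  // bool converts to exactly 0 or 1
    }
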
diff --git a/compiler/dex/quick/x86/int_x86.cc b/compiler/dex/quick/x86/int_x86.cc
index b71ecb1..724ee7e 100755
--- a/compiler/dex/quick/x86/int_x86.cc
+++ b/compiler/dex/quick/x86/int_x86.cc
@@ -206,36 +206,65 @@ void X86Mir2Lir::OpRegCopyWide(RegStorage r_dest, RegStorage r_src) {
}
}
-// Set rs_dest to 0 or 1 depending on the comparison between left_op and right_op.
-// rs_dest := (left_op <code> right_op) ? [true_val] : [!true_val]
-//
-// Implemented as:
-// true_val = true => rs_dest := 0;
-// rs_dest := (left_op <code> right_op) ? 1 : rs_dest;
-// true_val = false => rs_dest := 0;
-// rs_dest := (left_op <~code> right_op) ? 1 : rs_dest;
-void X86Mir2Lir::GenSelectConst01(RegStorage left_op, RegStorage right_op, ConditionCode code,
- bool true_val, RegStorage rs_dest) {
- LoadConstant(rs_dest, 0);
- OpRegReg(kOpCmp, left_op, right_op);
- // Set the low byte of the result to 0 or 1 from the compare condition code.
- NewLIR2(kX86Set8R, rs_dest.GetReg(),
- X86ConditionEncoding(true_val ? code : FlipComparisonOrder(code)));
-}
-
void X86Mir2Lir::GenSelectConst32(RegStorage left_op, RegStorage right_op, ConditionCode code,
int32_t true_val, int32_t false_val, RegStorage rs_dest,
int dest_reg_class) {
- if ((true_val == 0 && false_val == 1) || (true_val == 1 && false_val == 0)) {
- // Can we use Setcc?
- if (rs_dest.Is64Bit() || rs_dest.GetRegNum() < 4) {
- GenSelectConst01(left_op, right_op, code, true_val == 1, rs_dest);
- return;
- }
+ DCHECK(!left_op.IsPair() && !right_op.IsPair() && !rs_dest.IsPair());
+ DCHECK(!left_op.IsFloat() && !right_op.IsFloat() && !rs_dest.IsFloat());
+
+ // We really need this check for correctness, otherwise we will need to do more checks in
+ // non zero/one case
+ if (true_val == false_val) {
+ LoadConstantNoClobber(rs_dest, true_val);
+ return;
}
- // TODO: Refactor the code below to make this more general.
- UNIMPLEMENTED(FATAL) << "General GenSelectConst32 not implemented for x86.";
+ const bool dest_intersect = IsSameReg(rs_dest, left_op) || IsSameReg(rs_dest, right_op);
+
+ const bool zero_one_case = (true_val == 0 && false_val == 1) || (true_val == 1 && false_val == 0);
+ if (zero_one_case && IsByteRegister(rs_dest)) {
+ if (!dest_intersect) {
+ LoadConstantNoClobber(rs_dest, 0);
+ }
+ OpRegReg(kOpCmp, left_op, right_op);
+ // Set the low byte of the result to 0 or 1 from the compare condition code.
+ NewLIR2(kX86Set8R, rs_dest.GetReg(),
+ X86ConditionEncoding(true_val == 1 ? code : FlipComparisonOrder(code)));
+ if (dest_intersect) {
+ NewLIR2(rs_dest.Is64Bit() ? kX86Movzx8qRR : kX86Movzx8RR, rs_dest.GetReg(), rs_dest.GetReg());
+ }
+ } else {
+ // Be careful rs_dest can be changed only after cmp because it can be the same as one of ops
+ // and it cannot use xor because it makes cc flags to be dirty
+ RegStorage temp_reg = AllocTypedTemp(false, dest_reg_class, false);
+ if (temp_reg.Valid()) {
+ if (false_val == 0 && dest_intersect) {
+ code = FlipComparisonOrder(code);
+ std::swap(true_val, false_val);
+ }
+ if (!dest_intersect) {
+ LoadConstantNoClobber(rs_dest, false_val);
+ }
+ LoadConstantNoClobber(temp_reg, true_val);
+ OpRegReg(kOpCmp, left_op, right_op);
+ if (dest_intersect) {
+ LoadConstantNoClobber(rs_dest, false_val);
+ DCHECK(!last_lir_insn_->u.m.def_mask->HasBit(ResourceMask::kCCode));
+ }
+ OpCondRegReg(kOpCmov, code, rs_dest, temp_reg);
+ FreeTemp(temp_reg);
+ } else {
+ // slow path
+ LIR* cmp_branch = OpCmpBranch(code, left_op, right_op, nullptr);
+ LoadConstantNoClobber(rs_dest, false_val);
+ LIR* that_is_it = NewLIR1(kX86Jmp8, 0);
+ LIR* true_case = NewLIR0(kPseudoTargetLabel);
+ cmp_branch->target = true_case;
+ LoadConstantNoClobber(rs_dest, true_val);
+ LIR* end = NewLIR0(kPseudoTargetLabel);
+ that_is_it->target = end;
+ }
+ }
}
void X86Mir2Lir::GenSelect(BasicBlock* bb, MIR* mir) {
@@ -2431,103 +2460,6 @@ void X86Mir2Lir::GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx,
StoreValue(rl_dest, rl_result);
}
-void X86Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_known_final,
- bool type_known_abstract, bool use_declaring_class,
- bool can_assume_type_is_in_dex_cache,
- uint32_t type_idx, RegLocation rl_dest,
- RegLocation rl_src) {
- FlushAllRegs();
- // May generate a call - use explicit registers.
- LockCallTemps();
- RegStorage method_reg = TargetReg(kArg1, kRef); // kArg1 gets current Method*.
- LoadCurrMethodDirect(method_reg);
- RegStorage class_reg = TargetReg(kArg2, kRef); // kArg2 will hold the Class*.
- RegStorage ref_reg = TargetReg(kArg0, kRef); // kArg2 will hold the ref.
- // Reference must end up in kArg0.
- if (needs_access_check) {
- // Check we have access to type_idx and if not throw IllegalAccessError,
- // Caller function returns Class* in kArg0.
- if (cu_->target64) {
- CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(8, pInitializeTypeAndVerifyAccess),
- type_idx, true);
- } else {
- CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(4, pInitializeTypeAndVerifyAccess),
- type_idx, true);
- }
- OpRegCopy(class_reg, TargetReg(kRet0, kRef));
- LoadValueDirectFixed(rl_src, ref_reg);
- } else if (use_declaring_class) {
- LoadValueDirectFixed(rl_src, ref_reg);
- LoadRefDisp(method_reg, mirror::ArtMethod::DeclaringClassOffset().Int32Value(),
- class_reg, kNotVolatile);
- } else {
- // Load dex cache entry into class_reg (kArg2).
- LoadValueDirectFixed(rl_src, ref_reg);
- LoadRefDisp(method_reg, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
- class_reg, kNotVolatile);
- int32_t offset_of_type =
- mirror::Array::DataOffset(sizeof(mirror::HeapReference<mirror::Class*>)).Int32Value() +
- (sizeof(mirror::HeapReference<mirror::Class*>) * type_idx);
- LoadRefDisp(class_reg, offset_of_type, class_reg, kNotVolatile);
- if (!can_assume_type_is_in_dex_cache) {
- // Need to test presence of type in dex cache at runtime.
- LIR* hop_branch = OpCmpImmBranch(kCondNe, class_reg, 0, NULL);
- // Type is not resolved. Call out to helper, which will return resolved type in kRet0/kArg0.
- if (cu_->target64) {
- CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(8, pInitializeType), type_idx, true);
- } else {
- CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(4, pInitializeType), type_idx, true);
- }
- OpRegCopy(class_reg, TargetReg(kRet0, kRef)); // Align usage with fast path.
- LoadValueDirectFixed(rl_src, ref_reg); /* Reload Ref. */
- // Rejoin code paths
- LIR* hop_target = NewLIR0(kPseudoTargetLabel);
- hop_branch->target = hop_target;
- }
- }
- /* kArg0 is ref, kArg2 is class. If ref==null, use directly as bool result. */
- RegLocation rl_result = GetReturn(kRefReg);
-
- // On x86-64 kArg0 is not EAX, so we have to copy ref from kArg0 to EAX.
- if (cu_->target64) {
- OpRegCopy(rl_result.reg, ref_reg);
- }
-
- // Is the class NULL?
- LIR* branch1 = OpCmpImmBranch(kCondEq, ref_reg, 0, NULL);
-
- RegStorage ref_class_reg = TargetReg(kArg1, kRef); // kArg2 will hold the Class*.
- /* Load object->klass_. */
- DCHECK_EQ(mirror::Object::ClassOffset().Int32Value(), 0);
- LoadRefDisp(ref_reg, mirror::Object::ClassOffset().Int32Value(), ref_class_reg,
- kNotVolatile);
- /* kArg0 is ref, kArg1 is ref->klass_, kArg2 is class. */
- LIR* branchover = nullptr;
- if (type_known_final) {
- GenSelectConst32(ref_class_reg, class_reg, kCondEq, 1, 0, rl_result.reg, kCoreReg);
- } else {
- if (!type_known_abstract) {
- LoadConstant(rl_result.reg, 1); // Assume result succeeds.
- branchover = OpCmpBranch(kCondEq, ref_class_reg, class_reg, NULL);
- }
- OpRegCopy(TargetReg(kArg0, kRef), class_reg);
- if (cu_->target64) {
- OpThreadMem(kOpBlx, QUICK_ENTRYPOINT_OFFSET(8, pInstanceofNonTrivial));
- } else {
- OpThreadMem(kOpBlx, QUICK_ENTRYPOINT_OFFSET(4, pInstanceofNonTrivial));
- }
- }
- // TODO: only clobber when type isn't final?
- ClobberCallerSave();
- /* Branch targets here. */
- LIR* target = NewLIR0(kPseudoTargetLabel);
- StoreValue(rl_dest, rl_result);
- branch1->target = target;
- if (branchover != nullptr) {
- branchover->target = target;
- }
-}
-
void X86Mir2Lir::GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest,
RegLocation rl_lhs, RegLocation rl_rhs) {
OpKind op = kOpBkpt;
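
For context: the two general-case lowerings added above either (a) preload false_val into rs_dest, hold true_val in a scratch register, and let cmov pick the winner, or (b) fall back to a compare-and-branch when AllocTypedTemp(..., required = false) finds no scratch register. A minimal illustrative model of what each path computes, assuming rs_dest aliases neither operand and using kCondEq as the example condition (not ART code):

    #include <cstdint>

    // cmov path: dest starts as false_val and is conditionally overwritten from the temp.
    int32_t SelectConst32CmovModel(int32_t left, int32_t right,
                                   int32_t true_val, int32_t false_val) {
      int32_t dest = false_val;  // LoadConstantNoClobber(rs_dest, false_val)
      int32_t temp = true_val;   // LoadConstantNoClobber(temp_reg, true_val)
      if (left == right) {       // OpRegReg(kOpCmp, ...) then OpCondRegReg(kOpCmov, kCondEq, ...)
        dest = temp;             // the real cmov does this without a branch
      }
      return dest;
    }

    // branch path: jump to the true case on a match, otherwise load false_val and skip over it.
    int32_t SelectConst32BranchModel(int32_t left, int32_t right,
                                     int32_t true_val, int32_t false_val) {
      if (left == right) {       // OpCmpBranch(kCondEq, left_op, right_op, ...)
        return true_val;         // true_case label
      }
      return false_val;          // fall-through: LoadConstantNoClobber, then kX86Jmp8 past the true case
    }
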