-rw-r--r--  compiler/dex/compiler_enums.h               7
-rw-r--r--  compiler/dex/quick/arm/call_arm.cc          4
-rw-r--r--  compiler/dex/quick/arm/codegen_arm.h        8
-rw-r--r--  compiler/dex/quick/arm/int_arm.cc          12
-rw-r--r--  compiler/dex/quick/arm/utility_arm.cc     129
-rw-r--r--  compiler/dex/quick/arm64/arm64_lir.h        1
-rw-r--r--  compiler/dex/quick/arm64/assemble_arm64.cc 31
-rw-r--r--  compiler/dex/quick/arm64/call_arm64.cc      6
-rw-r--r--  compiler/dex/quick/arm64/codegen_arm64.h   62
-rw-r--r--  compiler/dex/quick/arm64/int_arm64.cc      29
-rw-r--r--  compiler/dex/quick/arm64/target_arm64.cc   29
-rw-r--r--  compiler/dex/quick/arm64/utility_arm64.cc  72
-rw-r--r--  compiler/dex/quick/codegen_util.cc          9
-rw-r--r--  compiler/dex/quick/gen_common.cc          129
-rw-r--r--  compiler/dex/quick/gen_invoke.cc           36
-rw-r--r--  compiler/dex/quick/gen_loadstore.cc        12
-rw-r--r--  compiler/dex/quick/mips/call_mips.cc        4
-rw-r--r--  compiler/dex/quick/mips/codegen_mips.h      8
-rw-r--r--  compiler/dex/quick/mips/int_mips.cc         8
-rw-r--r--  compiler/dex/quick/mips/utility_mips.cc    52
-rw-r--r--  compiler/dex/quick/mir_to_lir.cc           30
-rw-r--r--  compiler/dex/quick/mir_to_lir.h            36
-rw-r--r--  compiler/dex/quick/ralloc_util.cc           6
-rw-r--r--  compiler/dex/quick/x86/call_x86.cc          3
-rw-r--r--  compiler/dex/quick/x86/codegen_x86.h        8
-rw-r--r--  compiler/dex/quick/x86/fp_x86.cc           12
-rw-r--r--  compiler/dex/quick/x86/int_x86.cc          38
-rw-r--r--  compiler/dex/quick/x86/target_x86.cc       18
-rw-r--r--  compiler/dex/quick/x86/utility_x86.cc      47
29 files changed, 327 insertions, 519 deletions
diff --git a/compiler/dex/compiler_enums.h b/compiler/dex/compiler_enums.h
index caecb7a..de9ac4b 100644
--- a/compiler/dex/compiler_enums.h
+++ b/compiler/dex/compiler_enums.h
@@ -527,13 +527,6 @@ enum FixupKind {
std::ostream& operator<<(std::ostream& os, const FixupKind& kind);
-enum VolatileKind {
- kNotVolatile, // Load/Store is not volatile
- kVolatile // Load/Store is volatile
-};
-
-std::ostream& operator<<(std::ostream& os, const VolatileKind& kind);
-
} // namespace art
#endif // ART_COMPILER_DEX_COMPILER_ENUMS_H_
diff --git a/compiler/dex/quick/arm/call_arm.cc b/compiler/dex/quick/arm/call_arm.cc
index 04d6898..590c767 100644
--- a/compiler/dex/quick/arm/call_arm.cc
+++ b/compiler/dex/quick/arm/call_arm.cc
@@ -316,9 +316,9 @@ void ArmMir2Lir::GenMoveException(RegLocation rl_dest) {
int ex_offset = Thread::ExceptionOffset<4>().Int32Value();
RegLocation rl_result = EvalLoc(rl_dest, kRefReg, true);
RegStorage reset_reg = AllocTempRef();
- LoadRefDisp(rs_rARM_SELF, ex_offset, rl_result.reg, kNotVolatile);
+ LoadRefDisp(rs_rARM_SELF, ex_offset, rl_result.reg);
LoadConstant(reset_reg, 0);
- StoreRefDisp(rs_rARM_SELF, ex_offset, reset_reg, kNotVolatile);
+ StoreRefDisp(rs_rARM_SELF, ex_offset, reset_reg);
FreeTemp(reset_reg);
StoreValue(rl_dest, rl_result);
}
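
Aside, not part of the change itself: the LoadRefDisp/StoreRefDisp pair in GenMoveException reads the thread's pending exception and then clears the slot. A minimal sketch using hypothetical plain structs in place of ART's Thread and mirror::Object:

struct Object;
struct Thread { Object* exception_; };  // Stand-in for the field at Thread::ExceptionOffset().

// Fetch the pending exception and null out the slot, matching the
// load followed by a zero store emitted above.
Object* MoveException(Thread* self) {
  Object* ex = self->exception_;
  self->exception_ = nullptr;
  return ex;
}
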
diff --git a/compiler/dex/quick/arm/codegen_arm.h b/compiler/dex/quick/arm/codegen_arm.h
index 70dce7f..4499862 100644
--- a/compiler/dex/quick/arm/codegen_arm.h
+++ b/compiler/dex/quick/arm/codegen_arm.h
@@ -33,16 +33,20 @@ class ArmMir2Lir FINAL : public Mir2Lir {
LIR* CheckSuspendUsingLoad() OVERRIDE;
RegStorage LoadHelper(ThreadOffset<4> offset) OVERRIDE;
RegStorage LoadHelper(ThreadOffset<8> offset) OVERRIDE;
+ LIR* LoadBaseDispVolatile(RegStorage r_base, int displacement, RegStorage r_dest,
+ OpSize size) OVERRIDE;
LIR* LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
- OpSize size, VolatileKind is_volatile) OVERRIDE;
+ OpSize size) OVERRIDE;
LIR* LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest, int scale,
OpSize size) OVERRIDE;
LIR* LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, int displacement,
RegStorage r_dest, OpSize size) OVERRIDE;
LIR* LoadConstantNoClobber(RegStorage r_dest, int value);
LIR* LoadConstantWide(RegStorage r_dest, int64_t value);
+ LIR* StoreBaseDispVolatile(RegStorage r_base, int displacement, RegStorage r_src,
+ OpSize size) OVERRIDE;
LIR* StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src,
- OpSize size, VolatileKind is_volatile) OVERRIDE;
+ OpSize size) OVERRIDE;
LIR* StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src, int scale,
OpSize size) OVERRIDE;
LIR* StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, int displacement,
diff --git a/compiler/dex/quick/arm/int_arm.cc b/compiler/dex/quick/arm/int_arm.cc
index e34d944..916c528 100644
--- a/compiler/dex/quick/arm/int_arm.cc
+++ b/compiler/dex/quick/arm/int_arm.cc
@@ -723,7 +723,7 @@ bool ArmMir2Lir::GenInlinedPeek(CallInfo* info, OpSize size) {
} else {
DCHECK(size == kSignedByte || size == kSignedHalf || size == k32);
// Unaligned load with LDR and LDRSH is allowed on ARMv7 with SCTLR.A set to 0.
- LoadBaseDisp(rl_address.reg, 0, rl_result.reg, size, kNotVolatile);
+ LoadBaseDisp(rl_address.reg, 0, rl_result.reg, size);
StoreValue(rl_dest, rl_result);
}
return true;
@@ -737,13 +737,13 @@ bool ArmMir2Lir::GenInlinedPoke(CallInfo* info, OpSize size) {
if (size == k64) {
// Fake unaligned STRD by two unaligned STR instructions on ARMv7 with SCTLR.A set to 0.
RegLocation rl_value = LoadValueWide(rl_src_value, kCoreReg);
- StoreBaseDisp(rl_address.reg, 0, rl_value.reg.GetLow(), k32, kNotVolatile);
- StoreBaseDisp(rl_address.reg, 4, rl_value.reg.GetHigh(), k32, kNotVolatile);
+ StoreBaseDisp(rl_address.reg, 0, rl_value.reg.GetLow(), k32);
+ StoreBaseDisp(rl_address.reg, 4, rl_value.reg.GetHigh(), k32);
} else {
DCHECK(size == kSignedByte || size == kSignedHalf || size == k32);
// Unaligned store with STR and STRH is allowed on ARMv7 with SCTLR.A set to 0.
RegLocation rl_value = LoadValue(rl_src_value, kCoreReg);
- StoreBaseDisp(rl_address.reg, 0, rl_value.reg, size, kNotVolatile);
+ StoreBaseDisp(rl_address.reg, 0, rl_value.reg, size);
}
return true;
}
@@ -1230,7 +1230,7 @@ void ArmMir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
}
FreeTemp(reg_len);
}
- LoadBaseDisp(reg_ptr, data_offset, rl_result.reg, size, kNotVolatile);
+ LoadBaseDisp(reg_ptr, data_offset, rl_result.reg, size);
MarkPossibleNullPointerException(opt_flags);
if (!constant_index) {
FreeTemp(reg_ptr);
@@ -1330,7 +1330,7 @@ void ArmMir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
FreeTemp(reg_len);
}
- StoreBaseDisp(reg_ptr, data_offset, rl_src.reg, size, kNotVolatile);
+ StoreBaseDisp(reg_ptr, data_offset, rl_src.reg, size);
MarkPossibleNullPointerException(opt_flags);
} else {
/* reg_ptr -> array data */
diff --git a/compiler/dex/quick/arm/utility_arm.cc b/compiler/dex/quick/arm/utility_arm.cc
index bc8f95b..b236f99 100644
--- a/compiler/dex/quick/arm/utility_arm.cc
+++ b/compiler/dex/quick/arm/utility_arm.cc
@@ -961,37 +961,31 @@ LIR* ArmMir2Lir::LoadBaseDispBody(RegStorage r_base, int displacement, RegStorag
return load;
}
+LIR* ArmMir2Lir::LoadBaseDispVolatile(RegStorage r_base, int displacement, RegStorage r_dest,
+ OpSize size) {
+ // Only 64-bit load needs special handling.
+ if (UNLIKELY(size == k64 || size == kDouble)) {
+ DCHECK(!r_dest.IsFloat()); // See RegClassForFieldLoadSave().
+ // If the cpu supports LPAE, aligned LDRD is atomic - fall through to LoadBaseDisp().
+ if (!cu_->compiler_driver->GetInstructionSetFeatures().HasLpae()) {
+ // Use LDREXD for the atomic load. (Expect displacement > 0, don't optimize for == 0.)
+ RegStorage r_ptr = AllocTemp();
+ OpRegRegImm(kOpAdd, r_ptr, r_base, displacement);
+ LIR* lir = NewLIR3(kThumb2Ldrexd, r_dest.GetLowReg(), r_dest.GetHighReg(), r_ptr.GetReg());
+ FreeTemp(r_ptr);
+ return lir;
+ }
+ }
+ return LoadBaseDisp(r_base, displacement, r_dest, size);
+}
+
LIR* ArmMir2Lir::LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
- OpSize size, VolatileKind is_volatile) {
+ OpSize size) {
// TODO: base this on target.
if (size == kWord) {
size = k32;
}
- LIR* load;
- if (UNLIKELY(is_volatile == kVolatile &&
- (size == k64 || size == kDouble) &&
- !cu_->compiler_driver->GetInstructionSetFeatures().HasLpae())) {
- // Only 64-bit load needs special handling.
- // If the cpu supports LPAE, aligned LDRD is atomic - fall through to LoadBaseDisp().
- DCHECK(!r_dest.IsFloat()); // See RegClassForFieldLoadSave().
- // Use LDREXD for the atomic load. (Expect displacement > 0, don't optimize for == 0.)
- RegStorage r_ptr = AllocTemp();
- OpRegRegImm(kOpAdd, r_ptr, r_base, displacement);
- LIR* lir = NewLIR3(kThumb2Ldrexd, r_dest.GetLowReg(), r_dest.GetHighReg(), r_ptr.GetReg());
- FreeTemp(r_ptr);
- return lir;
- } else {
- load = LoadBaseDispBody(r_base, displacement, r_dest, size);
- }
-
- if (UNLIKELY(is_volatile == kVolatile)) {
- // Without context sensitive analysis, we must issue the most conservative barriers.
- // In this case, either a load or store may follow so we issue both barriers.
- GenMemBarrier(kLoadLoad);
- GenMemBarrier(kLoadStore);
- }
-
- return load;
+ return LoadBaseDispBody(r_base, displacement, r_dest, size);
}
@@ -1087,58 +1081,49 @@ LIR* ArmMir2Lir::StoreBaseDispBody(RegStorage r_base, int displacement, RegStora
return store;
}
-LIR* ArmMir2Lir::StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src,
- OpSize size, VolatileKind is_volatile) {
- if (UNLIKELY(is_volatile == kVolatile)) {
- // There might have been a store before this volatile one so insert StoreStore barrier.
- GenMemBarrier(kStoreStore);
- }
-
- LIR* store;
- if (UNLIKELY(is_volatile == kVolatile &&
- (size == k64 || size == kDouble) &&
- !cu_->compiler_driver->GetInstructionSetFeatures().HasLpae())) {
- // Only 64-bit store needs special handling.
- // If the cpu supports LPAE, aligned STRD is atomic - fall through to StoreBaseDisp().
- // Use STREXD for the atomic store. (Expect displacement > 0, don't optimize for == 0.)
+LIR* ArmMir2Lir::StoreBaseDispVolatile(RegStorage r_base, int displacement, RegStorage r_src,
+ OpSize size) {
+ // Only 64-bit store needs special handling.
+ if (UNLIKELY(size == k64 || size == kDouble)) {
DCHECK(!r_src.IsFloat()); // See RegClassForFieldLoadSave().
- RegStorage r_ptr = AllocTemp();
- OpRegRegImm(kOpAdd, r_ptr, r_base, displacement);
- LIR* fail_target = NewLIR0(kPseudoTargetLabel);
- // We have only 5 temporary registers available and if r_base, r_src and r_ptr already
- // take 4, we can't directly allocate 2 more for LDREXD temps. In that case clobber r_ptr
- // in LDREXD and recalculate it from r_base.
- RegStorage r_temp = AllocTemp();
- RegStorage r_temp_high = AllocFreeTemp(); // We may not have another temp.
- if (r_temp_high.Valid()) {
- NewLIR3(kThumb2Ldrexd, r_temp.GetReg(), r_temp_high.GetReg(), r_ptr.GetReg());
- FreeTemp(r_temp_high);
- FreeTemp(r_temp);
- } else {
- // If we don't have another temp, clobber r_ptr in LDREXD and reload it.
- NewLIR3(kThumb2Ldrexd, r_temp.GetReg(), r_ptr.GetReg(), r_ptr.GetReg());
- FreeTemp(r_temp); // May need the temp for kOpAdd.
+ // If the cpu supports LPAE, aligned STRD is atomic - fall through to StoreBaseDisp().
+ if (!cu_->compiler_driver->GetInstructionSetFeatures().HasLpae()) {
+ // Use STREXD for the atomic store. (Expect displacement > 0, don't optimize for == 0.)
+ RegStorage r_ptr = AllocTemp();
OpRegRegImm(kOpAdd, r_ptr, r_base, displacement);
+ LIR* fail_target = NewLIR0(kPseudoTargetLabel);
+ // We have only 5 temporary registers available and if r_base, r_src and r_ptr already
+ // take 4, we can't directly allocate 2 more for LDREXD temps. In that case clobber r_ptr
+ // in LDREXD and recalculate it from r_base.
+ RegStorage r_temp = AllocTemp();
+ RegStorage r_temp_high = AllocFreeTemp(); // We may not have another temp.
+ if (r_temp_high.Valid()) {
+ NewLIR3(kThumb2Ldrexd, r_temp.GetReg(), r_temp_high.GetReg(), r_ptr.GetReg());
+ FreeTemp(r_temp_high);
+ FreeTemp(r_temp);
+ } else {
+ // If we don't have another temp, clobber r_ptr in LDREXD and reload it.
+ NewLIR3(kThumb2Ldrexd, r_temp.GetReg(), r_ptr.GetReg(), r_ptr.GetReg());
+ FreeTemp(r_temp); // May need the temp for kOpAdd.
+ OpRegRegImm(kOpAdd, r_ptr, r_base, displacement);
+ }
+ LIR* lir = NewLIR4(kThumb2Strexd, r_temp.GetReg(), r_src.GetLowReg(), r_src.GetHighReg(),
+ r_ptr.GetReg());
+ OpCmpImmBranch(kCondNe, r_temp, 0, fail_target);
+ FreeTemp(r_ptr);
+ return lir;
}
- store = NewLIR4(kThumb2Strexd, r_temp.GetReg(), r_src.GetLowReg(), r_src.GetHighReg(),
- r_ptr.GetReg());
- OpCmpImmBranch(kCondNe, r_temp, 0, fail_target);
- FreeTemp(r_ptr);
- } else {
- // TODO: base this on target.
- if (size == kWord) {
- size = k32;
- }
-
- store = StoreBaseDispBody(r_base, displacement, r_src, size);
}
+ return StoreBaseDisp(r_base, displacement, r_src, size);
+}
- if (UNLIKELY(is_volatile == kVolatile)) {
- // A load might follow the volatile store so insert a StoreLoad barrier.
- GenMemBarrier(kStoreLoad);
+LIR* ArmMir2Lir::StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src,
+ OpSize size) {
+ // TODO: base this on target.
+ if (size == kWord) {
+ size = k32;
}
-
- return store;
+ return StoreBaseDispBody(r_base, displacement, r_src, size);
}
LIR* ArmMir2Lir::OpFpRegCopy(RegStorage r_dest, RegStorage r_src) {
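
For context, not from the patch: the ldrexd/strexd sequence that StoreBaseDispVolatile hand-emits above is the same shape a C++ compiler produces for a 64-bit atomic store on ARMv7 cores without LPAE. A minimal sketch, assuming a plain std::atomic field rather than ART's RegStorage/LIR machinery:

#include <atomic>
#include <cstdint>

// 64-bit field shared between threads on 32-bit ARMv7.
std::atomic<int64_t> counter;

void StoreVolatile64(int64_t v) {
  // Without LPAE there is no single-copy-atomic 64-bit store, so this
  // lowers to an ldrexd/strexd retry loop plus dmb barriers, i.e. the
  // pattern the backend models with kStoreStore/kStoreLoad.
  counter.store(v, std::memory_order_seq_cst);
}

int64_t LoadVolatile64() {
  // Lowers to ldrexd (or a plain ldrd on LPAE cores, where it is atomic).
  return counter.load(std::memory_order_seq_cst);
}
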
diff --git a/compiler/dex/quick/arm64/arm64_lir.h b/compiler/dex/quick/arm64/arm64_lir.h
index 1f1a252..3f32c51 100644
--- a/compiler/dex/quick/arm64/arm64_lir.h
+++ b/compiler/dex/quick/arm64/arm64_lir.h
@@ -101,7 +101,6 @@ namespace art {
// Temporary macros, used to mark code which wants to distinguish between zr/sp.
#define A64_REG_IS_SP(reg_num) ((reg_num) == rwsp || (reg_num) == rsp)
#define A64_REG_IS_ZR(reg_num) ((reg_num) == rwzr || (reg_num) == rxzr)
-#define A64_REGSTORAGE_IS_SP_OR_ZR(rs) (((rs).GetRegNum() & 0x1f) == 0x1f)
enum Arm64ResourceEncodingPos {
kArm64GPReg0 = 0,
diff --git a/compiler/dex/quick/arm64/assemble_arm64.cc b/compiler/dex/quick/arm64/assemble_arm64.cc
index bee64f1..2a8da24 100644
--- a/compiler/dex/quick/arm64/assemble_arm64.cc
+++ b/compiler/dex/quick/arm64/assemble_arm64.cc
@@ -632,19 +632,19 @@ uint8_t* Arm64Mir2Lir::EncodeLIRs(uint8_t* write_pos, LIR* lir) {
if (static_cast<unsigned>(kind) < kFmtBitBlt) {
bool is_zero = A64_REG_IS_ZR(operand);
- if (kIsDebugBuild && (kFailOnSizeError || kReportSizeError)) {
+ if (kIsDebugBuild) {
// Register usage checks: First establish register usage requirements based on the
// format in `kind'.
bool want_float = false;
bool want_64_bit = false;
- bool want_var_size = true;
+ bool want_size_match = false;
bool want_zero = false;
switch (kind) {
case kFmtRegX:
want_64_bit = true;
// Intentional fall-through.
case kFmtRegW:
- want_var_size = false;
+ want_size_match = true;
// Intentional fall-through.
case kFmtRegR:
want_zero = true;
@@ -653,7 +653,7 @@ uint8_t* Arm64Mir2Lir::EncodeLIRs(uint8_t* write_pos, LIR* lir) {
want_64_bit = true;
// Intentional fall-through.
case kFmtRegWOrSp:
- want_var_size = false;
+ want_size_match = true;
break;
case kFmtRegROrSp:
break;
@@ -661,7 +661,7 @@ uint8_t* Arm64Mir2Lir::EncodeLIRs(uint8_t* write_pos, LIR* lir) {
want_64_bit = true;
// Intentional fall-through.
case kFmtRegS:
- want_var_size = false;
+ want_size_match = true;
// Intentional fall-through.
case kFmtRegF:
want_float = true;
@@ -672,27 +672,21 @@ uint8_t* Arm64Mir2Lir::EncodeLIRs(uint8_t* write_pos, LIR* lir) {
break;
}
- // want_var_size == true means kind == kFmtReg{R,F}. In these two cases, we want
- // the register size to be coherent with the instruction width.
- if (want_var_size) {
- want_64_bit = opcode_is_wide;
- }
-
// Now check that the requirements are satisfied.
RegStorage reg(operand | RegStorage::kValid);
const char *expected = nullptr;
if (want_float) {
if (!reg.IsFloat()) {
expected = "float register";
- } else if (reg.IsDouble() != want_64_bit) {
+ } else if (want_size_match && (reg.IsDouble() != want_64_bit)) {
expected = (want_64_bit) ? "double register" : "single register";
}
} else {
if (reg.IsFloat()) {
expected = "core register";
- } else if (reg.Is64Bit() != want_64_bit) {
+ } else if (want_size_match && (reg.Is64Bit() != want_64_bit)) {
expected = (want_64_bit) ? "x-register" : "w-register";
- } else if (A64_REGSTORAGE_IS_SP_OR_ZR(reg) && is_zero != want_zero) {
+ } else if (reg.GetRegNum() == 31 && is_zero != want_zero) {
expected = (want_zero) ? "zero-register" : "sp-register";
}
}
@@ -704,13 +698,8 @@ uint8_t* Arm64Mir2Lir::EncodeLIRs(uint8_t* write_pos, LIR* lir) {
if (expected != nullptr) {
LOG(WARNING) << "Method: " << PrettyMethod(cu_->method_idx, *cu_->dex_file)
<< " @ 0x" << std::hex << lir->dalvik_offset;
- if (kFailOnSizeError) {
- LOG(FATAL) << "Bad argument n. " << i << " of " << encoder->name
- << ". Expected " << expected << ", got 0x" << std::hex << operand;
- } else {
- LOG(WARNING) << "Bad argument n. " << i << " of " << encoder->name
- << ". Expected " << expected << ", got 0x" << std::hex << operand;
- }
+ LOG(FATAL) << "Bad argument n. " << i << " of " << encoder->name
+ << ". Expected " << expected << ", got 0x" << std::hex << operand;
}
}
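
Aside, not part of the change: the checks above hinge on the A64 encoding detail that register number 31 names either the zero register (wzr/xzr) or the stack pointer (wsp/sp), depending on the operand slot. A standalone sketch of the same verification logic, with illustrative names rather than ART's RegStorage API:

struct Reg { bool is_float; bool is_double; bool is_64bit; int num; };

// Returns a description of what was expected, or nullptr if the operand fits.
const char* CheckOperand(Reg reg, bool is_zero, bool want_float,
                         bool want_64bit, bool want_size_match, bool want_zero) {
  if (want_float) {
    if (!reg.is_float) return "float register";
    if (want_size_match && reg.is_double != want_64bit)
      return want_64bit ? "double register" : "single register";
  } else {
    if (reg.is_float) return "core register";
    if (want_size_match && reg.is_64bit != want_64bit)
      return want_64bit ? "x-register" : "w-register";
    // Register 31 is zr or sp depending on the instruction, so only a
    // mismatch against what this operand slot wants is an error.
    if (reg.num == 31 && is_zero != want_zero)
      return want_zero ? "zero-register" : "sp-register";
  }
  return nullptr;
}
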
diff --git a/compiler/dex/quick/arm64/call_arm64.cc b/compiler/dex/quick/arm64/call_arm64.cc
index c3f4711..1df576b 100644
--- a/compiler/dex/quick/arm64/call_arm64.cc
+++ b/compiler/dex/quick/arm64/call_arm64.cc
@@ -267,7 +267,7 @@ void Arm64Mir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src) {
MarkPossibleNullPointerException(opt_flags);
LIR* slow_unlock_branch = OpCmpBranch(kCondNe, rs_w1, rs_w2, NULL);
GenMemBarrier(kStoreLoad);
- Store32Disp(rs_x0, mirror::Object::MonitorOffset().Int32Value(), rs_wzr);
+ Store32Disp(rs_x0, mirror::Object::MonitorOffset().Int32Value(), rs_xzr);
LIR* unlock_success_branch = OpUnconditionalBranch(NULL);
LIR* slow_path_target = NewLIR0(kPseudoTargetLabel);
@@ -289,8 +289,8 @@ void Arm64Mir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src) {
void Arm64Mir2Lir::GenMoveException(RegLocation rl_dest) {
int ex_offset = Thread::ExceptionOffset<8>().Int32Value();
RegLocation rl_result = EvalLoc(rl_dest, kRefReg, true);
- LoadRefDisp(rs_rA64_SELF, ex_offset, rl_result.reg, kNotVolatile);
- StoreRefDisp(rs_rA64_SELF, ex_offset, rs_xzr, kNotVolatile);
+ LoadRefDisp(rs_rA64_SELF, ex_offset, rl_result.reg);
+ StoreRefDisp(rs_rA64_SELF, ex_offset, rs_xzr);
StoreValue(rl_dest, rl_result);
}
diff --git a/compiler/dex/quick/arm64/codegen_arm64.h b/compiler/dex/quick/arm64/codegen_arm64.h
index 68fa6f4..f1270ec 100644
--- a/compiler/dex/quick/arm64/codegen_arm64.h
+++ b/compiler/dex/quick/arm64/codegen_arm64.h
@@ -26,11 +26,6 @@ namespace art {
class Arm64Mir2Lir : public Mir2Lir {
protected:
- // If we detect a size error, FATAL out.
- static constexpr bool kFailOnSizeError = false && kIsDebugBuild;
- // If we detect a size error, report to LOG.
- static constexpr bool kReportSizeError = false && kIsDebugBuild;
-
// TODO: consolidate 64-bit target support.
class InToRegStorageMapper {
public:
@@ -74,25 +69,22 @@ class Arm64Mir2Lir : public Mir2Lir {
LIR* CheckSuspendUsingLoad() OVERRIDE;
RegStorage LoadHelper(ThreadOffset<4> offset) OVERRIDE;
RegStorage LoadHelper(ThreadOffset<8> offset) OVERRIDE;
+ LIR* LoadBaseDispVolatile(RegStorage r_base, int displacement, RegStorage r_dest,
+ OpSize size) OVERRIDE;
LIR* LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
- OpSize size, VolatileKind is_volatile) OVERRIDE;
- LIR* LoadRefDisp(RegStorage r_base, int displacement, RegStorage r_dest,
- VolatileKind is_volatile)
- OVERRIDE;
+ OpSize size) OVERRIDE;
LIR* LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest, int scale,
OpSize size) OVERRIDE;
- LIR* LoadRefIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest) OVERRIDE;
LIR* LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, int displacement,
RegStorage r_dest, OpSize size) OVERRIDE;
LIR* LoadConstantNoClobber(RegStorage r_dest, int value);
LIR* LoadConstantWide(RegStorage r_dest, int64_t value);
+ LIR* StoreBaseDispVolatile(RegStorage r_base, int displacement, RegStorage r_dest,
+ OpSize size) OVERRIDE;
LIR* StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src,
- OpSize size, VolatileKind is_volatile) OVERRIDE;
- LIR* StoreRefDisp(RegStorage r_base, int displacement, RegStorage r_src,
- VolatileKind is_volatile) OVERRIDE;
+ OpSize size) OVERRIDE;
LIR* StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src, int scale,
OpSize size) OVERRIDE;
- LIR* StoreRefIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src) OVERRIDE;
LIR* StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, int displacement,
RegStorage r_src, OpSize size) OVERRIDE;
void MarkGCCard(RegStorage val_reg, RegStorage tgt_addr_reg) OVERRIDE;
@@ -291,15 +283,8 @@ class Arm64Mir2Lir : public Mir2Lir {
* @see As64BitReg
*/
RegStorage As32BitReg(RegStorage reg) {
+ DCHECK(reg.Is64Bit());
DCHECK(!reg.IsPair());
- if ((kFailOnSizeError || kReportSizeError) && !reg.Is64Bit()) {
- if (kFailOnSizeError) {
- LOG(FATAL) << "Expected 64b register";
- } else {
- LOG(WARNING) << "Expected 64b register";
- return reg;
- }
- }
RegStorage ret_val = RegStorage(RegStorage::k32BitSolo,
reg.GetRawBits() & RegStorage::kRegTypeMask);
DCHECK_EQ(GetRegInfo(reg)->FindMatchingView(RegisterInfo::k32SoloStorageMask)
@@ -308,18 +293,6 @@ class Arm64Mir2Lir : public Mir2Lir {
return ret_val;
}
- RegStorage Check32BitReg(RegStorage reg) {
- if ((kFailOnSizeError || kReportSizeError) && !reg.Is32Bit()) {
- if (kFailOnSizeError) {
- LOG(FATAL) << "Checked for 32b register";
- } else {
- LOG(WARNING) << "Checked for 32b register";
- return As32BitReg(reg);
- }
- }
- return reg;
- }
-
/**
* @brief Given register wNN (sNN), returns register xNN (dNN).
* @param reg #RegStorage containing a Solo32 input register (e.g. @c w1 or @c s2).
@@ -327,15 +300,8 @@ class Arm64Mir2Lir : public Mir2Lir {
* @see As32BitReg
*/
RegStorage As64BitReg(RegStorage reg) {
+ DCHECK(reg.Is32Bit());
DCHECK(!reg.IsPair());
- if ((kFailOnSizeError || kReportSizeError) && !reg.Is32Bit()) {
- if (kFailOnSizeError) {
- LOG(FATAL) << "Expected 32b register";
- } else {
- LOG(WARNING) << "Expected 32b register";
- return reg;
- }
- }
RegStorage ret_val = RegStorage(RegStorage::k64BitSolo,
reg.GetRawBits() & RegStorage::kRegTypeMask);
DCHECK_EQ(GetRegInfo(reg)->FindMatchingView(RegisterInfo::k64SoloStorageMask)
@@ -344,18 +310,6 @@ class Arm64Mir2Lir : public Mir2Lir {
return ret_val;
}
- RegStorage Check64BitReg(RegStorage reg) {
- if ((kFailOnSizeError || kReportSizeError) && !reg.Is64Bit()) {
- if (kFailOnSizeError) {
- LOG(FATAL) << "Checked for 64b register";
- } else {
- LOG(WARNING) << "Checked for 64b register";
- return As64BitReg(reg);
- }
- }
- return reg;
- }
-
LIR* LoadFPConstantValue(RegStorage r_dest, int32_t value);
LIR* LoadFPConstantValueWide(RegStorage r_dest, int64_t value);
void ReplaceFixup(LIR* prev_lir, LIR* orig_lir, LIR* new_lir);
diff --git a/compiler/dex/quick/arm64/int_arm64.cc b/compiler/dex/quick/arm64/int_arm64.cc
index 1fdbe2d..2ac4adb 100644
--- a/compiler/dex/quick/arm64/int_arm64.cc
+++ b/compiler/dex/quick/arm64/int_arm64.cc
@@ -410,7 +410,7 @@ bool Arm64Mir2Lir::GenInlinedPeek(CallInfo* info, OpSize size) {
RegLocation rl_address = LoadValue(rl_src_address, kCoreReg); // kRefReg
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
- LoadBaseDisp(rl_address.reg, 0, rl_result.reg, size, kNotVolatile);
+ LoadBaseDisp(rl_address.reg, 0, rl_result.reg, size);
if (size == k64) {
StoreValueWide(rl_dest, rl_result);
} else {
@@ -433,7 +433,7 @@ bool Arm64Mir2Lir::GenInlinedPoke(CallInfo* info, OpSize size) {
DCHECK(size == kSignedByte || size == kSignedHalf || size == k32);
rl_value = LoadValue(rl_src_value, kCoreReg);
}
- StoreBaseDisp(rl_address.reg, 0, rl_value.reg, size, kNotVolatile);
+ StoreBaseDisp(rl_address.reg, 0, rl_value.reg, size);
return true;
}
@@ -747,11 +747,7 @@ void Arm64Mir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
}
FreeTemp(reg_len);
}
- if (rl_result.ref) {
- LoadRefDisp(reg_ptr, data_offset, rl_result.reg, kNotVolatile);
- } else {
- LoadBaseDisp(reg_ptr, data_offset, rl_result.reg, size, kNotVolatile);
- }
+ LoadBaseDisp(reg_ptr, data_offset, rl_result.reg, size);
MarkPossibleNullPointerException(opt_flags);
if (!constant_index) {
FreeTemp(reg_ptr);
@@ -772,11 +768,7 @@ void Arm64Mir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
GenArrayBoundsCheck(rl_index.reg, reg_len);
FreeTemp(reg_len);
}
- if (rl_result.ref) {
- LoadRefIndexed(reg_ptr, As64BitReg(rl_index.reg), rl_result.reg);
- } else {
- LoadBaseIndexed(reg_ptr, As64BitReg(rl_index.reg), rl_result.reg, scale, size);
- }
+ LoadBaseIndexed(reg_ptr, As64BitReg(rl_index.reg), rl_result.reg, scale, size);
MarkPossibleNullPointerException(opt_flags);
FreeTemp(reg_ptr);
StoreValue(rl_dest, rl_result);
@@ -855,11 +847,8 @@ void Arm64Mir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
}
FreeTemp(reg_len);
}
- if (rl_src.ref) {
- StoreRefDisp(reg_ptr, data_offset, rl_src.reg, kNotVolatile);
- } else {
- StoreBaseDisp(reg_ptr, data_offset, rl_src.reg, size, kNotVolatile);
- }
+
+ StoreBaseDisp(reg_ptr, data_offset, rl_src.reg, size);
MarkPossibleNullPointerException(opt_flags);
} else {
/* reg_ptr -> array data */
@@ -869,11 +858,7 @@ void Arm64Mir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
GenArrayBoundsCheck(rl_index.reg, reg_len);
FreeTemp(reg_len);
}
- if (rl_src.ref) {
- StoreRefIndexed(reg_ptr, As64BitReg(rl_index.reg), rl_src.reg);
- } else {
- StoreBaseIndexed(reg_ptr, As64BitReg(rl_index.reg), rl_src.reg, scale, size);
- }
+ StoreBaseIndexed(reg_ptr, As64BitReg(rl_index.reg), rl_src.reg, scale, size);
MarkPossibleNullPointerException(opt_flags);
}
if (allocated_reg_ptr_temp) {
diff --git a/compiler/dex/quick/arm64/target_arm64.cc b/compiler/dex/quick/arm64/target_arm64.cc
index dfaa483..06e1cda 100644
--- a/compiler/dex/quick/arm64/target_arm64.cc
+++ b/compiler/dex/quick/arm64/target_arm64.cc
@@ -789,7 +789,7 @@ RegStorage Arm64Mir2Lir::LoadHelper(ThreadOffset<4> offset) {
RegStorage Arm64Mir2Lir::LoadHelper(ThreadOffset<8> offset) {
// TODO(Arm64): use LoadWordDisp instead.
// e.g. LoadWordDisp(rs_rA64_SELF, offset.Int32Value(), rs_rA64_LR);
- LoadBaseDisp(rs_rA64_SELF, offset.Int32Value(), rs_rA64_LR, k64, kNotVolatile);
+ LoadBaseDisp(rs_rA64_SELF, offset.Int32Value(), rs_rA64_LR, k64);
return rs_rA64_LR;
}
@@ -949,7 +949,7 @@ void Arm64Mir2Lir::FlushIns(RegLocation* ArgLocs, RegLocation rl_method) {
StoreValue(rl_method, rl_src);
// If Method* has been promoted, explicitly flush
if (rl_method.location == kLocPhysReg) {
- StoreRefDisp(TargetReg(kSp), 0, TargetReg(kArg0), kNotVolatile);
+ StoreRefDisp(TargetReg(kSp), 0, TargetReg(kArg0));
}
if (cu_->num_ins == 0) {
@@ -971,7 +971,7 @@ void Arm64Mir2Lir::FlushIns(RegLocation* ArgLocs, RegLocation rl_method) {
} else if ((v_map->fp_location == kLocPhysReg) && t_loc->fp) {
OpRegCopy(RegStorage::Solo32(v_map->FpReg), reg);
} else {
- StoreBaseDisp(TargetReg(kSp), SRegOffset(start_vreg + i), reg, op_size, kNotVolatile);
+ StoreBaseDisp(TargetReg(kSp), SRegOffset(start_vreg + i), reg, op_size);
if (reg.Is64Bit()) {
if (SRegOffset(start_vreg + i) + 4 != SRegOffset(start_vreg + i + 1)) {
LOG(FATAL) << "64 bit value stored in non-consecutive 4 bytes slots";
@@ -1057,14 +1057,14 @@ int Arm64Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state,
loc = UpdateLocWide(loc);
if (loc.location == kLocPhysReg) {
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
- StoreBaseDisp(TargetReg(kSp), SRegOffset(loc.s_reg_low), loc.reg, k64, kNotVolatile);
+ StoreBaseDisp(TargetReg(kSp), SRegOffset(loc.s_reg_low), loc.reg, k64);
}
next_arg += 2;
} else {
loc = UpdateLoc(loc);
if (loc.location == kLocPhysReg) {
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
- StoreBaseDisp(TargetReg(kSp), SRegOffset(loc.s_reg_low), loc.reg, k32, kNotVolatile);
+ StoreBaseDisp(TargetReg(kSp), SRegOffset(loc.s_reg_low), loc.reg, k32);
}
next_arg++;
}
@@ -1122,27 +1122,18 @@ int Arm64Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state,
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
if (rl_arg.wide) {
if (rl_arg.location == kLocPhysReg) {
- StoreBaseDisp(TargetReg(kSp), out_offset, rl_arg.reg, k64, kNotVolatile);
+ StoreBaseDisp(TargetReg(kSp), out_offset, rl_arg.reg, k64);
} else {
LoadValueDirectWideFixed(rl_arg, regWide);
- StoreBaseDisp(TargetReg(kSp), out_offset, regWide, k64, kNotVolatile);
+ StoreBaseDisp(TargetReg(kSp), out_offset, regWide, k64);
}
i++;
} else {
if (rl_arg.location == kLocPhysReg) {
- if (rl_arg.ref) {
- StoreRefDisp(TargetReg(kSp), out_offset, rl_arg.reg, kNotVolatile);
- } else {
- StoreBaseDisp(TargetReg(kSp), out_offset, rl_arg.reg, k32, kNotVolatile);
- }
+ StoreBaseDisp(TargetReg(kSp), out_offset, rl_arg.reg, k32);
} else {
- if (rl_arg.ref) {
- LoadValueDirectFixed(rl_arg, regSingle);
- StoreRefDisp(TargetReg(kSp), out_offset, regSingle, kNotVolatile);
- } else {
- LoadValueDirectFixed(rl_arg, As32BitReg(regSingle));
- StoreBaseDisp(TargetReg(kSp), out_offset, As32BitReg(regSingle), k32, kNotVolatile);
- }
+ LoadValueDirectFixed(rl_arg, regSingle);
+ StoreBaseDisp(TargetReg(kSp), out_offset, regSingle, k32);
}
}
}
diff --git a/compiler/dex/quick/arm64/utility_arm64.cc b/compiler/dex/quick/arm64/utility_arm64.cc
index 12c2f41..672aa88 100644
--- a/compiler/dex/quick/arm64/utility_arm64.cc
+++ b/compiler/dex/quick/arm64/utility_arm64.cc
@@ -893,7 +893,9 @@ LIR* Arm64Mir2Lir::LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegSto
ArmOpcode opcode = kA64Brk1d;
DCHECK(r_base.Is64Bit());
// TODO: need a cleaner handling of index registers here and throughout.
- r_index = Check32BitReg(r_index);
+ if (r_index.Is32Bit()) {
+ r_index = As64BitReg(r_index);
+ }
if (r_dest.IsFloat()) {
if (r_dest.IsDouble()) {
@@ -916,14 +918,12 @@ LIR* Arm64Mir2Lir::LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegSto
case kDouble:
case kWord:
case k64:
- r_dest = Check64BitReg(r_dest);
opcode = WIDE(kA64Ldr4rXxG);
expected_scale = 3;
break;
case kSingle:
case k32:
case kReference:
- r_dest = Check32BitReg(r_dest);
opcode = kA64Ldr4rXxG;
expected_scale = 2;
break;
@@ -959,10 +959,6 @@ LIR* Arm64Mir2Lir::LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegSto
return load;
}
-LIR* Arm64Mir2Lir::LoadRefIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest) {
- return LoadBaseIndexed(r_base, r_index, As32BitReg(r_dest), 2, kReference);
-}
-
LIR* Arm64Mir2Lir::StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src,
int scale, OpSize size) {
LIR* store;
@@ -970,7 +966,9 @@ LIR* Arm64Mir2Lir::StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegSt
ArmOpcode opcode = kA64Brk1d;
DCHECK(r_base.Is64Bit());
// TODO: need a cleaner handling of index registers here and throughout.
- r_index = Check32BitReg(r_index);
+ if (r_index.Is32Bit()) {
+ r_index = As64BitReg(r_index);
+ }
if (r_src.IsFloat()) {
if (r_src.IsDouble()) {
@@ -993,14 +991,12 @@ LIR* Arm64Mir2Lir::StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegSt
case kDouble: // Intentional fall-through.
case kWord: // Intentional fall-through.
case k64:
- r_src = Check64BitReg(r_src);
opcode = WIDE(kA64Str4rXxG);
expected_scale = 3;
break;
case kSingle: // Intentional fall-through.
case k32: // Intentional fall-through.
case kReference:
- r_src = Check32BitReg(r_src);
opcode = kA64Str4rXxG;
expected_scale = 2;
break;
@@ -1030,10 +1026,6 @@ LIR* Arm64Mir2Lir::StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegSt
return store;
}
-LIR* Arm64Mir2Lir::StoreRefIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src) {
- return StoreBaseIndexed(r_base, r_index, As32BitReg(r_src), 2, kReference);
-}
-
/*
* Load value from base + displacement. Optionally perform null check
* on base (which must have an associated s_reg and MIR). If not
@@ -1050,7 +1042,6 @@ LIR* Arm64Mir2Lir::LoadBaseDispBody(RegStorage r_base, int displacement, RegStor
case kDouble: // Intentional fall-through.
case kWord: // Intentional fall-through.
case k64:
- r_dest = Check64BitReg(r_dest);
scale = 3;
if (r_dest.IsFloat()) {
DCHECK(r_dest.IsDouble());
@@ -1064,7 +1055,6 @@ LIR* Arm64Mir2Lir::LoadBaseDispBody(RegStorage r_base, int displacement, RegStor
case kSingle: // Intentional fall-through.
case k32: // Intentional fall-through.
case kReference:
- r_dest = Check32BitReg(r_dest);
scale = 2;
if (r_dest.IsFloat()) {
DCHECK(r_dest.IsSingle());
@@ -1116,28 +1106,19 @@ LIR* Arm64Mir2Lir::LoadBaseDispBody(RegStorage r_base, int displacement, RegStor
return load;
}
-LIR* Arm64Mir2Lir::LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
- OpSize size, VolatileKind is_volatile) {
+LIR* Arm64Mir2Lir::LoadBaseDispVolatile(RegStorage r_base, int displacement, RegStorage r_dest,
+ OpSize size) {
// LoadBaseDisp() will emit correct insn for atomic load on arm64
// assuming r_dest is correctly prepared using RegClassForFieldLoadStore().
-
- LIR* load = LoadBaseDispBody(r_base, displacement, r_dest, size);
-
- if (UNLIKELY(is_volatile == kVolatile)) {
- // Without context sensitive analysis, we must issue the most conservative barriers.
- // In this case, either a load or store may follow so we issue both barriers.
- GenMemBarrier(kLoadLoad);
- GenMemBarrier(kLoadStore);
- }
-
- return load;
+ return LoadBaseDisp(r_base, displacement, r_dest, size);
}
-LIR* Arm64Mir2Lir::LoadRefDisp(RegStorage r_base, int displacement, RegStorage r_dest,
- VolatileKind is_volatile) {
- return LoadBaseDisp(r_base, displacement, As32BitReg(r_dest), kReference, is_volatile);
+LIR* Arm64Mir2Lir::LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
+ OpSize size) {
+ return LoadBaseDispBody(r_base, displacement, r_dest, size);
}
+
LIR* Arm64Mir2Lir::StoreBaseDispBody(RegStorage r_base, int displacement, RegStorage r_src,
OpSize size) {
LIR* store = NULL;
@@ -1149,7 +1130,6 @@ LIR* Arm64Mir2Lir::StoreBaseDispBody(RegStorage r_base, int displacement, RegSto
case kDouble: // Intentional fall-through.
case kWord: // Intentional fall-through.
case k64:
- r_src = Check64BitReg(r_src);
scale = 3;
if (r_src.IsFloat()) {
DCHECK(r_src.IsDouble());
@@ -1163,7 +1143,6 @@ LIR* Arm64Mir2Lir::StoreBaseDispBody(RegStorage r_base, int displacement, RegSto
case kSingle: // Intentional fall-through.
case k32: // Intentional fall-through.
case kReference:
- r_src = Check32BitReg(r_src);
scale = 2;
if (r_src.IsFloat()) {
DCHECK(r_src.IsSingle());
@@ -1209,29 +1188,16 @@ LIR* Arm64Mir2Lir::StoreBaseDispBody(RegStorage r_base, int displacement, RegSto
return store;
}
-LIR* Arm64Mir2Lir::StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src,
- OpSize size, VolatileKind is_volatile) {
- if (UNLIKELY(is_volatile == kVolatile)) {
- // There might have been a store before this volatile one so insert StoreStore barrier.
- GenMemBarrier(kStoreStore);
- }
-
+LIR* Arm64Mir2Lir::StoreBaseDispVolatile(RegStorage r_base, int displacement, RegStorage r_src,
+ OpSize size) {
// StoreBaseDisp() will emit correct insn for atomic store on arm64
// assuming r_dest is correctly prepared using RegClassForFieldLoadStore().
-
- LIR* store = StoreBaseDispBody(r_base, displacement, r_src, size);
-
- if (UNLIKELY(is_volatile == kVolatile)) {
- // A load might follow the volatile store so insert a StoreLoad barrier.
- GenMemBarrier(kStoreLoad);
- }
-
- return store;
+ return StoreBaseDisp(r_base, displacement, r_src, size);
}
-LIR* Arm64Mir2Lir::StoreRefDisp(RegStorage r_base, int displacement, RegStorage r_src,
- VolatileKind is_volatile) {
- return StoreBaseDisp(r_base, displacement, As32BitReg(r_src), kReference, is_volatile);
+LIR* Arm64Mir2Lir::StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src,
+ OpSize size) {
+ return StoreBaseDispBody(r_base, displacement, r_src, size);
}
LIR* Arm64Mir2Lir::OpFpRegCopy(RegStorage r_dest, RegStorage r_src) {
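
For context, not from the patch: on ARM64 an aligned ldr/str of the right width is already single-copy atomic, which is why the volatile helpers above simply delegate to the plain LoadBaseDisp/StoreBaseDisp and leave the barriers to the caller. A rough portable C++ equivalent:

#include <atomic>
#include <cstdint>

std::atomic<int64_t> field;

// On AArch64 these compile to ordinary loads/stores plus barriers (or
// acquire/release forms); no exclusive-monitor loop is needed, unlike
// the ARMv7 path without LPAE.
void StoreVolatile(int64_t v) { field.store(v, std::memory_order_seq_cst); }
int64_t LoadVolatile() { return field.load(std::memory_order_seq_cst); }
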
diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc
index 5ff7380..ec0fb43 100644
--- a/compiler/dex/quick/codegen_util.cc
+++ b/compiler/dex/quick/codegen_util.cc
@@ -79,15 +79,6 @@ void Mir2Lir::MarkSafepointPC(LIR* inst) {
DCHECK(safepoint_pc->u.m.def_mask->Equals(kEncodeAll));
}
-void Mir2Lir::MarkSafepointPCAfter(LIR* after) {
- DCHECK(!after->flags.use_def_invalid);
- after->u.m.def_mask = &kEncodeAll;
- // As NewLIR0 uses Append, we need to create the LIR by hand.
- LIR* safepoint_pc = RawLIR(current_dalvik_offset_, kPseudoSafepointPC);
- InsertLIRAfter(after, safepoint_pc);
- DCHECK(safepoint_pc->u.m.def_mask->Equals(kEncodeAll));
-}
-
/* Remove a LIR from the list. */
void Mir2Lir::UnlinkLIR(LIR* lir) {
if (UNLIKELY(lir == first_lir_insn_)) {
diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc
index b00cbeb..e36b592 100644
--- a/compiler/dex/quick/gen_common.cc
+++ b/compiler/dex/quick/gen_common.cc
@@ -196,15 +196,6 @@ void Mir2Lir::MarkPossibleNullPointerException(int opt_flags) {
}
}
-void Mir2Lir::MarkPossibleNullPointerExceptionAfter(int opt_flags, LIR* after) {
- if (!cu_->compiler_driver->GetCompilerOptions().GetExplicitNullChecks()) {
- if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) {
- return;
- }
- MarkSafepointPCAfter(after);
- }
-}
-
void Mir2Lir::MarkPossibleStackOverflowException() {
if (!cu_->compiler_driver->GetCompilerOptions().GetExplicitStackOverflowChecks()) {
MarkSafepointPC(last_lir_insn_);
@@ -515,7 +506,7 @@ void Mir2Lir::GenFilledNewArray(CallInfo* info) {
for (int i = 0; i < elems; i++) {
RegLocation rl_arg = LoadValue(info->args[i], kCoreReg);
Store32Disp(TargetReg(kRet0),
- mirror::Array::DataOffset(component_size).Int32Value() + i * 4, rl_arg.reg);
+ mirror::Array::DataOffset(component_size).Int32Value() + i * 4, rl_arg.reg);
// If the LoadValue caused a temp to be allocated, free it
if (IsTemp(rl_arg.reg)) {
FreeTemp(rl_arg.reg);
@@ -584,8 +575,7 @@ void Mir2Lir::GenSput(MIR* mir, RegLocation rl_src, bool is_long_or_double,
// Fast path, static storage base is this method's class
RegLocation rl_method = LoadCurrMethod();
r_base = AllocTempRef();
- LoadRefDisp(rl_method.reg, mirror::ArtMethod::DeclaringClassOffset().Int32Value(), r_base,
- kNotVolatile);
+ LoadRefDisp(rl_method.reg, mirror::ArtMethod::DeclaringClassOffset().Int32Value(), r_base);
if (IsTemp(rl_method.reg)) {
FreeTemp(rl_method.reg);
}
@@ -602,10 +592,9 @@ void Mir2Lir::GenSput(MIR* mir, RegLocation rl_src, bool is_long_or_double,
LoadCurrMethodDirect(r_method);
r_base = TargetReg(kArg0);
LockTemp(r_base);
- LoadRefDisp(r_method, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(), r_base,
- kNotVolatile);
+ LoadRefDisp(r_method, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(), r_base);
int32_t offset_of_field = ObjArray::OffsetOfElement(field_info.StorageIndex()).Int32Value();
- LoadRefDisp(r_base, offset_of_field, r_base, kNotVolatile);
+ LoadRefDisp(r_base, offset_of_field, r_base);
// r_base now points at static storage (Class*) or NULL if the type is not yet resolved.
if (!field_info.IsInitialized() &&
(mir->optimization_flags & MIR_IGNORE_CLINIT_CHECK) == 0) {
@@ -637,12 +626,14 @@ void Mir2Lir::GenSput(MIR* mir, RegLocation rl_src, bool is_long_or_double,
} else {
rl_src = LoadValue(rl_src, reg_class);
}
- if (is_object) {
- StoreRefDisp(r_base, field_info.FieldOffset().Int32Value(), rl_src.reg,
- field_info.IsVolatile() ? kVolatile : kNotVolatile);
+ if (field_info.IsVolatile()) {
+ // There might have been a store before this volatile one so insert StoreStore barrier.
+ GenMemBarrier(kStoreStore);
+ StoreBaseDispVolatile(r_base, field_info.FieldOffset().Int32Value(), rl_src.reg, store_size);
+ // A load might follow the volatile store so insert a StoreLoad barrier.
+ GenMemBarrier(kStoreLoad);
} else {
- StoreBaseDisp(r_base, field_info.FieldOffset().Int32Value(), rl_src.reg, store_size,
- field_info.IsVolatile() ? kVolatile : kNotVolatile);
+ StoreBaseDisp(r_base, field_info.FieldOffset().Int32Value(), rl_src.reg, store_size);
}
if (is_object && !mir_graph_->IsConstantNullRef(rl_src)) {
MarkGCCard(rl_src.reg, r_base);
@@ -681,8 +672,7 @@ void Mir2Lir::GenSget(MIR* mir, RegLocation rl_dest,
// Fast path, static storage base is this method's class
RegLocation rl_method = LoadCurrMethod();
r_base = AllocTempRef();
- LoadRefDisp(rl_method.reg, mirror::ArtMethod::DeclaringClassOffset().Int32Value(), r_base,
- kNotVolatile);
+ LoadRefDisp(rl_method.reg, mirror::ArtMethod::DeclaringClassOffset().Int32Value(), r_base);
} else {
// Medium path, static storage base in a different class which requires checks that the other
// class is initialized
@@ -695,10 +685,9 @@ void Mir2Lir::GenSget(MIR* mir, RegLocation rl_dest,
LoadCurrMethodDirect(r_method);
r_base = TargetReg(kArg0);
LockTemp(r_base);
- LoadRefDisp(r_method, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(), r_base,
- kNotVolatile);
+ LoadRefDisp(r_method, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(), r_base);
int32_t offset_of_field = ObjArray::OffsetOfElement(field_info.StorageIndex()).Int32Value();
- LoadRefDisp(r_base, offset_of_field, r_base, kNotVolatile);
+ LoadRefDisp(r_base, offset_of_field, r_base);
// r_base now points at static storage (Class*) or NULL if the type is not yet resolved.
if (!field_info.IsInitialized() &&
(mir->optimization_flags & MIR_IGNORE_CLINIT_CHECK) == 0) {
@@ -728,12 +717,14 @@ void Mir2Lir::GenSget(MIR* mir, RegLocation rl_dest,
RegLocation rl_result = EvalLoc(rl_dest, reg_class, true);
int field_offset = field_info.FieldOffset().Int32Value();
- if (is_object) {
- LoadRefDisp(r_base, field_offset, rl_result.reg, field_info.IsVolatile() ? kVolatile :
- kNotVolatile);
+ if (field_info.IsVolatile()) {
+ LoadBaseDispVolatile(r_base, field_offset, rl_result.reg, load_size);
+ // Without context sensitive analysis, we must issue the most conservative barriers.
+ // In this case, either a load or store may follow so we issue both barriers.
+ GenMemBarrier(kLoadLoad);
+ GenMemBarrier(kLoadStore);
} else {
- LoadBaseDisp(r_base, field_offset, rl_result.reg, load_size, field_info.IsVolatile() ?
- kVolatile : kNotVolatile);
+ LoadBaseDisp(r_base, field_offset, rl_result.reg, load_size);
}
FreeTemp(r_base);
@@ -794,15 +785,17 @@ void Mir2Lir::GenIGet(MIR* mir, int opt_flags, OpSize size,
GenNullCheck(rl_obj.reg, opt_flags);
RegLocation rl_result = EvalLoc(rl_dest, reg_class, true);
int field_offset = field_info.FieldOffset().Int32Value();
- LIR* load_lir;
- if (is_object) {
- load_lir = LoadRefDisp(rl_obj.reg, field_offset, rl_result.reg, field_info.IsVolatile() ?
- kVolatile : kNotVolatile);
+ if (field_info.IsVolatile()) {
+ LoadBaseDispVolatile(rl_obj.reg, field_offset, rl_result.reg, load_size);
+ MarkPossibleNullPointerException(opt_flags);
+ // Without context sensitive analysis, we must issue the most conservative barriers.
+ // In this case, either a load or store may follow so we issue both barriers.
+ GenMemBarrier(kLoadLoad);
+ GenMemBarrier(kLoadStore);
} else {
- load_lir = LoadBaseDisp(rl_obj.reg, field_offset, rl_result.reg, load_size,
- field_info.IsVolatile() ? kVolatile : kNotVolatile);
+ LoadBaseDisp(rl_obj.reg, field_offset, rl_result.reg, load_size);
+ MarkPossibleNullPointerException(opt_flags);
}
- MarkPossibleNullPointerExceptionAfter(opt_flags, load_lir);
if (is_long_or_double) {
StoreValueWide(rl_dest, rl_result);
} else {
@@ -854,15 +847,17 @@ void Mir2Lir::GenIPut(MIR* mir, int opt_flags, OpSize size,
}
GenNullCheck(rl_obj.reg, opt_flags);
int field_offset = field_info.FieldOffset().Int32Value();
- LIR* store;
- if (is_object) {
- store = StoreRefDisp(rl_obj.reg, field_offset, rl_src.reg, field_info.IsVolatile() ?
- kVolatile : kNotVolatile);
+ if (field_info.IsVolatile()) {
+ // There might have been a store before this volatile one so insert StoreStore barrier.
+ GenMemBarrier(kStoreStore);
+ StoreBaseDispVolatile(rl_obj.reg, field_offset, rl_src.reg, store_size);
+ MarkPossibleNullPointerException(opt_flags);
+ // A load might follow the volatile store so insert a StoreLoad barrier.
+ GenMemBarrier(kStoreLoad);
} else {
- store = StoreBaseDisp(rl_obj.reg, field_offset, rl_src.reg, store_size,
- field_info.IsVolatile() ? kVolatile : kNotVolatile);
+ StoreBaseDisp(rl_obj.reg, field_offset, rl_src.reg, store_size);
+ MarkPossibleNullPointerException(opt_flags);
}
- MarkPossibleNullPointerExceptionAfter(opt_flags, store);
if (is_object && !mir_graph_->IsConstantNullRef(rl_src)) {
MarkGCCard(rl_src.reg, rl_obj.reg);
}
@@ -921,9 +916,9 @@ void Mir2Lir::GenConstClass(uint32_t type_idx, RegLocation rl_dest) {
// We don't need access checks, load type from dex cache
int32_t dex_cache_offset =
mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value();
- LoadRefDisp(rl_method.reg, dex_cache_offset, res_reg, kNotVolatile);
+ LoadRefDisp(rl_method.reg, dex_cache_offset, res_reg);
int32_t offset_of_type = ClassArray::OffsetOfElement(type_idx).Int32Value();
- LoadRefDisp(res_reg, offset_of_type, rl_result.reg, kNotVolatile);
+ LoadRefDisp(res_reg, offset_of_type, rl_result.reg);
if (!cu_->compiler_driver->CanAssumeTypeIsPresentInDexCache(*cu_->dex_file,
type_idx) || SLOW_TYPE_PATH) {
// Slow path, at runtime test if type is null and if so initialize
@@ -994,10 +989,10 @@ void Mir2Lir::GenConstString(uint32_t string_idx, RegLocation rl_dest) {
LoadCurrMethodDirect(r_method);
}
LoadRefDisp(r_method, mirror::ArtMethod::DexCacheStringsOffset().Int32Value(),
- TargetReg(kArg0), kNotVolatile);
+ TargetReg(kArg0));
// Might call out to helper, which will return resolved string in kRet0
- LoadRefDisp(TargetReg(kArg0), offset_of_string, TargetReg(kRet0), kNotVolatile);
+ LoadRefDisp(TargetReg(kArg0), offset_of_string, TargetReg(kRet0));
LIR* fromfast = OpCmpImmBranch(kCondEq, TargetReg(kRet0), 0, NULL);
LIR* cont = NewLIR0(kPseudoTargetLabel);
@@ -1036,9 +1031,8 @@ void Mir2Lir::GenConstString(uint32_t string_idx, RegLocation rl_dest) {
RegLocation rl_method = LoadCurrMethod();
RegStorage res_reg = AllocTempRef();
RegLocation rl_result = EvalLoc(rl_dest, kRefReg, true);
- LoadRefDisp(rl_method.reg, mirror::ArtMethod::DexCacheStringsOffset().Int32Value(), res_reg,
- kNotVolatile);
- LoadRefDisp(res_reg, offset_of_string, rl_result.reg, kNotVolatile);
+ LoadRefDisp(rl_method.reg, mirror::ArtMethod::DexCacheStringsOffset().Int32Value(), res_reg);
+ LoadRefDisp(res_reg, offset_of_string, rl_result.reg);
StoreValue(rl_dest, rl_result);
}
}
@@ -1139,17 +1133,14 @@ void Mir2Lir::GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx, Re
LoadCurrMethodDirect(check_class);
if (use_declaring_class) {
- LoadRefDisp(check_class, mirror::ArtMethod::DeclaringClassOffset().Int32Value(), check_class,
- kNotVolatile);
- LoadRefDisp(object.reg, mirror::Object::ClassOffset().Int32Value(), object_class,
- kNotVolatile);
+ LoadRefDisp(check_class, mirror::ArtMethod::DeclaringClassOffset().Int32Value(), check_class);
+ LoadRefDisp(object.reg, mirror::Object::ClassOffset().Int32Value(), object_class);
} else {
LoadRefDisp(check_class, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
- check_class, kNotVolatile);
- LoadRefDisp(object.reg, mirror::Object::ClassOffset().Int32Value(), object_class,
- kNotVolatile);
+ check_class);
+ LoadRefDisp(object.reg, mirror::Object::ClassOffset().Int32Value(), object_class);
int32_t offset_of_type = ClassArray::OffsetOfElement(type_idx).Int32Value();
- LoadRefDisp(check_class, offset_of_type, check_class, kNotVolatile);
+ LoadRefDisp(check_class, offset_of_type, check_class);
}
LIR* ne_branchover = NULL;
@@ -1205,14 +1196,14 @@ void Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_know
} else if (use_declaring_class) {
LoadValueDirectFixed(rl_src, TargetReg(kArg0)); // kArg0 <= ref
LoadRefDisp(TargetReg(kArg1), mirror::ArtMethod::DeclaringClassOffset().Int32Value(),
- class_reg, kNotVolatile);
+ class_reg);
} else {
// Load dex cache entry into class_reg (kArg2)
LoadValueDirectFixed(rl_src, TargetReg(kArg0)); // kArg0 <= ref
LoadRefDisp(TargetReg(kArg1), mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
- class_reg, kNotVolatile);
+ class_reg);
int32_t offset_of_type = ClassArray::OffsetOfElement(type_idx).Int32Value();
- LoadRefDisp(class_reg, offset_of_type, class_reg, kNotVolatile);
+ LoadRefDisp(class_reg, offset_of_type, class_reg);
if (!can_assume_type_is_in_dex_cache) {
// Need to test presence of type in dex cache at runtime
LIR* hop_branch = OpCmpImmBranch(kCondNe, class_reg, 0, NULL);
@@ -1240,8 +1231,7 @@ void Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_know
/* load object->klass_ */
DCHECK_EQ(mirror::Object::ClassOffset().Int32Value(), 0);
- LoadRefDisp(TargetReg(kArg0), mirror::Object::ClassOffset().Int32Value(), TargetReg(kArg1),
- kNotVolatile);
+ LoadRefDisp(TargetReg(kArg0), mirror::Object::ClassOffset().Int32Value(), TargetReg(kArg1));
/* kArg0 is ref, kArg1 is ref->klass_, kArg2 is class */
LIR* branchover = NULL;
if (type_known_final) {
@@ -1354,13 +1344,13 @@ void Mir2Lir::GenCheckCast(uint32_t insn_idx, uint32_t type_idx, RegLocation rl_
OpRegCopy(class_reg, TargetReg(kRet0)); // Align usage with fast path
} else if (use_declaring_class) {
LoadRefDisp(TargetReg(kArg1), mirror::ArtMethod::DeclaringClassOffset().Int32Value(),
- class_reg, kNotVolatile);
+ class_reg);
} else {
// Load dex cache entry into class_reg (kArg2)
LoadRefDisp(TargetReg(kArg1), mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
- class_reg, kNotVolatile);
+ class_reg);
int32_t offset_of_type = ClassArray::OffsetOfElement(type_idx).Int32Value();
- LoadRefDisp(class_reg, offset_of_type, class_reg, kNotVolatile);
+ LoadRefDisp(class_reg, offset_of_type, class_reg);
if (!cu_->compiler_driver->CanAssumeTypeIsPresentInDexCache(*cu_->dex_file, type_idx)) {
// Need to test presence of type in dex cache at runtime
LIR* hop_branch = OpCmpImmBranch(kCondEq, class_reg, 0, NULL);
@@ -1415,7 +1405,7 @@ void Mir2Lir::GenCheckCast(uint32_t insn_idx, uint32_t type_idx, RegLocation rl_
if (load_) {
m2l_->LoadRefDisp(m2l_->TargetReg(kArg0), mirror::Object::ClassOffset().Int32Value(),
- m2l_->TargetReg(kArg1), kNotVolatile);
+ m2l_->TargetReg(kArg1));
}
if (m2l_->cu_->target64) {
m2l_->CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(8, pCheckCast), m2l_->TargetReg(kArg2),
@@ -1446,8 +1436,7 @@ void Mir2Lir::GenCheckCast(uint32_t insn_idx, uint32_t type_idx, RegLocation rl_
LIR* branch1 = OpCmpImmBranch(kCondEq, TargetReg(kArg0), 0, NULL);
/* load object->klass_ */
DCHECK_EQ(mirror::Object::ClassOffset().Int32Value(), 0);
- LoadRefDisp(TargetReg(kArg0), mirror::Object::ClassOffset().Int32Value(), TargetReg(kArg1),
- kNotVolatile);
+ LoadRefDisp(TargetReg(kArg0), mirror::Object::ClassOffset().Int32Value(), TargetReg(kArg1));
LIR* branch2 = OpCmpBranch(kCondNe, TargetReg(kArg1), class_reg, NULL);
LIR* cont = NewLIR0(kPseudoTargetLabel);
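
For context, not from the patch: the barrier placement restored above (a StoreStore barrier before a volatile store and a StoreLoad barrier after it; LoadLoad plus LoadStore barriers after a volatile load) corresponds roughly to the following fence-based C++ sketch, with a plain std::atomic standing in for the object's field:

#include <atomic>
#include <cstdint>

std::atomic<int32_t> field;  // Illustrative stand-in for r_base + field_offset.

void VolatileStore(int32_t value) {
  std::atomic_thread_fence(std::memory_order_release);  // ~ GenMemBarrier(kStoreStore)
  field.store(value, std::memory_order_relaxed);
  std::atomic_thread_fence(std::memory_order_seq_cst);  // ~ GenMemBarrier(kStoreLoad)
}

int32_t VolatileLoad() {
  int32_t value = field.load(std::memory_order_relaxed);
  // kLoadLoad + kLoadStore together act like an acquire fence.
  std::atomic_thread_fence(std::memory_order_acquire);
  return value;
}
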
diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc
index 008ebfb..638c590 100644
--- a/compiler/dex/quick/gen_invoke.cc
+++ b/compiler/dex/quick/gen_invoke.cc
@@ -501,7 +501,7 @@ void Mir2Lir::FlushIns(RegLocation* ArgLocs, RegLocation rl_method) {
StoreValue(rl_method, rl_src);
// If Method* has been promoted, explicitly flush
if (rl_method.location == kLocPhysReg) {
- StoreRefDisp(TargetReg(kSp), 0, TargetReg(kArg0), kNotVolatile);
+ StoreRefDisp(TargetReg(kSp), 0, TargetReg(kArg0));
}
if (cu_->num_ins == 0) {
@@ -616,8 +616,7 @@ static int NextSDCallInsn(CompilationUnit* cu, CallInfo* info,
case 1: // Get method->dex_cache_resolved_methods_
cg->LoadRefDisp(cg->TargetReg(kArg0),
mirror::ArtMethod::DexCacheResolvedMethodsOffset().Int32Value(),
- cg->TargetReg(kArg0),
- kNotVolatile);
+ cg->TargetReg(kArg0));
// Set up direct code if known.
if (direct_code != 0) {
if (direct_code != static_cast<uintptr_t>(-1)) {
@@ -632,8 +631,7 @@ static int NextSDCallInsn(CompilationUnit* cu, CallInfo* info,
CHECK_EQ(cu->dex_file, target_method.dex_file);
cg->LoadRefDisp(cg->TargetReg(kArg0),
ObjArray::OffsetOfElement(target_method.dex_method_index).Int32Value(),
- cg->TargetReg(kArg0),
- kNotVolatile);
+ cg->TargetReg(kArg0));
break;
case 3: // Grab the code from the method*
if (cu->instruction_set != kX86 && cu->instruction_set != kX86_64) {
@@ -678,20 +676,17 @@ static int NextVCallInsn(CompilationUnit* cu, CallInfo* info,
cg->GenNullCheck(cg->TargetReg(kArg1), info->opt_flags);
// get this->klass_ [use kArg1, set kInvokeTgt]
cg->LoadRefDisp(cg->TargetReg(kArg1), mirror::Object::ClassOffset().Int32Value(),
- cg->TargetReg(kInvokeTgt),
- kNotVolatile);
+ cg->TargetReg(kInvokeTgt));
cg->MarkPossibleNullPointerException(info->opt_flags);
break;
case 2: // Get this->klass_->vtable [usr kInvokeTgt, set kInvokeTgt]
cg->LoadRefDisp(cg->TargetReg(kInvokeTgt), mirror::Class::VTableOffset().Int32Value(),
- cg->TargetReg(kInvokeTgt),
- kNotVolatile);
+ cg->TargetReg(kInvokeTgt));
break;
case 3: // Get target method [use kInvokeTgt, set kArg0]
cg->LoadRefDisp(cg->TargetReg(kInvokeTgt),
ObjArray::OffsetOfElement(method_idx).Int32Value(),
- cg->TargetReg(kArg0),
- kNotVolatile);
+ cg->TargetReg(kArg0));
break;
case 4: // Get the compiled code address [uses kArg0, sets kInvokeTgt]
if (cu->instruction_set != kX86 && cu->instruction_set != kX86_64) {
@@ -736,22 +731,19 @@ static int NextInterfaceCallInsn(CompilationUnit* cu, CallInfo* info, int state,
cg->GenNullCheck(cg->TargetReg(kArg1), info->opt_flags);
// Get this->klass_ [use kArg1, set kInvokeTgt]
cg->LoadRefDisp(cg->TargetReg(kArg1), mirror::Object::ClassOffset().Int32Value(),
- cg->TargetReg(kInvokeTgt),
- kNotVolatile);
+ cg->TargetReg(kInvokeTgt));
cg->MarkPossibleNullPointerException(info->opt_flags);
break;
case 3: // Get this->klass_->imtable [use kInvokeTgt, set kInvokeTgt]
// NOTE: native pointer.
cg->LoadRefDisp(cg->TargetReg(kInvokeTgt), mirror::Class::ImTableOffset().Int32Value(),
- cg->TargetReg(kInvokeTgt),
- kNotVolatile);
+ cg->TargetReg(kInvokeTgt));
break;
case 4: // Get target method [use kInvokeTgt, set kArg0]
// NOTE: native pointer.
cg->LoadRefDisp(cg->TargetReg(kInvokeTgt),
ObjArray::OffsetOfElement(method_idx % ClassLinker::kImtSize).Int32Value(),
- cg->TargetReg(kArg0),
- kNotVolatile);
+ cg->TargetReg(kArg0));
break;
case 5: // Get the compiled code address [use kArg0, set kInvokeTgt]
if (cu->instruction_set != kX86 && cu->instruction_set != kX86_64) {
@@ -975,7 +967,7 @@ int Mir2Lir::GenDalvikArgsNoRange(CallInfo* info,
{
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
if (rl_arg.wide) {
- StoreBaseDisp(TargetReg(kSp), outs_offset, arg_reg, k64, kNotVolatile);
+ StoreBaseDisp(TargetReg(kSp), outs_offset, arg_reg, k64);
next_use += 2;
} else {
Store32Disp(TargetReg(kSp), outs_offset, arg_reg);
@@ -1045,7 +1037,7 @@ int Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state,
loc = UpdateLocWide(loc);
if ((next_arg >= 2) && (loc.location == kLocPhysReg)) {
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
- StoreBaseDisp(TargetReg(kSp), SRegOffset(loc.s_reg_low), loc.reg, k64, kNotVolatile);
+ StoreBaseDisp(TargetReg(kSp), SRegOffset(loc.s_reg_low), loc.reg, k64);
}
next_arg += 2;
} else {
@@ -1315,7 +1307,7 @@ bool Mir2Lir::GenInlinedCharAt(CallInfo* info) {
reg_off = AllocTemp();
reg_ptr = AllocTempRef();
Load32Disp(rl_obj.reg, offset_offset, reg_off);
- LoadRefDisp(rl_obj.reg, value_offset, reg_ptr, kNotVolatile);
+ LoadRefDisp(rl_obj.reg, value_offset, reg_ptr);
}
if (rl_idx.is_const) {
OpRegImm(kOpAdd, reg_off, mir_graph_->ConstantValue(rl_idx.orig_sreg));
@@ -1680,7 +1672,7 @@ bool Mir2Lir::GenInlinedUnsafeGet(CallInfo* info,
} else {
RegStorage rl_temp_offset = AllocTemp();
OpRegRegReg(kOpAdd, rl_temp_offset, rl_object.reg, rl_offset.reg);
- LoadBaseDisp(rl_temp_offset, 0, rl_result.reg, k64, kNotVolatile);
+ LoadBaseDisp(rl_temp_offset, 0, rl_result.reg, k64);
FreeTemp(rl_temp_offset);
}
} else {
@@ -1727,7 +1719,7 @@ bool Mir2Lir::GenInlinedUnsafePut(CallInfo* info, bool is_long,
} else {
RegStorage rl_temp_offset = AllocTemp();
OpRegRegReg(kOpAdd, rl_temp_offset, rl_object.reg, rl_offset.reg);
- StoreBaseDisp(rl_temp_offset, 0, rl_value.reg, k64, kNotVolatile);
+ StoreBaseDisp(rl_temp_offset, 0, rl_value.reg, k64);
FreeTemp(rl_temp_offset);
}
} else {
diff --git a/compiler/dex/quick/gen_loadstore.cc b/compiler/dex/quick/gen_loadstore.cc
index bfb77fc..d6f6ea1 100644
--- a/compiler/dex/quick/gen_loadstore.cc
+++ b/compiler/dex/quick/gen_loadstore.cc
@@ -66,7 +66,7 @@ void Mir2Lir::Workaround7250540(RegLocation rl_dest, RegStorage zero_reg) {
} else {
// Lives in the frame, need to store.
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
- StoreBaseDisp(TargetReg(kSp), SRegOffset(rl_dest.s_reg_low), temp_reg, k32, kNotVolatile);
+ StoreBaseDisp(TargetReg(kSp), SRegOffset(rl_dest.s_reg_low), temp_reg, k32);
}
if (!zero_reg.Valid()) {
FreeTemp(temp_reg);
@@ -93,7 +93,7 @@ void Mir2Lir::LoadValueDirect(RegLocation rl_src, RegStorage r_dest) {
(rl_src.location == kLocCompilerTemp));
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
if (rl_src.ref) {
- LoadRefDisp(TargetReg(kSp), SRegOffset(rl_src.s_reg_low), r_dest, kNotVolatile);
+ LoadRefDisp(TargetReg(kSp), SRegOffset(rl_src.s_reg_low), r_dest);
} else {
Load32Disp(TargetReg(kSp), SRegOffset(rl_src.s_reg_low), r_dest);
}
@@ -126,7 +126,7 @@ void Mir2Lir::LoadValueDirectWide(RegLocation rl_src, RegStorage r_dest) {
DCHECK((rl_src.location == kLocDalvikFrame) ||
(rl_src.location == kLocCompilerTemp));
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
- LoadBaseDisp(TargetReg(kSp), SRegOffset(rl_src.s_reg_low), r_dest, k64, kNotVolatile);
+ LoadBaseDisp(TargetReg(kSp), SRegOffset(rl_src.s_reg_low), r_dest, k64);
}
}
@@ -215,7 +215,7 @@ void Mir2Lir::StoreValue(RegLocation rl_dest, RegLocation rl_src) {
def_start = last_lir_insn_;
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
if (rl_dest.ref) {
- StoreRefDisp(TargetReg(kSp), SRegOffset(rl_dest.s_reg_low), rl_dest.reg, kNotVolatile);
+ StoreRefDisp(TargetReg(kSp), SRegOffset(rl_dest.s_reg_low), rl_dest.reg);
} else {
Store32Disp(TargetReg(kSp), SRegOffset(rl_dest.s_reg_low), rl_dest.reg);
}
@@ -305,7 +305,7 @@ void Mir2Lir::StoreValueWide(RegLocation rl_dest, RegLocation rl_src) {
DCHECK_EQ((mir_graph_->SRegToVReg(rl_dest.s_reg_low)+1),
mir_graph_->SRegToVReg(GetSRegHi(rl_dest.s_reg_low)));
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
- StoreBaseDisp(TargetReg(kSp), SRegOffset(rl_dest.s_reg_low), rl_dest.reg, k64, kNotVolatile);
+ StoreBaseDisp(TargetReg(kSp), SRegOffset(rl_dest.s_reg_low), rl_dest.reg, k64);
MarkClean(rl_dest);
def_end = last_lir_insn_;
MarkDefWide(rl_dest, def_start, def_end);
@@ -369,7 +369,7 @@ void Mir2Lir::StoreFinalValueWide(RegLocation rl_dest, RegLocation rl_src) {
DCHECK_EQ((mir_graph_->SRegToVReg(rl_dest.s_reg_low)+1),
mir_graph_->SRegToVReg(GetSRegHi(rl_dest.s_reg_low)));
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
- StoreBaseDisp(TargetReg(kSp), SRegOffset(rl_dest.s_reg_low), rl_dest.reg, k64, kNotVolatile);
+ StoreBaseDisp(TargetReg(kSp), SRegOffset(rl_dest.s_reg_low), rl_dest.reg, k64);
MarkClean(rl_dest);
LIR *def_end = last_lir_insn_;
MarkDefWide(rl_dest, def_start, def_end);
diff --git a/compiler/dex/quick/mips/call_mips.cc b/compiler/dex/quick/mips/call_mips.cc
index 26ea6a8..e53105f 100644
--- a/compiler/dex/quick/mips/call_mips.cc
+++ b/compiler/dex/quick/mips/call_mips.cc
@@ -264,9 +264,9 @@ void MipsMir2Lir::GenMoveException(RegLocation rl_dest) {
int ex_offset = Thread::ExceptionOffset<4>().Int32Value();
RegLocation rl_result = EvalLoc(rl_dest, kRefReg, true);
RegStorage reset_reg = AllocTempRef();
- LoadRefDisp(rs_rMIPS_SELF, ex_offset, rl_result.reg, kNotVolatile);
+ LoadRefDisp(rs_rMIPS_SELF, ex_offset, rl_result.reg);
LoadConstant(reset_reg, 0);
- StoreRefDisp(rs_rMIPS_SELF, ex_offset, reset_reg, kNotVolatile);
+ StoreRefDisp(rs_rMIPS_SELF, ex_offset, reset_reg);
FreeTemp(reset_reg);
StoreValue(rl_dest, rl_result);
}
diff --git a/compiler/dex/quick/mips/codegen_mips.h b/compiler/dex/quick/mips/codegen_mips.h
index c0ad916..571adac 100644
--- a/compiler/dex/quick/mips/codegen_mips.h
+++ b/compiler/dex/quick/mips/codegen_mips.h
@@ -33,16 +33,20 @@ class MipsMir2Lir FINAL : public Mir2Lir {
LIR* CheckSuspendUsingLoad() OVERRIDE;
RegStorage LoadHelper(ThreadOffset<4> offset) OVERRIDE;
RegStorage LoadHelper(ThreadOffset<8> offset) OVERRIDE;
+ LIR* LoadBaseDispVolatile(RegStorage r_base, int displacement, RegStorage r_dest,
+ OpSize size) OVERRIDE;
LIR* LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
- OpSize size, VolatileKind is_volatile) OVERRIDE;
+ OpSize size) OVERRIDE;
LIR* LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest, int scale,
OpSize size) OVERRIDE;
LIR* LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, int displacement,
RegStorage r_dest, OpSize size) OVERRIDE;
LIR* LoadConstantNoClobber(RegStorage r_dest, int value);
LIR* LoadConstantWide(RegStorage r_dest, int64_t value);
+ LIR* StoreBaseDispVolatile(RegStorage r_base, int displacement, RegStorage r_src,
+ OpSize size) OVERRIDE;
LIR* StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src,
- OpSize size, VolatileKind is_volatile) OVERRIDE;
+ OpSize size) OVERRIDE;
LIR* StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src, int scale,
OpSize size) OVERRIDE;
LIR* StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, int displacement,
diff --git a/compiler/dex/quick/mips/int_mips.cc b/compiler/dex/quick/mips/int_mips.cc
index 903a770..beaf6bb 100644
--- a/compiler/dex/quick/mips/int_mips.cc
+++ b/compiler/dex/quick/mips/int_mips.cc
@@ -294,7 +294,7 @@ bool MipsMir2Lir::GenInlinedPeek(CallInfo* info, OpSize size) {
RegLocation rl_address = LoadValue(rl_src_address, kCoreReg);
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
DCHECK(size == kSignedByte);
- LoadBaseDisp(rl_address.reg, 0, rl_result.reg, size, kNotVolatile);
+ LoadBaseDisp(rl_address.reg, 0, rl_result.reg, size);
StoreValue(rl_dest, rl_result);
return true;
}
@@ -310,7 +310,7 @@ bool MipsMir2Lir::GenInlinedPoke(CallInfo* info, OpSize size) {
RegLocation rl_address = LoadValue(rl_src_address, kCoreReg);
DCHECK(size == kSignedByte);
RegLocation rl_value = LoadValue(rl_src_value, kCoreReg);
- StoreBaseDisp(rl_address.reg, 0, rl_value.reg, size, kNotVolatile);
+ StoreBaseDisp(rl_address.reg, 0, rl_value.reg, size);
return true;
}
@@ -524,7 +524,7 @@ void MipsMir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
GenArrayBoundsCheck(rl_index.reg, reg_len);
FreeTemp(reg_len);
}
- LoadBaseDisp(reg_ptr, 0, rl_result.reg, size, kNotVolatile);
+ LoadBaseDisp(reg_ptr, 0, rl_result.reg, size);
FreeTemp(reg_ptr);
StoreValueWide(rl_dest, rl_result);
@@ -602,7 +602,7 @@ void MipsMir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
FreeTemp(reg_len);
}
- StoreBaseDisp(reg_ptr, 0, rl_src.reg, size, kNotVolatile);
+ StoreBaseDisp(reg_ptr, 0, rl_src.reg, size);
} else {
rl_src = LoadValue(rl_src, reg_class);
if (needs_range_check) {
diff --git a/compiler/dex/quick/mips/utility_mips.cc b/compiler/dex/quick/mips/utility_mips.cc
index b49f436..01b25f9 100644
--- a/compiler/dex/quick/mips/utility_mips.cc
+++ b/compiler/dex/quick/mips/utility_mips.cc
@@ -546,31 +546,23 @@ LIR* MipsMir2Lir::LoadBaseDispBody(RegStorage r_base, int displacement, RegStora
return load;
}
-LIR* MipsMir2Lir::LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
- OpSize size, VolatileKind is_volatile) {
- if (is_volatile == kVolatile) {
- DCHECK(size != k64 && size != kDouble);
- }
+LIR* MipsMir2Lir::LoadBaseDispVolatile(RegStorage r_base, int displacement, RegStorage r_dest,
+ OpSize size) {
+ DCHECK(size != k64 && size != kDouble);
+ return LoadBaseDisp(r_base, displacement, r_dest, size);
+}
+LIR* MipsMir2Lir::LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
+ OpSize size) {
// TODO: base this on target.
if (size == kWord) {
size = k32;
}
- LIR* load;
if (size == k64 || size == kDouble) {
- load = LoadBaseDispBody(r_base, displacement, r_dest.GetLow(), r_dest.GetHigh(), size);
+ return LoadBaseDispBody(r_base, displacement, r_dest.GetLow(), r_dest.GetHigh(), size);
} else {
- load = LoadBaseDispBody(r_base, displacement, r_dest, RegStorage::InvalidReg(), size);
+ return LoadBaseDispBody(r_base, displacement, r_dest, RegStorage::InvalidReg(), size);
}
-
- if (UNLIKELY(is_volatile == kVolatile)) {
- // Without context sensitive analysis, we must issue the most conservative barriers.
- // In this case, either a load or store may follow so we issue both barriers.
- GenMemBarrier(kLoadLoad);
- GenMemBarrier(kLoadStore);
- }
-
- return load;
}
// FIXME: don't split r_dest into 2 containers.
@@ -656,31 +648,23 @@ LIR* MipsMir2Lir::StoreBaseDispBody(RegStorage r_base, int displacement,
return res;
}
-LIR* MipsMir2Lir::StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src,
- OpSize size, VolatileKind is_volatile) {
- if (is_volatile == kVolatile) {
- DCHECK(size != k64 && size != kDouble);
- // There might have been a store before this volatile one so insert StoreStore barrier.
- GenMemBarrier(kStoreStore);
- }
+LIR* MipsMir2Lir::StoreBaseDispVolatile(RegStorage r_base, int displacement, RegStorage r_src,
+ OpSize size) {
+ DCHECK(size != k64 && size != kDouble);
+ return StoreBaseDisp(r_base, displacement, r_src, size);
+}
+LIR* MipsMir2Lir::StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src,
+ OpSize size) {
// TODO: base this on target.
if (size == kWord) {
size = k32;
}
- LIR* store;
if (size == k64 || size == kDouble) {
- store = StoreBaseDispBody(r_base, displacement, r_src.GetLow(), r_src.GetHigh(), size);
+ return StoreBaseDispBody(r_base, displacement, r_src.GetLow(), r_src.GetHigh(), size);
} else {
- store = StoreBaseDispBody(r_base, displacement, r_src, RegStorage::InvalidReg(), size);
+ return StoreBaseDispBody(r_base, displacement, r_src, RegStorage::InvalidReg(), size);
}
-
- if (UNLIKELY(is_volatile == kVolatile)) {
- // A load might follow the volatile store so insert a StoreLoad barrier.
- GenMemBarrier(kStoreLoad);
- }
-
- return store;
}
LIR* MipsMir2Lir::OpThreadMem(OpKind op, ThreadOffset<4> thread_offset) {
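
With the barriers deleted from the MIPS helpers above, LoadBaseDispVolatile() and StoreBaseDispVolatile() only assert the size and delegate; ordering is now emitted at the call site. A minimal sketch of a volatile 32-bit load under that convention, as it would appear inside a Mir2Lir member; r_obj, field_offset and r_result are placeholders, and the barrier pair mirrors the code removed above (and GenSpecialIGet() in the mir_to_lir.cc hunk further down):

// Volatile load, then the most conservative barriers: without context sensitive
// analysis either a load or a store may follow, so both are issued.
LoadBaseDispVolatile(r_obj, field_offset, r_result, k32);
GenMemBarrier(kLoadLoad);
GenMemBarrier(kLoadStore);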
diff --git a/compiler/dex/quick/mir_to_lir.cc b/compiler/dex/quick/mir_to_lir.cc
index 5d68187..1fc4163 100644
--- a/compiler/dex/quick/mir_to_lir.cc
+++ b/compiler/dex/quick/mir_to_lir.cc
@@ -92,7 +92,7 @@ RegStorage Mir2Lir::LoadArg(int in_position, RegisterClass reg_class, bool wide)
if (!reg_arg.Valid()) {
RegStorage new_reg =
wide ? AllocTypedTempWide(false, reg_class) : AllocTypedTemp(false, reg_class);
- LoadBaseDisp(TargetReg(kSp), offset, new_reg, wide ? k64 : k32, kNotVolatile);
+ LoadBaseDisp(TargetReg(kSp), offset, new_reg, wide ? k64 : k32);
return new_reg;
} else {
// Check if we need to copy the arg to a different reg_class.
@@ -120,7 +120,7 @@ RegStorage Mir2Lir::LoadArg(int in_position, RegisterClass reg_class, bool wide)
// If the low part is not in a reg, we allocate a pair. Otherwise, we just load to high reg.
if (!reg_arg_low.Valid()) {
RegStorage new_regs = AllocTypedTempWide(false, reg_class);
- LoadBaseDisp(TargetReg(kSp), offset, new_regs, k64, kNotVolatile);
+ LoadBaseDisp(TargetReg(kSp), offset, new_regs, k64);
return new_regs; // The reg_class is OK, we can return.
} else {
// Assume that no ABI allows splitting a wide fp reg between a narrow fp reg and memory,
@@ -193,7 +193,7 @@ void Mir2Lir::LoadArgDirect(int in_position, RegLocation rl_dest) {
if (reg.Valid()) {
OpRegCopy(rl_dest.reg, reg);
} else {
- LoadBaseDisp(TargetReg(kSp), offset, rl_dest.reg, k64, kNotVolatile);
+ LoadBaseDisp(TargetReg(kSp), offset, rl_dest.reg, k64);
}
return;
}
@@ -211,7 +211,7 @@ void Mir2Lir::LoadArgDirect(int in_position, RegLocation rl_dest) {
OpRegCopy(rl_dest.reg.GetHigh(), reg_arg_high);
Load32Disp(TargetReg(kSp), offset, rl_dest.reg.GetLow());
} else {
- LoadBaseDisp(TargetReg(kSp), offset, rl_dest.reg, k64, kNotVolatile);
+ LoadBaseDisp(TargetReg(kSp), offset, rl_dest.reg, k64);
}
}
}
@@ -243,11 +243,14 @@ bool Mir2Lir::GenSpecialIGet(MIR* mir, const InlineMethod& special) {
r_result = wide ? AllocTypedTempWide(rl_dest.fp, reg_class)
: AllocTypedTemp(rl_dest.fp, reg_class);
}
- if (ref) {
- LoadRefDisp(reg_obj, data.field_offset, r_result, data.is_volatile ? kVolatile : kNotVolatile);
+ if (data.is_volatile) {
+ LoadBaseDispVolatile(reg_obj, data.field_offset, r_result, size);
+ // Without context sensitive analysis, we must issue the most conservative barriers.
+ // In this case, either a load or store may follow so we issue both barriers.
+ GenMemBarrier(kLoadLoad);
+ GenMemBarrier(kLoadStore);
} else {
- LoadBaseDisp(reg_obj, data.field_offset, r_result, size, data.is_volatile ? kVolatile :
- kNotVolatile);
+ LoadBaseDisp(reg_obj, data.field_offset, r_result, size);
}
if (r_result != rl_dest.reg) {
if (wide) {
@@ -285,11 +288,14 @@ bool Mir2Lir::GenSpecialIPut(MIR* mir, const InlineMethod& special) {
RegStorage reg_obj = LoadArg(data.object_arg, kRefReg);
RegisterClass reg_class = RegClassForFieldLoadStore(size, data.is_volatile);
RegStorage reg_src = LoadArg(data.src_arg, reg_class, wide);
- if (ref) {
- StoreRefDisp(reg_obj, data.field_offset, reg_src, data.is_volatile ? kVolatile : kNotVolatile);
+ if (data.is_volatile) {
+ // There might have been a store before this volatile one so insert StoreStore barrier.
+ GenMemBarrier(kStoreStore);
+ StoreBaseDispVolatile(reg_obj, data.field_offset, reg_src, size);
+ // A load might follow the volatile store so insert a StoreLoad barrier.
+ GenMemBarrier(kStoreLoad);
} else {
- StoreBaseDisp(reg_obj, data.field_offset, reg_src, size, data.is_volatile ? kVolatile :
- kNotVolatile);
+ StoreBaseDisp(reg_obj, data.field_offset, reg_src, size);
}
if (ref) {
MarkGCCard(reg_src, reg_obj);
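
The GenSpecialIPut() hunk above fixes the store-side convention: a StoreStore barrier before the volatile store and a StoreLoad barrier after it. The same pattern as a minimal sketch; reg_obj, field_offset, reg_src and size are placeholders, the calls and comments mirror the hunk:

// Inside a Mir2Lir member, for illustration only.
// There might have been a store before this volatile one, so order it first.
GenMemBarrier(kStoreStore);
StoreBaseDispVolatile(reg_obj, field_offset, reg_src, size);
// A load might follow the volatile store, so keep later loads behind it.
GenMemBarrier(kStoreLoad);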
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
index b07c85e..f70087d 100644
--- a/compiler/dex/quick/mir_to_lir.h
+++ b/compiler/dex/quick/mir_to_lir.h
@@ -663,7 +663,6 @@ class Mir2Lir : public Backend {
virtual void Materialize();
virtual CompiledMethod* GetCompiledMethod();
void MarkSafepointPC(LIR* inst);
- void MarkSafepointPCAfter(LIR* after);
void SetupResourceMasks(LIR* lir);
void SetMemRefType(LIR* lir, bool is_load, int mem_type);
void AnnotateDalvikRegAccess(LIR* lir, int reg_id, bool is_load, bool is64bit);
@@ -831,7 +830,6 @@ class Mir2Lir : public Backend {
void GenArrayBoundsCheck(int32_t index, RegStorage length);
LIR* GenNullCheck(RegStorage reg);
void MarkPossibleNullPointerException(int opt_flags);
- void MarkPossibleNullPointerExceptionAfter(int opt_flags, LIR* after);
void MarkPossibleStackOverflowException();
void ForceImplicitNullCheck(RegStorage reg, int opt_flags);
LIR* GenImmedCheck(ConditionCode c_code, RegStorage reg, int imm_val, ThrowKind kind);
@@ -1009,20 +1007,15 @@ class Mir2Lir : public Backend {
virtual LIR* LoadConstant(RegStorage r_dest, int value);
// Natural word size.
virtual LIR* LoadWordDisp(RegStorage r_base, int displacement, RegStorage r_dest) {
- return LoadBaseDisp(r_base, displacement, r_dest, kWord, kNotVolatile);
+ return LoadBaseDisp(r_base, displacement, r_dest, kWord);
}
// Load 32 bits, regardless of target.
virtual LIR* Load32Disp(RegStorage r_base, int displacement, RegStorage r_dest) {
- return LoadBaseDisp(r_base, displacement, r_dest, k32, kNotVolatile);
+ return LoadBaseDisp(r_base, displacement, r_dest, k32);
}
// Load a reference at base + displacement and decompress into register.
- virtual LIR* LoadRefDisp(RegStorage r_base, int displacement, RegStorage r_dest,
- VolatileKind is_volatile) {
- return LoadBaseDisp(r_base, displacement, r_dest, kReference, is_volatile);
- }
- // Load a reference at base + index and decompress into register.
- virtual LIR* LoadRefIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest) {
- return LoadBaseIndexed(r_base, r_index, r_dest, 2, kReference);
+ virtual LIR* LoadRefDisp(RegStorage r_base, int displacement, RegStorage r_dest) {
+ return LoadBaseDisp(r_base, displacement, r_dest, kReference);
}
// Load Dalvik value with 32-bit memory storage. If compressed object reference, decompress.
virtual RegLocation LoadValue(RegLocation rl_src, RegisterClass op_kind);
@@ -1040,20 +1033,15 @@ class Mir2Lir : public Backend {
virtual void LoadValueDirectWideFixed(RegLocation rl_src, RegStorage r_dest);
// Store an item of natural word size.
virtual LIR* StoreWordDisp(RegStorage r_base, int displacement, RegStorage r_src) {
- return StoreBaseDisp(r_base, displacement, r_src, kWord, kNotVolatile);
+ return StoreBaseDisp(r_base, displacement, r_src, kWord);
}
// Store an uncompressed reference into a compressed 32-bit container.
- virtual LIR* StoreRefDisp(RegStorage r_base, int displacement, RegStorage r_src,
- VolatileKind is_volatile) {
- return StoreBaseDisp(r_base, displacement, r_src, kReference, is_volatile);
- }
- // Store an uncompressed reference into a compressed 32-bit container by index.
- virtual LIR* StoreRefIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src) {
- return StoreBaseIndexed(r_base, r_index, r_src, 2, kReference);
+ virtual LIR* StoreRefDisp(RegStorage r_base, int displacement, RegStorage r_src) {
+ return StoreBaseDisp(r_base, displacement, r_src, kReference);
}
// Store 32 bits, regardless of target.
virtual LIR* Store32Disp(RegStorage r_base, int displacement, RegStorage r_src) {
- return StoreBaseDisp(r_base, displacement, r_src, k32, kNotVolatile);
+ return StoreBaseDisp(r_base, displacement, r_src, k32);
}
/**
@@ -1156,16 +1144,20 @@ class Mir2Lir : public Backend {
virtual RegStorage LoadHelper(ThreadOffset<4> offset) = 0;
virtual RegStorage LoadHelper(ThreadOffset<8> offset) = 0;
+ virtual LIR* LoadBaseDispVolatile(RegStorage r_base, int displacement, RegStorage r_dest,
+ OpSize size) = 0;
virtual LIR* LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
- OpSize size, VolatileKind is_volatile) = 0;
+ OpSize size) = 0;
virtual LIR* LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest,
int scale, OpSize size) = 0;
virtual LIR* LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale,
int displacement, RegStorage r_dest, OpSize size) = 0;
virtual LIR* LoadConstantNoClobber(RegStorage r_dest, int value) = 0;
virtual LIR* LoadConstantWide(RegStorage r_dest, int64_t value) = 0;
+ virtual LIR* StoreBaseDispVolatile(RegStorage r_base, int displacement, RegStorage r_src,
+ OpSize size) = 0;
virtual LIR* StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src,
- OpSize size, VolatileKind is_volatile) = 0;
+ OpSize size) = 0;
virtual LIR* StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src,
int scale, OpSize size) = 0;
virtual LIR* StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale,
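
The mir_to_lir.h hunk above also drops the LoadRefIndexed() and StoreRefIndexed() wrappers, so reference array elements are now accessed through the indexed primitives with the scale (2) and kReference spelled out. A minimal usage sketch inside a Mir2Lir member; the register names are placeholders:

LoadBaseIndexed(r_array_data, r_index, r_dest, 2, kReference);   // was LoadRefIndexed()
StoreBaseIndexed(r_array_data, r_index, r_src, 2, kReference);   // was StoreRefIndexed()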
diff --git a/compiler/dex/quick/ralloc_util.cc b/compiler/dex/quick/ralloc_util.cc
index 60eebe4..5bb0ee0 100644
--- a/compiler/dex/quick/ralloc_util.cc
+++ b/compiler/dex/quick/ralloc_util.cc
@@ -735,7 +735,7 @@ void Mir2Lir::FlushRegWide(RegStorage reg) {
}
int v_reg = mir_graph_->SRegToVReg(info1->SReg());
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
- StoreBaseDisp(TargetReg(kSp), VRegOffset(v_reg), reg, k64, kNotVolatile);
+ StoreBaseDisp(TargetReg(kSp), VRegOffset(v_reg), reg, k64);
}
} else {
RegisterInfo* info = GetRegInfo(reg);
@@ -743,7 +743,7 @@ void Mir2Lir::FlushRegWide(RegStorage reg) {
info->SetIsDirty(false);
int v_reg = mir_graph_->SRegToVReg(info->SReg());
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
- StoreBaseDisp(TargetReg(kSp), VRegOffset(v_reg), reg, k64, kNotVolatile);
+ StoreBaseDisp(TargetReg(kSp), VRegOffset(v_reg), reg, k64);
}
}
}
@@ -755,7 +755,7 @@ void Mir2Lir::FlushReg(RegStorage reg) {
info->SetIsDirty(false);
int v_reg = mir_graph_->SRegToVReg(info->SReg());
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
- StoreBaseDisp(TargetReg(kSp), VRegOffset(v_reg), reg, kWord, kNotVolatile);
+ StoreBaseDisp(TargetReg(kSp), VRegOffset(v_reg), reg, kWord);
}
}
diff --git a/compiler/dex/quick/x86/call_x86.cc b/compiler/dex/quick/x86/call_x86.cc
index 425caec..28195ab 100644
--- a/compiler/dex/quick/x86/call_x86.cc
+++ b/compiler/dex/quick/x86/call_x86.cc
@@ -295,8 +295,7 @@ void X86Mir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) {
setup_method_address_[0] = NewLIR1(kX86StartOfMethod, rs_rX86_ARG0.GetReg());
int displacement = SRegOffset(base_of_code_->s_reg_low);
// Native pointer - must be natural word size.
- setup_method_address_[1] = StoreBaseDisp(rs_rX86_SP, displacement, rs_rX86_ARG0,
- Gen64Bit() ? k64 : k32, kNotVolatile);
+ setup_method_address_[1] = StoreBaseDisp(rs_rX86_SP, displacement, rs_rX86_ARG0, Gen64Bit() ? k64 : k32);
}
FreeTemp(rs_rX86_ARG0);
diff --git a/compiler/dex/quick/x86/codegen_x86.h b/compiler/dex/quick/x86/codegen_x86.h
index 70382c7..d482e58 100644
--- a/compiler/dex/quick/x86/codegen_x86.h
+++ b/compiler/dex/quick/x86/codegen_x86.h
@@ -68,16 +68,20 @@ class X86Mir2Lir : public Mir2Lir {
LIR* CheckSuspendUsingLoad() OVERRIDE;
RegStorage LoadHelper(ThreadOffset<4> offset) OVERRIDE;
RegStorage LoadHelper(ThreadOffset<8> offset) OVERRIDE;
+ LIR* LoadBaseDispVolatile(RegStorage r_base, int displacement, RegStorage r_dest,
+ OpSize size) OVERRIDE;
LIR* LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
- OpSize size, VolatileKind is_volatile) OVERRIDE;
+ OpSize size) OVERRIDE;
LIR* LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest, int scale,
OpSize size) OVERRIDE;
LIR* LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, int displacement,
RegStorage r_dest, OpSize size) OVERRIDE;
LIR* LoadConstantNoClobber(RegStorage r_dest, int value);
LIR* LoadConstantWide(RegStorage r_dest, int64_t value);
+ LIR* StoreBaseDispVolatile(RegStorage r_base, int displacement, RegStorage r_src,
+ OpSize size) OVERRIDE;
LIR* StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src,
- OpSize size, VolatileKind is_volatile) OVERRIDE;
+ OpSize size) OVERRIDE;
LIR* StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src, int scale,
OpSize size) OVERRIDE;
LIR* StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, int displacement,
diff --git a/compiler/dex/quick/x86/fp_x86.cc b/compiler/dex/quick/x86/fp_x86.cc
index f854adb..5082d60 100644
--- a/compiler/dex/quick/x86/fp_x86.cc
+++ b/compiler/dex/quick/x86/fp_x86.cc
@@ -144,7 +144,7 @@ void X86Mir2Lir::GenLongToFP(RegLocation rl_dest, RegLocation rl_src, bool is_do
} else {
// It must have been register promoted if it is not a temp but is still in physical
// register. Since we need it to be in memory to convert, we place it there now.
- StoreBaseDisp(TargetReg(kSp), src_v_reg_offset, rl_src.reg, k64, kNotVolatile);
+ StoreBaseDisp(TargetReg(kSp), src_v_reg_offset, rl_src.reg, k64);
}
}
@@ -178,7 +178,7 @@ void X86Mir2Lir::GenLongToFP(RegLocation rl_dest, RegLocation rl_src, bool is_do
*/
rl_result = EvalLoc(rl_dest, kFPReg, true);
if (is_double) {
- LoadBaseDisp(TargetReg(kSp), dest_v_reg_offset, rl_result.reg, k64, kNotVolatile);
+ LoadBaseDisp(TargetReg(kSp), dest_v_reg_offset, rl_result.reg, k64);
StoreFinalValueWide(rl_dest, rl_result);
} else {
@@ -363,8 +363,7 @@ void X86Mir2Lir::GenRemFP(RegLocation rl_dest, RegLocation rl_src1, RegLocation
} else {
// It must have been register promoted if it is not a temp but is still in physical
// register. Since we need it to be in memory to convert, we place it there now.
- StoreBaseDisp(TargetReg(kSp), src1_v_reg_offset, rl_src1.reg, is_double ? k64 : k32,
- kNotVolatile);
+ StoreBaseDisp(TargetReg(kSp), src1_v_reg_offset, rl_src1.reg, is_double ? k64 : k32);
}
}
@@ -374,8 +373,7 @@ void X86Mir2Lir::GenRemFP(RegLocation rl_dest, RegLocation rl_src1, RegLocation
FlushSpecificReg(reg_info);
ResetDef(rl_src2.reg);
} else {
- StoreBaseDisp(TargetReg(kSp), src2_v_reg_offset, rl_src2.reg, is_double ? k64 : k32,
- kNotVolatile);
+ StoreBaseDisp(TargetReg(kSp), src2_v_reg_offset, rl_src2.reg, is_double ? k64 : k32);
}
}
@@ -435,7 +433,7 @@ void X86Mir2Lir::GenRemFP(RegLocation rl_dest, RegLocation rl_src1, RegLocation
if (rl_result.location == kLocPhysReg) {
rl_result = EvalLoc(rl_dest, kFPReg, true);
if (is_double) {
- LoadBaseDisp(TargetReg(kSp), dest_v_reg_offset, rl_result.reg, k64, kNotVolatile);
+ LoadBaseDisp(TargetReg(kSp), dest_v_reg_offset, rl_result.reg, k64);
StoreFinalValueWide(rl_dest, rl_result);
} else {
Load32Disp(TargetReg(kSp), dest_v_reg_offset, rl_result.reg);
diff --git a/compiler/dex/quick/x86/int_x86.cc b/compiler/dex/quick/x86/int_x86.cc
index 350cfb8..2f914c1 100644
--- a/compiler/dex/quick/x86/int_x86.cc
+++ b/compiler/dex/quick/x86/int_x86.cc
@@ -754,7 +754,7 @@ bool X86Mir2Lir::GenInlinedPeek(CallInfo* info, OpSize size) {
RegLocation rl_address = LoadValue(rl_src_address, kCoreReg);
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
// Unaligned access is allowed on x86.
- LoadBaseDisp(rl_address.reg, 0, rl_result.reg, size, kNotVolatile);
+ LoadBaseDisp(rl_address.reg, 0, rl_result.reg, size);
if (size == k64) {
StoreValueWide(rl_dest, rl_result);
} else {
@@ -772,12 +772,12 @@ bool X86Mir2Lir::GenInlinedPoke(CallInfo* info, OpSize size) {
if (size == k64) {
// Unaligned access is allowed on x86.
RegLocation rl_value = LoadValueWide(rl_src_value, kCoreReg);
- StoreBaseDisp(rl_address.reg, 0, rl_value.reg, size, kNotVolatile);
+ StoreBaseDisp(rl_address.reg, 0, rl_value.reg, size);
} else {
DCHECK(size == kSignedByte || size == kSignedHalf || size == k32);
// Unaligned access is allowed on x86.
RegLocation rl_value = LoadValue(rl_src_value, kCoreReg);
- StoreBaseDisp(rl_address.reg, 0, rl_value.reg, size, kNotVolatile);
+ StoreBaseDisp(rl_address.reg, 0, rl_value.reg, size);
}
return true;
}
@@ -1138,7 +1138,7 @@ void X86Mir2Lir::GenImulMemImm(RegStorage dest, int sreg, int displacement, int
NewLIR2(kX86Xor32RR, dest.GetReg(), dest.GetReg());
break;
case 1:
- LoadBaseDisp(rs_rX86_SP, displacement, dest, k32, kNotVolatile);
+ LoadBaseDisp(rs_rX86_SP, displacement, dest, k32);
break;
default:
m = NewLIR4(IS_SIMM8(val) ? kX86Imul32RMI8 : kX86Imul32RMI, dest.GetReg(),
@@ -1294,8 +1294,7 @@ void X86Mir2Lir::GenMulLong(Instruction::Code, RegLocation rl_dest, RegLocation
if (src1_in_reg) {
NewLIR2(kX86Mov32RR, rs_r1.GetReg(), rl_src1.reg.GetHighReg());
} else {
- LoadBaseDisp(rs_rX86_SP, SRegOffset(rl_src1.s_reg_low) + HIWORD_OFFSET, rs_r1, k32,
- kNotVolatile);
+ LoadBaseDisp(rs_rX86_SP, SRegOffset(rl_src1.s_reg_low) + HIWORD_OFFSET, rs_r1, k32);
}
if (is_square) {
@@ -1318,8 +1317,7 @@ void X86Mir2Lir::GenMulLong(Instruction::Code, RegLocation rl_dest, RegLocation
if (src2_in_reg) {
NewLIR2(kX86Mov32RR, rs_r0.GetReg(), rl_src2.reg.GetHighReg());
} else {
- LoadBaseDisp(rs_rX86_SP, SRegOffset(rl_src2.s_reg_low) + HIWORD_OFFSET, rs_r0, k32,
- kNotVolatile);
+ LoadBaseDisp(rs_rX86_SP, SRegOffset(rl_src2.s_reg_low) + HIWORD_OFFSET, rs_r0, k32);
}
// EAX <- EAX * 1L (2H * 1L)
@@ -1352,8 +1350,7 @@ void X86Mir2Lir::GenMulLong(Instruction::Code, RegLocation rl_dest, RegLocation
if (src2_in_reg) {
NewLIR2(kX86Mov32RR, rs_r0.GetReg(), rl_src2.reg.GetLowReg());
} else {
- LoadBaseDisp(rs_rX86_SP, SRegOffset(rl_src2.s_reg_low) + LOWORD_OFFSET, rs_r0, k32,
- kNotVolatile);
+ LoadBaseDisp(rs_rX86_SP, SRegOffset(rl_src2.s_reg_low) + LOWORD_OFFSET, rs_r0, k32);
}
// EDX:EAX <- 2L * 1L (double precision)
@@ -2292,21 +2289,21 @@ void X86Mir2Lir::GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx,
if (rl_method.location == kLocPhysReg) {
if (use_declaring_class) {
LoadRefDisp(rl_method.reg, mirror::ArtMethod::DeclaringClassOffset().Int32Value(),
- check_class, kNotVolatile);
+ check_class);
} else {
LoadRefDisp(rl_method.reg, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
- check_class, kNotVolatile);
- LoadRefDisp(check_class, offset_of_type, check_class, kNotVolatile);
+ check_class);
+ LoadRefDisp(check_class, offset_of_type, check_class);
}
} else {
LoadCurrMethodDirect(check_class);
if (use_declaring_class) {
LoadRefDisp(check_class, mirror::ArtMethod::DeclaringClassOffset().Int32Value(),
- check_class, kNotVolatile);
+ check_class);
} else {
LoadRefDisp(check_class, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
- check_class, kNotVolatile);
- LoadRefDisp(check_class, offset_of_type, check_class, kNotVolatile);
+ check_class);
+ LoadRefDisp(check_class, offset_of_type, check_class);
}
}
@@ -2353,16 +2350,16 @@ void X86Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_k
} else if (use_declaring_class) {
LoadValueDirectFixed(rl_src, TargetReg(kArg0));
LoadRefDisp(TargetReg(kArg1), mirror::ArtMethod::DeclaringClassOffset().Int32Value(),
- class_reg, kNotVolatile);
+ class_reg);
} else {
// Load dex cache entry into class_reg (kArg2).
LoadValueDirectFixed(rl_src, TargetReg(kArg0));
LoadRefDisp(TargetReg(kArg1), mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
- class_reg, kNotVolatile);
+ class_reg);
int32_t offset_of_type =
mirror::Array::DataOffset(sizeof(mirror::HeapReference<mirror::Class*>)).Int32Value() +
(sizeof(mirror::HeapReference<mirror::Class*>) * type_idx);
- LoadRefDisp(class_reg, offset_of_type, class_reg, kNotVolatile);
+ LoadRefDisp(class_reg, offset_of_type, class_reg);
if (!can_assume_type_is_in_dex_cache) {
// Need to test presence of type in dex cache at runtime.
LIR* hop_branch = OpCmpImmBranch(kCondNe, class_reg, 0, NULL);
@@ -2395,8 +2392,7 @@ void X86Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_k
/* Load object->klass_. */
DCHECK_EQ(mirror::Object::ClassOffset().Int32Value(), 0);
- LoadRefDisp(TargetReg(kArg0), mirror::Object::ClassOffset().Int32Value(), TargetReg(kArg1),
- kNotVolatile);
+ LoadRefDisp(TargetReg(kArg0), mirror::Object::ClassOffset().Int32Value(), TargetReg(kArg1));
/* kArg0 is ref, kArg1 is ref->klass_, kArg2 is class. */
LIR* branchover = nullptr;
if (type_known_final) {
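
Both instance-of paths above reach the cached class through the same two-step LoadRefDisp() chain. A sketch of that chain with the offset computation written next to it; r_method and check_class are placeholder registers, type_idx is the constant type index from the hunk:

int32_t offset_of_type =
    mirror::Array::DataOffset(sizeof(mirror::HeapReference<mirror::Class*>)).Int32Value() +
    (sizeof(mirror::HeapReference<mirror::Class*>) * type_idx);
// method->dex_cache_resolved_types_
LoadRefDisp(r_method, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(), check_class);
// resolved_types[type_idx]
LoadRefDisp(check_class, offset_of_type, check_class);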
diff --git a/compiler/dex/quick/x86/target_x86.cc b/compiler/dex/quick/x86/target_x86.cc
index e369d26..078dd5a 100644
--- a/compiler/dex/quick/x86/target_x86.cc
+++ b/compiler/dex/quick/x86/target_x86.cc
@@ -1866,7 +1866,7 @@ void X86Mir2Lir::FlushIns(RegLocation* ArgLocs, RegLocation rl_method) {
StoreValue(rl_method, rl_src);
// If Method* has been promoted, explicitly flush
if (rl_method.location == kLocPhysReg) {
- StoreRefDisp(TargetReg(kSp), 0, TargetReg(kArg0), kNotVolatile);
+ StoreRefDisp(TargetReg(kSp), 0, TargetReg(kArg0));
}
if (cu_->num_ins == 0) {
@@ -1916,11 +1916,11 @@ void X86Mir2Lir::FlushIns(RegLocation* ArgLocs, RegLocation rl_method) {
}
if (need_flush) {
if (t_loc->wide && t_loc->fp) {
- StoreBaseDisp(TargetReg(kSp), SRegOffset(start_vreg + i), reg, k64, kNotVolatile);
+ StoreBaseDisp(TargetReg(kSp), SRegOffset(start_vreg + i), reg, k64);
// Increment i to skip the next one
i++;
} else if (t_loc->wide && !t_loc->fp) {
- StoreBaseDisp(TargetReg(kSp), SRegOffset(start_vreg + i), reg, k64, kNotVolatile);
+ StoreBaseDisp(TargetReg(kSp), SRegOffset(start_vreg + i), reg, k64);
// Increment i to skip the next one
i++;
} else {
@@ -2018,14 +2018,14 @@ int X86Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state,
loc = UpdateLocWide(loc);
if (loc.location == kLocPhysReg) {
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
- StoreBaseDisp(TargetReg(kSp), SRegOffset(loc.s_reg_low), loc.reg, k64, kNotVolatile);
+ StoreBaseDisp(TargetReg(kSp), SRegOffset(loc.s_reg_low), loc.reg, k64);
}
next_arg += 2;
} else {
loc = UpdateLoc(loc);
if (loc.location == kLocPhysReg) {
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
- StoreBaseDisp(TargetReg(kSp), SRegOffset(loc.s_reg_low), loc.reg, k32, kNotVolatile);
+ StoreBaseDisp(TargetReg(kSp), SRegOffset(loc.s_reg_low), loc.reg, k32);
}
next_arg++;
}
@@ -2161,18 +2161,18 @@ int X86Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state,
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
if (rl_arg.wide) {
if (rl_arg.location == kLocPhysReg) {
- StoreBaseDisp(TargetReg(kSp), out_offset, rl_arg.reg, k64, kNotVolatile);
+ StoreBaseDisp(TargetReg(kSp), out_offset, rl_arg.reg, k64);
} else {
LoadValueDirectWideFixed(rl_arg, regWide);
- StoreBaseDisp(TargetReg(kSp), out_offset, regWide, k64, kNotVolatile);
+ StoreBaseDisp(TargetReg(kSp), out_offset, regWide, k64);
}
i++;
} else {
if (rl_arg.location == kLocPhysReg) {
- StoreBaseDisp(TargetReg(kSp), out_offset, rl_arg.reg, k32, kNotVolatile);
+ StoreBaseDisp(TargetReg(kSp), out_offset, rl_arg.reg, k32);
} else {
LoadValueDirectFixed(rl_arg, regSingle);
- StoreBaseDisp(TargetReg(kSp), out_offset, regSingle, k32, kNotVolatile);
+ StoreBaseDisp(TargetReg(kSp), out_offset, regSingle, k32);
}
}
}
diff --git a/compiler/dex/quick/x86/utility_x86.cc b/compiler/dex/quick/x86/utility_x86.cc
index 0352808..ac5162e 100644
--- a/compiler/dex/quick/x86/utility_x86.cc
+++ b/compiler/dex/quick/x86/utility_x86.cc
@@ -585,7 +585,7 @@ LIR* X86Mir2Lir::LoadConstantWide(RegStorage r_dest, int64_t value) {
// value.
ScopedMemRefType mem_ref_type(this, ResourceMask::kLiteral);
res = LoadBaseDisp(rl_method.reg, 256 /* bogus */, RegStorage::FloatSolo64(low_reg_val),
- kDouble, kNotVolatile);
+ kDouble);
res->target = data_target;
res->flags.fixup = kFixupLoad;
store_method_addr_used_ = true;
@@ -756,22 +756,17 @@ LIR* X86Mir2Lir::LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStora
return LoadBaseIndexedDisp(r_base, r_index, scale, 0, r_dest, size);
}
-LIR* X86Mir2Lir::LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
- OpSize size, VolatileKind is_volatile) {
+LIR* X86Mir2Lir::LoadBaseDispVolatile(RegStorage r_base, int displacement, RegStorage r_dest,
+ OpSize size) {
// LoadBaseDisp() will emit correct insn for atomic load on x86
// assuming r_dest is correctly prepared using RegClassForFieldLoadStore().
+ return LoadBaseDisp(r_base, displacement, r_dest, size);
+}
- LIR* load = LoadBaseIndexedDisp(r_base, RegStorage::InvalidReg(), 0, displacement, r_dest,
- size);
-
- if (UNLIKELY(is_volatile == kVolatile)) {
- // Without context sensitive analysis, we must issue the most conservative barriers.
- // In this case, either a load or store may follow so we issue both barriers.
- GenMemBarrier(kLoadLoad);
- GenMemBarrier(kLoadStore);
- }
-
- return load;
+LIR* X86Mir2Lir::LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
+ OpSize size) {
+ return LoadBaseIndexedDisp(r_base, RegStorage::InvalidReg(), 0, displacement, r_dest,
+ size);
}
LIR* X86Mir2Lir::StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale,
@@ -859,28 +854,20 @@ LIR* X86Mir2Lir::StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int
 /* store value at base + scaled index. */
LIR* X86Mir2Lir::StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src,
- int scale, OpSize size) {
+ int scale, OpSize size) {
return StoreBaseIndexedDisp(r_base, r_index, scale, 0, r_src, size);
}
-LIR* X86Mir2Lir::StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src, OpSize size,
- VolatileKind is_volatile) {
- if (UNLIKELY(is_volatile == kVolatile)) {
- // There might have been a store before this volatile one so insert StoreStore barrier.
- GenMemBarrier(kStoreStore);
- }
-
+LIR* X86Mir2Lir::StoreBaseDispVolatile(RegStorage r_base, int displacement,
+ RegStorage r_src, OpSize size) {
// StoreBaseDisp() will emit correct insn for atomic store on x86
  // assuming r_src is correctly prepared using RegClassForFieldLoadStore().
+ return StoreBaseDisp(r_base, displacement, r_src, size);
+}
- LIR* store = StoreBaseIndexedDisp(r_base, RegStorage::InvalidReg(), 0, displacement, r_src, size);
-
- if (UNLIKELY(is_volatile == kVolatile)) {
- // A load might follow the volatile store so insert a StoreLoad barrier.
- GenMemBarrier(kStoreLoad);
- }
-
- return store;
+LIR* X86Mir2Lir::StoreBaseDisp(RegStorage r_base, int displacement,
+ RegStorage r_src, OpSize size) {
+ return StoreBaseIndexedDisp(r_base, RegStorage::InvalidReg(), 0, displacement, r_src, size);
}
LIR* X86Mir2Lir::OpCmpMemImmBranch(ConditionCode cond, RegStorage temp_reg, RegStorage base_reg,
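
After the utility_x86.cc change above, the displacement-only entry points are thin forwards to the indexed forms with no index register, and the volatile variants delegate in turn because the plain access is already atomic for a register prepared with RegClassForFieldLoadStore(); any barriers stay at the call site. The forwarding, spelled out with placeholder operands:

// These two calls emit the same store after the change above.
StoreBaseDisp(rs_rX86_SP, displacement, r_src, k32);
StoreBaseIndexedDisp(rs_rX86_SP, RegStorage::InvalidReg(), 0, displacement, r_src, k32);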