author     Andreas Gampe <agampe@google.com>                            2014-06-24 17:29:59 +0000
committer  Gerrit Code Review <noreply-gerritcodereview@google.com>    2014-06-24 00:22:15 +0000
commit     7e47f713067b55e24b5d24f2c892ceefd7971ebf (patch)
tree       bb3548662bb3ef524a8629f6c7711f3af0b4f475 /compiler
parent     1528b02c4d5241e785bb680f13de70c355e67429 (diff)
parent     2689fbad6b5ec1ae8f8c8791a80c6fd3cf24144d (diff)
Merge "ART: Split out more cases of Load/StoreRef, volatile as parameter"
Diffstat (limited to 'compiler')
-rw-r--r--  compiler/dex/compiler_enums.h               7
-rw-r--r--  compiler/dex/quick/arm/call_arm.cc          4
-rw-r--r--  compiler/dex/quick/arm/codegen_arm.h        8
-rw-r--r--  compiler/dex/quick/arm/int_arm.cc          12
-rw-r--r--  compiler/dex/quick/arm/utility_arm.cc     129
-rw-r--r--  compiler/dex/quick/arm64/arm64_lir.h        1
-rw-r--r--  compiler/dex/quick/arm64/assemble_arm64.cc 31
-rw-r--r--  compiler/dex/quick/arm64/call_arm64.cc      6
-rw-r--r--  compiler/dex/quick/arm64/codegen_arm64.h   62
-rw-r--r--  compiler/dex/quick/arm64/int_arm64.cc      29
-rw-r--r--  compiler/dex/quick/arm64/target_arm64.cc   29
-rw-r--r--  compiler/dex/quick/arm64/utility_arm64.cc  72
-rw-r--r--  compiler/dex/quick/codegen_util.cc          9
-rw-r--r--  compiler/dex/quick/gen_common.cc          129
-rw-r--r--  compiler/dex/quick/gen_invoke.cc           36
-rw-r--r--  compiler/dex/quick/gen_loadstore.cc        12
-rw-r--r--  compiler/dex/quick/mips/call_mips.cc        4
-rw-r--r--  compiler/dex/quick/mips/codegen_mips.h      8
-rw-r--r--  compiler/dex/quick/mips/int_mips.cc         8
-rw-r--r--  compiler/dex/quick/mips/utility_mips.cc    52
-rw-r--r--  compiler/dex/quick/mir_to_lir.cc           30
-rw-r--r--  compiler/dex/quick/mir_to_lir.h            36
-rw-r--r--  compiler/dex/quick/ralloc_util.cc           6
-rw-r--r--  compiler/dex/quick/x86/call_x86.cc          3
-rw-r--r--  compiler/dex/quick/x86/codegen_x86.h        8
-rw-r--r--  compiler/dex/quick/x86/fp_x86.cc           12
-rw-r--r--  compiler/dex/quick/x86/int_x86.cc          38
-rw-r--r--  compiler/dex/quick/x86/target_x86.cc       18
-rw-r--r--  compiler/dex/quick/x86/utility_x86.cc      47
29 files changed, 519 insertions, 327 deletions
diff --git a/compiler/dex/compiler_enums.h b/compiler/dex/compiler_enums.h
index de9ac4b..caecb7a 100644
--- a/compiler/dex/compiler_enums.h
+++ b/compiler/dex/compiler_enums.h
@@ -527,6 +527,13 @@ enum FixupKind {
std::ostream& operator<<(std::ostream& os, const FixupKind& kind);
+enum VolatileKind {
+ kNotVolatile, // Load/Store is not volatile
+ kVolatile // Load/Store is volatile
+};
+
+std::ostream& operator<<(std::ostream& os, const VolatileKind& kind);
+
} // namespace art
#endif // ART_COMPILER_DEX_COMPILER_ENUMS_H_
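
The new VolatileKind enum is what lets the merge retire the separate LoadBaseDispVolatile/StoreBaseDispVolatile entry points: callers pass kVolatile or kNotVolatile to the single LoadBaseDisp/StoreBaseDisp, and each backend decides internally whether to emit barriers. Below is a minimal standalone C++ sketch of that pattern; it is not ART code, the printf placeholders and the toy signature are hypothetical, and only the shape of the API change is being illustrated.

    #include <cstdio>

    // Hypothetical stand-ins for the backend's volatile flag and barrier kinds.
    enum VolatileKind { kNotVolatile, kVolatile };
    enum BarrierKind { kLoadLoad, kLoadStore, kStoreStore, kStoreLoad };

    static void GenMemBarrier(BarrierKind kind) {
      std::printf("emit barrier %d\n", kind);  // Placeholder for real barrier emission.
    }

    // One entry point instead of a *Volatile twin: the flag decides whether the
    // conservative barriers are issued after the load.
    static int LoadBaseDisp(const int* base, int index, VolatileKind is_volatile) {
      int value = base[index];  // Placeholder for the real load emission.
      if (is_volatile == kVolatile) {
        GenMemBarrier(kLoadLoad);
        GenMemBarrier(kLoadStore);
      }
      return value;
    }

    int main() {
      int slots[4] = {1, 2, 3, 4};
      std::printf("%d\n", LoadBaseDisp(slots, 2, kNotVolatile));  // No barriers.
      std::printf("%d\n", LoadBaseDisp(slots, 2, kVolatile));     // Barriers follow the load.
      return 0;
    }
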
diff --git a/compiler/dex/quick/arm/call_arm.cc b/compiler/dex/quick/arm/call_arm.cc
index 590c767..04d6898 100644
--- a/compiler/dex/quick/arm/call_arm.cc
+++ b/compiler/dex/quick/arm/call_arm.cc
@@ -316,9 +316,9 @@ void ArmMir2Lir::GenMoveException(RegLocation rl_dest) {
int ex_offset = Thread::ExceptionOffset<4>().Int32Value();
RegLocation rl_result = EvalLoc(rl_dest, kRefReg, true);
RegStorage reset_reg = AllocTempRef();
- LoadRefDisp(rs_rARM_SELF, ex_offset, rl_result.reg);
+ LoadRefDisp(rs_rARM_SELF, ex_offset, rl_result.reg, kNotVolatile);
LoadConstant(reset_reg, 0);
- StoreRefDisp(rs_rARM_SELF, ex_offset, reset_reg);
+ StoreRefDisp(rs_rARM_SELF, ex_offset, reset_reg, kNotVolatile);
FreeTemp(reset_reg);
StoreValue(rl_dest, rl_result);
}
diff --git a/compiler/dex/quick/arm/codegen_arm.h b/compiler/dex/quick/arm/codegen_arm.h
index 4499862..70dce7f 100644
--- a/compiler/dex/quick/arm/codegen_arm.h
+++ b/compiler/dex/quick/arm/codegen_arm.h
@@ -33,20 +33,16 @@ class ArmMir2Lir FINAL : public Mir2Lir {
LIR* CheckSuspendUsingLoad() OVERRIDE;
RegStorage LoadHelper(ThreadOffset<4> offset) OVERRIDE;
RegStorage LoadHelper(ThreadOffset<8> offset) OVERRIDE;
- LIR* LoadBaseDispVolatile(RegStorage r_base, int displacement, RegStorage r_dest,
- OpSize size) OVERRIDE;
LIR* LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
- OpSize size) OVERRIDE;
+ OpSize size, VolatileKind is_volatile) OVERRIDE;
LIR* LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest, int scale,
OpSize size) OVERRIDE;
LIR* LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, int displacement,
RegStorage r_dest, OpSize size) OVERRIDE;
LIR* LoadConstantNoClobber(RegStorage r_dest, int value);
LIR* LoadConstantWide(RegStorage r_dest, int64_t value);
- LIR* StoreBaseDispVolatile(RegStorage r_base, int displacement, RegStorage r_src,
- OpSize size) OVERRIDE;
LIR* StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src,
- OpSize size) OVERRIDE;
+ OpSize size, VolatileKind is_volatile) OVERRIDE;
LIR* StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src, int scale,
OpSize size) OVERRIDE;
LIR* StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, int displacement,
diff --git a/compiler/dex/quick/arm/int_arm.cc b/compiler/dex/quick/arm/int_arm.cc
index 916c528..e34d944 100644
--- a/compiler/dex/quick/arm/int_arm.cc
+++ b/compiler/dex/quick/arm/int_arm.cc
@@ -723,7 +723,7 @@ bool ArmMir2Lir::GenInlinedPeek(CallInfo* info, OpSize size) {
} else {
DCHECK(size == kSignedByte || size == kSignedHalf || size == k32);
// Unaligned load with LDR and LDRSH is allowed on ARMv7 with SCTLR.A set to 0.
- LoadBaseDisp(rl_address.reg, 0, rl_result.reg, size);
+ LoadBaseDisp(rl_address.reg, 0, rl_result.reg, size, kNotVolatile);
StoreValue(rl_dest, rl_result);
}
return true;
@@ -737,13 +737,13 @@ bool ArmMir2Lir::GenInlinedPoke(CallInfo* info, OpSize size) {
if (size == k64) {
// Fake unaligned STRD by two unaligned STR instructions on ARMv7 with SCTLR.A set to 0.
RegLocation rl_value = LoadValueWide(rl_src_value, kCoreReg);
- StoreBaseDisp(rl_address.reg, 0, rl_value.reg.GetLow(), k32);
- StoreBaseDisp(rl_address.reg, 4, rl_value.reg.GetHigh(), k32);
+ StoreBaseDisp(rl_address.reg, 0, rl_value.reg.GetLow(), k32, kNotVolatile);
+ StoreBaseDisp(rl_address.reg, 4, rl_value.reg.GetHigh(), k32, kNotVolatile);
} else {
DCHECK(size == kSignedByte || size == kSignedHalf || size == k32);
// Unaligned store with STR and STRSH is allowed on ARMv7 with SCTLR.A set to 0.
RegLocation rl_value = LoadValue(rl_src_value, kCoreReg);
- StoreBaseDisp(rl_address.reg, 0, rl_value.reg, size);
+ StoreBaseDisp(rl_address.reg, 0, rl_value.reg, size, kNotVolatile);
}
return true;
}
@@ -1230,7 +1230,7 @@ void ArmMir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
}
FreeTemp(reg_len);
}
- LoadBaseDisp(reg_ptr, data_offset, rl_result.reg, size);
+ LoadBaseDisp(reg_ptr, data_offset, rl_result.reg, size, kNotVolatile);
MarkPossibleNullPointerException(opt_flags);
if (!constant_index) {
FreeTemp(reg_ptr);
@@ -1330,7 +1330,7 @@ void ArmMir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
FreeTemp(reg_len);
}
- StoreBaseDisp(reg_ptr, data_offset, rl_src.reg, size);
+ StoreBaseDisp(reg_ptr, data_offset, rl_src.reg, size, kNotVolatile);
MarkPossibleNullPointerException(opt_flags);
} else {
/* reg_ptr -> array data */
diff --git a/compiler/dex/quick/arm/utility_arm.cc b/compiler/dex/quick/arm/utility_arm.cc
index b236f99..bc8f95b 100644
--- a/compiler/dex/quick/arm/utility_arm.cc
+++ b/compiler/dex/quick/arm/utility_arm.cc
@@ -961,31 +961,37 @@ LIR* ArmMir2Lir::LoadBaseDispBody(RegStorage r_base, int displacement, RegStorag
return load;
}
-LIR* ArmMir2Lir::LoadBaseDispVolatile(RegStorage r_base, int displacement, RegStorage r_dest,
- OpSize size) {
- // Only 64-bit load needs special handling.
- if (UNLIKELY(size == k64 || size == kDouble)) {
- DCHECK(!r_dest.IsFloat()); // See RegClassForFieldLoadSave().
- // If the cpu supports LPAE, aligned LDRD is atomic - fall through to LoadBaseDisp().
- if (!cu_->compiler_driver->GetInstructionSetFeatures().HasLpae()) {
- // Use LDREXD for the atomic load. (Expect displacement > 0, don't optimize for == 0.)
- RegStorage r_ptr = AllocTemp();
- OpRegRegImm(kOpAdd, r_ptr, r_base, displacement);
- LIR* lir = NewLIR3(kThumb2Ldrexd, r_dest.GetLowReg(), r_dest.GetHighReg(), r_ptr.GetReg());
- FreeTemp(r_ptr);
- return lir;
- }
- }
- return LoadBaseDisp(r_base, displacement, r_dest, size);
-}
-
LIR* ArmMir2Lir::LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
- OpSize size) {
+ OpSize size, VolatileKind is_volatile) {
// TODO: base this on target.
if (size == kWord) {
size = k32;
}
- return LoadBaseDispBody(r_base, displacement, r_dest, size);
+ LIR* load;
+ if (UNLIKELY(is_volatile == kVolatile &&
+ (size == k64 || size == kDouble) &&
+ !cu_->compiler_driver->GetInstructionSetFeatures().HasLpae())) {
+ // Only 64-bit load needs special handling.
+ // If the cpu supports LPAE, aligned LDRD is atomic - fall through to LoadBaseDisp().
+ DCHECK(!r_dest.IsFloat()); // See RegClassForFieldLoadSave().
+ // Use LDREXD for the atomic load. (Expect displacement > 0, don't optimize for == 0.)
+ RegStorage r_ptr = AllocTemp();
+ OpRegRegImm(kOpAdd, r_ptr, r_base, displacement);
+ LIR* lir = NewLIR3(kThumb2Ldrexd, r_dest.GetLowReg(), r_dest.GetHighReg(), r_ptr.GetReg());
+ FreeTemp(r_ptr);
+ return lir;
+ } else {
+ load = LoadBaseDispBody(r_base, displacement, r_dest, size);
+ }
+
+ if (UNLIKELY(is_volatile == kVolatile)) {
+ // Without context sensitive analysis, we must issue the most conservative barriers.
+ // In this case, either a load or store may follow so we issue both barriers.
+ GenMemBarrier(kLoadLoad);
+ GenMemBarrier(kLoadStore);
+ }
+
+ return load;
}
@@ -1081,49 +1087,58 @@ LIR* ArmMir2Lir::StoreBaseDispBody(RegStorage r_base, int displacement, RegStora
return store;
}
-LIR* ArmMir2Lir::StoreBaseDispVolatile(RegStorage r_base, int displacement, RegStorage r_src,
- OpSize size) {
- // Only 64-bit store needs special handling.
- if (UNLIKELY(size == k64 || size == kDouble)) {
- DCHECK(!r_src.IsFloat()); // See RegClassForFieldLoadSave().
+LIR* ArmMir2Lir::StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src,
+ OpSize size, VolatileKind is_volatile) {
+ if (UNLIKELY(is_volatile == kVolatile)) {
+ // There might have been a store before this volatile one so insert StoreStore barrier.
+ GenMemBarrier(kStoreStore);
+ }
+
+ LIR* store;
+ if (UNLIKELY(is_volatile == kVolatile &&
+ (size == k64 || size == kDouble) &&
+ !cu_->compiler_driver->GetInstructionSetFeatures().HasLpae())) {
+ // Only 64-bit store needs special handling.
// If the cpu supports LPAE, aligned STRD is atomic - fall through to StoreBaseDisp().
- if (!cu_->compiler_driver->GetInstructionSetFeatures().HasLpae()) {
- // Use STREXD for the atomic store. (Expect displacement > 0, don't optimize for == 0.)
- RegStorage r_ptr = AllocTemp();
+ // Use STREXD for the atomic store. (Expect displacement > 0, don't optimize for == 0.)
+ DCHECK(!r_src.IsFloat()); // See RegClassForFieldLoadSave().
+ RegStorage r_ptr = AllocTemp();
+ OpRegRegImm(kOpAdd, r_ptr, r_base, displacement);
+ LIR* fail_target = NewLIR0(kPseudoTargetLabel);
+ // We have only 5 temporary registers available and if r_base, r_src and r_ptr already
+ // take 4, we can't directly allocate 2 more for LDREXD temps. In that case clobber r_ptr
+ // in LDREXD and recalculate it from r_base.
+ RegStorage r_temp = AllocTemp();
+ RegStorage r_temp_high = AllocFreeTemp(); // We may not have another temp.
+ if (r_temp_high.Valid()) {
+ NewLIR3(kThumb2Ldrexd, r_temp.GetReg(), r_temp_high.GetReg(), r_ptr.GetReg());
+ FreeTemp(r_temp_high);
+ FreeTemp(r_temp);
+ } else {
+ // If we don't have another temp, clobber r_ptr in LDREXD and reload it.
+ NewLIR3(kThumb2Ldrexd, r_temp.GetReg(), r_ptr.GetReg(), r_ptr.GetReg());
+ FreeTemp(r_temp); // May need the temp for kOpAdd.
OpRegRegImm(kOpAdd, r_ptr, r_base, displacement);
- LIR* fail_target = NewLIR0(kPseudoTargetLabel);
- // We have only 5 temporary registers available and if r_base, r_src and r_ptr already
- // take 4, we can't directly allocate 2 more for LDREXD temps. In that case clobber r_ptr
- // in LDREXD and recalculate it from r_base.
- RegStorage r_temp = AllocTemp();
- RegStorage r_temp_high = AllocFreeTemp(); // We may not have another temp.
- if (r_temp_high.Valid()) {
- NewLIR3(kThumb2Ldrexd, r_temp.GetReg(), r_temp_high.GetReg(), r_ptr.GetReg());
- FreeTemp(r_temp_high);
- FreeTemp(r_temp);
- } else {
- // If we don't have another temp, clobber r_ptr in LDREXD and reload it.
- NewLIR3(kThumb2Ldrexd, r_temp.GetReg(), r_ptr.GetReg(), r_ptr.GetReg());
- FreeTemp(r_temp); // May need the temp for kOpAdd.
- OpRegRegImm(kOpAdd, r_ptr, r_base, displacement);
- }
- LIR* lir = NewLIR4(kThumb2Strexd, r_temp.GetReg(), r_src.GetLowReg(), r_src.GetHighReg(),
- r_ptr.GetReg());
- OpCmpImmBranch(kCondNe, r_temp, 0, fail_target);
- FreeTemp(r_ptr);
- return lir;
}
+ store = NewLIR4(kThumb2Strexd, r_temp.GetReg(), r_src.GetLowReg(), r_src.GetHighReg(),
+ r_ptr.GetReg());
+ OpCmpImmBranch(kCondNe, r_temp, 0, fail_target);
+ FreeTemp(r_ptr);
+ } else {
+ // TODO: base this on target.
+ if (size == kWord) {
+ size = k32;
+ }
+
+ store = StoreBaseDispBody(r_base, displacement, r_src, size);
}
- return StoreBaseDisp(r_base, displacement, r_src, size);
-}
-LIR* ArmMir2Lir::StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src,
- OpSize size) {
- // TODO: base this on target.
- if (size == kWord) {
- size = k32;
+ if (UNLIKELY(is_volatile == kVolatile)) {
+ // A load might follow the volatile store so insert a StoreLoad barrier.
+ GenMemBarrier(kStoreLoad);
}
- return StoreBaseDispBody(r_base, displacement, r_src, size);
+
+ return store;
}
LIR* ArmMir2Lir::OpFpRegCopy(RegStorage r_dest, RegStorage r_src) {
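
As a point of reference for the barrier placement this hunk moves into the ARM backend (kStoreStore before a volatile store and kStoreLoad after it; kLoadLoad plus kLoadStore after a volatile load), here is a standalone C++11 analogy using std::atomic_thread_fence. It is not ART code, and the C++ fences are at-least-as-strong approximations of the listed barrier kinds, shown only to make the placement relative to the access explicit.

    #include <atomic>

    std::atomic<int> g_field{0};  // Stand-in for the storage behind a volatile field.

    void VolatileStore(int v) {
      std::atomic_thread_fence(std::memory_order_release);  // ~ kStoreStore before the store.
      g_field.store(v, std::memory_order_relaxed);           // The store itself.
      std::atomic_thread_fence(std::memory_order_seq_cst);   // ~ kStoreLoad after the store.
    }

    int VolatileLoad() {
      int v = g_field.load(std::memory_order_relaxed);       // The load itself.
      std::atomic_thread_fence(std::memory_order_acquire);   // ~ kLoadLoad + kLoadStore after the load.
      return v;
    }
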
diff --git a/compiler/dex/quick/arm64/arm64_lir.h b/compiler/dex/quick/arm64/arm64_lir.h
index 3f32c51..1f1a252 100644
--- a/compiler/dex/quick/arm64/arm64_lir.h
+++ b/compiler/dex/quick/arm64/arm64_lir.h
@@ -101,6 +101,7 @@ namespace art {
// Temporary macros, used to mark code which wants to distinguish betweek zr/sp.
#define A64_REG_IS_SP(reg_num) ((reg_num) == rwsp || (reg_num) == rsp)
#define A64_REG_IS_ZR(reg_num) ((reg_num) == rwzr || (reg_num) == rxzr)
+#define A64_REGSTORAGE_IS_SP_OR_ZR(rs) (((rs).GetRegNum() & 0x1f) == 0x1f)
enum Arm64ResourceEncodingPos {
kArm64GPReg0 = 0,
diff --git a/compiler/dex/quick/arm64/assemble_arm64.cc b/compiler/dex/quick/arm64/assemble_arm64.cc
index 2a8da24..bee64f1 100644
--- a/compiler/dex/quick/arm64/assemble_arm64.cc
+++ b/compiler/dex/quick/arm64/assemble_arm64.cc
@@ -632,19 +632,19 @@ uint8_t* Arm64Mir2Lir::EncodeLIRs(uint8_t* write_pos, LIR* lir) {
if (static_cast<unsigned>(kind) < kFmtBitBlt) {
bool is_zero = A64_REG_IS_ZR(operand);
- if (kIsDebugBuild) {
+ if (kIsDebugBuild && (kFailOnSizeError || kReportSizeError)) {
// Register usage checks: First establish register usage requirements based on the
// format in `kind'.
bool want_float = false;
bool want_64_bit = false;
- bool want_size_match = false;
+ bool want_var_size = true;
bool want_zero = false;
switch (kind) {
case kFmtRegX:
want_64_bit = true;
// Intentional fall-through.
case kFmtRegW:
- want_size_match = true;
+ want_var_size = false;
// Intentional fall-through.
case kFmtRegR:
want_zero = true;
@@ -653,7 +653,7 @@ uint8_t* Arm64Mir2Lir::EncodeLIRs(uint8_t* write_pos, LIR* lir) {
want_64_bit = true;
// Intentional fall-through.
case kFmtRegWOrSp:
- want_size_match = true;
+ want_var_size = false;
break;
case kFmtRegROrSp:
break;
@@ -661,7 +661,7 @@ uint8_t* Arm64Mir2Lir::EncodeLIRs(uint8_t* write_pos, LIR* lir) {
want_64_bit = true;
// Intentional fall-through.
case kFmtRegS:
- want_size_match = true;
+ want_var_size = false;
// Intentional fall-through.
case kFmtRegF:
want_float = true;
@@ -672,21 +672,27 @@ uint8_t* Arm64Mir2Lir::EncodeLIRs(uint8_t* write_pos, LIR* lir) {
break;
}
+ // want_var_size == true means kind == kFmtReg{R,F}. In these two cases, we want
+ // the register size to be coherent with the instruction width.
+ if (want_var_size) {
+ want_64_bit = opcode_is_wide;
+ }
+
// Now check that the requirements are satisfied.
RegStorage reg(operand | RegStorage::kValid);
const char *expected = nullptr;
if (want_float) {
if (!reg.IsFloat()) {
expected = "float register";
- } else if (want_size_match && (reg.IsDouble() != want_64_bit)) {
+ } else if (reg.IsDouble() != want_64_bit) {
expected = (want_64_bit) ? "double register" : "single register";
}
} else {
if (reg.IsFloat()) {
expected = "core register";
- } else if (want_size_match && (reg.Is64Bit() != want_64_bit)) {
+ } else if (reg.Is64Bit() != want_64_bit) {
expected = (want_64_bit) ? "x-register" : "w-register";
- } else if (reg.GetRegNum() == 31 && is_zero != want_zero) {
+ } else if (A64_REGSTORAGE_IS_SP_OR_ZR(reg) && is_zero != want_zero) {
expected = (want_zero) ? "zero-register" : "sp-register";
}
}
@@ -698,8 +704,13 @@ uint8_t* Arm64Mir2Lir::EncodeLIRs(uint8_t* write_pos, LIR* lir) {
if (expected != nullptr) {
LOG(WARNING) << "Method: " << PrettyMethod(cu_->method_idx, *cu_->dex_file)
<< " @ 0x" << std::hex << lir->dalvik_offset;
- LOG(FATAL) << "Bad argument n. " << i << " of " << encoder->name
- << ". Expected " << expected << ", got 0x" << std::hex << operand;
+ if (kFailOnSizeError) {
+ LOG(FATAL) << "Bad argument n. " << i << " of " << encoder->name
+ << ". Expected " << expected << ", got 0x" << std::hex << operand;
+ } else {
+ LOG(WARNING) << "Bad argument n. " << i << " of " << encoder->name
+ << ". Expected " << expected << ", got 0x" << std::hex << operand;
+ }
}
}
diff --git a/compiler/dex/quick/arm64/call_arm64.cc b/compiler/dex/quick/arm64/call_arm64.cc
index 1df576b..c3f4711 100644
--- a/compiler/dex/quick/arm64/call_arm64.cc
+++ b/compiler/dex/quick/arm64/call_arm64.cc
@@ -267,7 +267,7 @@ void Arm64Mir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src) {
MarkPossibleNullPointerException(opt_flags);
LIR* slow_unlock_branch = OpCmpBranch(kCondNe, rs_w1, rs_w2, NULL);
GenMemBarrier(kStoreLoad);
- Store32Disp(rs_x0, mirror::Object::MonitorOffset().Int32Value(), rs_xzr);
+ Store32Disp(rs_x0, mirror::Object::MonitorOffset().Int32Value(), rs_wzr);
LIR* unlock_success_branch = OpUnconditionalBranch(NULL);
LIR* slow_path_target = NewLIR0(kPseudoTargetLabel);
@@ -289,8 +289,8 @@ void Arm64Mir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src) {
void Arm64Mir2Lir::GenMoveException(RegLocation rl_dest) {
int ex_offset = Thread::ExceptionOffset<8>().Int32Value();
RegLocation rl_result = EvalLoc(rl_dest, kRefReg, true);
- LoadRefDisp(rs_rA64_SELF, ex_offset, rl_result.reg);
- StoreRefDisp(rs_rA64_SELF, ex_offset, rs_xzr);
+ LoadRefDisp(rs_rA64_SELF, ex_offset, rl_result.reg, kNotVolatile);
+ StoreRefDisp(rs_rA64_SELF, ex_offset, rs_xzr, kNotVolatile);
StoreValue(rl_dest, rl_result);
}
diff --git a/compiler/dex/quick/arm64/codegen_arm64.h b/compiler/dex/quick/arm64/codegen_arm64.h
index f1270ec..68fa6f4 100644
--- a/compiler/dex/quick/arm64/codegen_arm64.h
+++ b/compiler/dex/quick/arm64/codegen_arm64.h
@@ -26,6 +26,11 @@ namespace art {
class Arm64Mir2Lir : public Mir2Lir {
protected:
+ // If we detect a size error, FATAL out.
+ static constexpr bool kFailOnSizeError = false && kIsDebugBuild;
+ // If we detect a size error, report to LOG.
+ static constexpr bool kReportSizeError = false && kIsDebugBuild;
+
// TODO: consolidate 64-bit target support.
class InToRegStorageMapper {
public:
@@ -69,22 +74,25 @@ class Arm64Mir2Lir : public Mir2Lir {
LIR* CheckSuspendUsingLoad() OVERRIDE;
RegStorage LoadHelper(ThreadOffset<4> offset) OVERRIDE;
RegStorage LoadHelper(ThreadOffset<8> offset) OVERRIDE;
- LIR* LoadBaseDispVolatile(RegStorage r_base, int displacement, RegStorage r_dest,
- OpSize size) OVERRIDE;
LIR* LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
- OpSize size) OVERRIDE;
+ OpSize size, VolatileKind is_volatile) OVERRIDE;
+ LIR* LoadRefDisp(RegStorage r_base, int displacement, RegStorage r_dest,
+ VolatileKind is_volatile)
+ OVERRIDE;
LIR* LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest, int scale,
OpSize size) OVERRIDE;
+ LIR* LoadRefIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest) OVERRIDE;
LIR* LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, int displacement,
RegStorage r_dest, OpSize size) OVERRIDE;
LIR* LoadConstantNoClobber(RegStorage r_dest, int value);
LIR* LoadConstantWide(RegStorage r_dest, int64_t value);
- LIR* StoreBaseDispVolatile(RegStorage r_base, int displacement, RegStorage r_dest,
- OpSize size) OVERRIDE;
LIR* StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src,
- OpSize size) OVERRIDE;
+ OpSize size, VolatileKind is_volatile) OVERRIDE;
+ LIR* StoreRefDisp(RegStorage r_base, int displacement, RegStorage r_src,
+ VolatileKind is_volatile) OVERRIDE;
LIR* StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src, int scale,
OpSize size) OVERRIDE;
+ LIR* StoreRefIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src) OVERRIDE;
LIR* StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, int displacement,
RegStorage r_src, OpSize size) OVERRIDE;
void MarkGCCard(RegStorage val_reg, RegStorage tgt_addr_reg) OVERRIDE;
@@ -283,8 +291,15 @@ class Arm64Mir2Lir : public Mir2Lir {
* @see As64BitReg
*/
RegStorage As32BitReg(RegStorage reg) {
- DCHECK(reg.Is64Bit());
DCHECK(!reg.IsPair());
+ if ((kFailOnSizeError || kReportSizeError) && !reg.Is64Bit()) {
+ if (kFailOnSizeError) {
+ LOG(FATAL) << "Expected 64b register";
+ } else {
+ LOG(WARNING) << "Expected 64b register";
+ return reg;
+ }
+ }
RegStorage ret_val = RegStorage(RegStorage::k32BitSolo,
reg.GetRawBits() & RegStorage::kRegTypeMask);
DCHECK_EQ(GetRegInfo(reg)->FindMatchingView(RegisterInfo::k32SoloStorageMask)
@@ -293,6 +308,18 @@ class Arm64Mir2Lir : public Mir2Lir {
return ret_val;
}
+ RegStorage Check32BitReg(RegStorage reg) {
+ if ((kFailOnSizeError || kReportSizeError) && !reg.Is32Bit()) {
+ if (kFailOnSizeError) {
+ LOG(FATAL) << "Checked for 32b register";
+ } else {
+ LOG(WARNING) << "Checked for 32b register";
+ return As32BitReg(reg);
+ }
+ }
+ return reg;
+ }
+
/**
* @brief Given register wNN (sNN), returns register xNN (dNN).
* @param reg #RegStorage containing a Solo32 input register (e.g. @c w1 or @c s2).
@@ -300,8 +327,15 @@ class Arm64Mir2Lir : public Mir2Lir {
* @see As32BitReg
*/
RegStorage As64BitReg(RegStorage reg) {
- DCHECK(reg.Is32Bit());
DCHECK(!reg.IsPair());
+ if ((kFailOnSizeError || kReportSizeError) && !reg.Is32Bit()) {
+ if (kFailOnSizeError) {
+ LOG(FATAL) << "Expected 32b register";
+ } else {
+ LOG(WARNING) << "Expected 32b register";
+ return reg;
+ }
+ }
RegStorage ret_val = RegStorage(RegStorage::k64BitSolo,
reg.GetRawBits() & RegStorage::kRegTypeMask);
DCHECK_EQ(GetRegInfo(reg)->FindMatchingView(RegisterInfo::k64SoloStorageMask)
@@ -310,6 +344,18 @@ class Arm64Mir2Lir : public Mir2Lir {
return ret_val;
}
+ RegStorage Check64BitReg(RegStorage reg) {
+ if ((kFailOnSizeError || kReportSizeError) && !reg.Is64Bit()) {
+ if (kFailOnSizeError) {
+ LOG(FATAL) << "Checked for 64b register";
+ } else {
+ LOG(WARNING) << "Checked for 64b register";
+ return As64BitReg(reg);
+ }
+ }
+ return reg;
+ }
+
LIR* LoadFPConstantValue(RegStorage r_dest, int32_t value);
LIR* LoadFPConstantValueWide(RegStorage r_dest, int64_t value);
void ReplaceFixup(LIR* prev_lir, LIR* orig_lir, LIR* new_lir);
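
The kFailOnSizeError/kReportSizeError constants and the Check32BitReg/Check64BitReg helpers added here implement a single policy for register-width mismatches: optionally abort, optionally warn and convert, otherwise pass the register through. A standalone sketch of that check-or-convert policy with a toy register type follows; the names are hypothetical and this is not ART code.

    #include <cstdio>
    #include <cstdlib>

    struct Reg { bool is64; };  // Toy register; only the width matters here.

    // Policy switches mirroring kFailOnSizeError / kReportSizeError (both default off).
    static constexpr bool kFailOnSizeError = false;
    static constexpr bool kReportSizeError = false;

    // Returns a register guaranteed to be 32-bit wide, applying the policy on mismatch.
    static Reg Check32Bit(Reg reg) {
      if ((kFailOnSizeError || kReportSizeError) && reg.is64) {
        if (kFailOnSizeError) {
          std::fprintf(stderr, "FATAL: checked for 32b register\n");
          std::abort();
        }
        std::fprintf(stderr, "WARNING: checked for 32b register\n");
        return Reg{false};  // Fall back to the 32-bit view and continue.
      }
      return reg;
    }

    int main() {
      Reg w0{false};
      Reg x1{true};
      std::printf("%d %d\n", Check32Bit(w0).is64, Check32Bit(x1).is64);
      return 0;
    }
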
diff --git a/compiler/dex/quick/arm64/int_arm64.cc b/compiler/dex/quick/arm64/int_arm64.cc
index 2ac4adb..1fdbe2d 100644
--- a/compiler/dex/quick/arm64/int_arm64.cc
+++ b/compiler/dex/quick/arm64/int_arm64.cc
@@ -410,7 +410,7 @@ bool Arm64Mir2Lir::GenInlinedPeek(CallInfo* info, OpSize size) {
RegLocation rl_address = LoadValue(rl_src_address, kCoreReg); // kRefReg
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
- LoadBaseDisp(rl_address.reg, 0, rl_result.reg, size);
+ LoadBaseDisp(rl_address.reg, 0, rl_result.reg, size, kNotVolatile);
if (size == k64) {
StoreValueWide(rl_dest, rl_result);
} else {
@@ -433,7 +433,7 @@ bool Arm64Mir2Lir::GenInlinedPoke(CallInfo* info, OpSize size) {
DCHECK(size == kSignedByte || size == kSignedHalf || size == k32);
rl_value = LoadValue(rl_src_value, kCoreReg);
}
- StoreBaseDisp(rl_address.reg, 0, rl_value.reg, size);
+ StoreBaseDisp(rl_address.reg, 0, rl_value.reg, size, kNotVolatile);
return true;
}
@@ -747,7 +747,11 @@ void Arm64Mir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
}
FreeTemp(reg_len);
}
- LoadBaseDisp(reg_ptr, data_offset, rl_result.reg, size);
+ if (rl_result.ref) {
+ LoadRefDisp(reg_ptr, data_offset, rl_result.reg, kNotVolatile);
+ } else {
+ LoadBaseDisp(reg_ptr, data_offset, rl_result.reg, size, kNotVolatile);
+ }
MarkPossibleNullPointerException(opt_flags);
if (!constant_index) {
FreeTemp(reg_ptr);
@@ -768,7 +772,11 @@ void Arm64Mir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
GenArrayBoundsCheck(rl_index.reg, reg_len);
FreeTemp(reg_len);
}
- LoadBaseIndexed(reg_ptr, As64BitReg(rl_index.reg), rl_result.reg, scale, size);
+ if (rl_result.ref) {
+ LoadRefIndexed(reg_ptr, As64BitReg(rl_index.reg), rl_result.reg);
+ } else {
+ LoadBaseIndexed(reg_ptr, As64BitReg(rl_index.reg), rl_result.reg, scale, size);
+ }
MarkPossibleNullPointerException(opt_flags);
FreeTemp(reg_ptr);
StoreValue(rl_dest, rl_result);
@@ -847,8 +855,11 @@ void Arm64Mir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
}
FreeTemp(reg_len);
}
-
- StoreBaseDisp(reg_ptr, data_offset, rl_src.reg, size);
+ if (rl_src.ref) {
+ StoreRefDisp(reg_ptr, data_offset, rl_src.reg, kNotVolatile);
+ } else {
+ StoreBaseDisp(reg_ptr, data_offset, rl_src.reg, size, kNotVolatile);
+ }
MarkPossibleNullPointerException(opt_flags);
} else {
/* reg_ptr -> array data */
@@ -858,7 +869,11 @@ void Arm64Mir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
GenArrayBoundsCheck(rl_index.reg, reg_len);
FreeTemp(reg_len);
}
- StoreBaseIndexed(reg_ptr, As64BitReg(rl_index.reg), rl_src.reg, scale, size);
+ if (rl_src.ref) {
+ StoreRefIndexed(reg_ptr, As64BitReg(rl_index.reg), rl_src.reg);
+ } else {
+ StoreBaseIndexed(reg_ptr, As64BitReg(rl_index.reg), rl_src.reg, scale, size);
+ }
MarkPossibleNullPointerException(opt_flags);
}
if (allocated_reg_ptr_temp) {
diff --git a/compiler/dex/quick/arm64/target_arm64.cc b/compiler/dex/quick/arm64/target_arm64.cc
index 06e1cda..dfaa483 100644
--- a/compiler/dex/quick/arm64/target_arm64.cc
+++ b/compiler/dex/quick/arm64/target_arm64.cc
@@ -789,7 +789,7 @@ RegStorage Arm64Mir2Lir::LoadHelper(ThreadOffset<4> offset) {
RegStorage Arm64Mir2Lir::LoadHelper(ThreadOffset<8> offset) {
// TODO(Arm64): use LoadWordDisp instead.
// e.g. LoadWordDisp(rs_rA64_SELF, offset.Int32Value(), rs_rA64_LR);
- LoadBaseDisp(rs_rA64_SELF, offset.Int32Value(), rs_rA64_LR, k64);
+ LoadBaseDisp(rs_rA64_SELF, offset.Int32Value(), rs_rA64_LR, k64, kNotVolatile);
return rs_rA64_LR;
}
@@ -949,7 +949,7 @@ void Arm64Mir2Lir::FlushIns(RegLocation* ArgLocs, RegLocation rl_method) {
StoreValue(rl_method, rl_src);
// If Method* has been promoted, explicitly flush
if (rl_method.location == kLocPhysReg) {
- StoreRefDisp(TargetReg(kSp), 0, TargetReg(kArg0));
+ StoreRefDisp(TargetReg(kSp), 0, TargetReg(kArg0), kNotVolatile);
}
if (cu_->num_ins == 0) {
@@ -971,7 +971,7 @@ void Arm64Mir2Lir::FlushIns(RegLocation* ArgLocs, RegLocation rl_method) {
} else if ((v_map->fp_location == kLocPhysReg) && t_loc->fp) {
OpRegCopy(RegStorage::Solo32(v_map->FpReg), reg);
} else {
- StoreBaseDisp(TargetReg(kSp), SRegOffset(start_vreg + i), reg, op_size);
+ StoreBaseDisp(TargetReg(kSp), SRegOffset(start_vreg + i), reg, op_size, kNotVolatile);
if (reg.Is64Bit()) {
if (SRegOffset(start_vreg + i) + 4 != SRegOffset(start_vreg + i + 1)) {
LOG(FATAL) << "64 bit value stored in non-consecutive 4 bytes slots";
@@ -1057,14 +1057,14 @@ int Arm64Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state,
loc = UpdateLocWide(loc);
if (loc.location == kLocPhysReg) {
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
- StoreBaseDisp(TargetReg(kSp), SRegOffset(loc.s_reg_low), loc.reg, k64);
+ StoreBaseDisp(TargetReg(kSp), SRegOffset(loc.s_reg_low), loc.reg, k64, kNotVolatile);
}
next_arg += 2;
} else {
loc = UpdateLoc(loc);
if (loc.location == kLocPhysReg) {
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
- StoreBaseDisp(TargetReg(kSp), SRegOffset(loc.s_reg_low), loc.reg, k32);
+ StoreBaseDisp(TargetReg(kSp), SRegOffset(loc.s_reg_low), loc.reg, k32, kNotVolatile);
}
next_arg++;
}
@@ -1122,18 +1122,27 @@ int Arm64Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state,
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
if (rl_arg.wide) {
if (rl_arg.location == kLocPhysReg) {
- StoreBaseDisp(TargetReg(kSp), out_offset, rl_arg.reg, k64);
+ StoreBaseDisp(TargetReg(kSp), out_offset, rl_arg.reg, k64, kNotVolatile);
} else {
LoadValueDirectWideFixed(rl_arg, regWide);
- StoreBaseDisp(TargetReg(kSp), out_offset, regWide, k64);
+ StoreBaseDisp(TargetReg(kSp), out_offset, regWide, k64, kNotVolatile);
}
i++;
} else {
if (rl_arg.location == kLocPhysReg) {
- StoreBaseDisp(TargetReg(kSp), out_offset, rl_arg.reg, k32);
+ if (rl_arg.ref) {
+ StoreRefDisp(TargetReg(kSp), out_offset, rl_arg.reg, kNotVolatile);
+ } else {
+ StoreBaseDisp(TargetReg(kSp), out_offset, rl_arg.reg, k32, kNotVolatile);
+ }
} else {
- LoadValueDirectFixed(rl_arg, regSingle);
- StoreBaseDisp(TargetReg(kSp), out_offset, regSingle, k32);
+ if (rl_arg.ref) {
+ LoadValueDirectFixed(rl_arg, regSingle);
+ StoreRefDisp(TargetReg(kSp), out_offset, regSingle, kNotVolatile);
+ } else {
+ LoadValueDirectFixed(rl_arg, As32BitReg(regSingle));
+ StoreBaseDisp(TargetReg(kSp), out_offset, As32BitReg(regSingle), k32, kNotVolatile);
+ }
}
}
}
diff --git a/compiler/dex/quick/arm64/utility_arm64.cc b/compiler/dex/quick/arm64/utility_arm64.cc
index 672aa88..12c2f41 100644
--- a/compiler/dex/quick/arm64/utility_arm64.cc
+++ b/compiler/dex/quick/arm64/utility_arm64.cc
@@ -893,9 +893,7 @@ LIR* Arm64Mir2Lir::LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegSto
ArmOpcode opcode = kA64Brk1d;
DCHECK(r_base.Is64Bit());
// TODO: need a cleaner handling of index registers here and throughout.
- if (r_index.Is32Bit()) {
- r_index = As64BitReg(r_index);
- }
+ r_index = Check32BitReg(r_index);
if (r_dest.IsFloat()) {
if (r_dest.IsDouble()) {
@@ -918,12 +916,14 @@ LIR* Arm64Mir2Lir::LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegSto
case kDouble:
case kWord:
case k64:
+ r_dest = Check64BitReg(r_dest);
opcode = WIDE(kA64Ldr4rXxG);
expected_scale = 3;
break;
case kSingle:
case k32:
case kReference:
+ r_dest = Check32BitReg(r_dest);
opcode = kA64Ldr4rXxG;
expected_scale = 2;
break;
@@ -959,6 +959,10 @@ LIR* Arm64Mir2Lir::LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegSto
return load;
}
+LIR* Arm64Mir2Lir::LoadRefIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest) {
+ return LoadBaseIndexed(r_base, r_index, As32BitReg(r_dest), 2, kReference);
+}
+
LIR* Arm64Mir2Lir::StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src,
int scale, OpSize size) {
LIR* store;
@@ -966,9 +970,7 @@ LIR* Arm64Mir2Lir::StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegSt
ArmOpcode opcode = kA64Brk1d;
DCHECK(r_base.Is64Bit());
// TODO: need a cleaner handling of index registers here and throughout.
- if (r_index.Is32Bit()) {
- r_index = As64BitReg(r_index);
- }
+ r_index = Check32BitReg(r_index);
if (r_src.IsFloat()) {
if (r_src.IsDouble()) {
@@ -991,12 +993,14 @@ LIR* Arm64Mir2Lir::StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegSt
case kDouble: // Intentional fall-trough.
case kWord: // Intentional fall-trough.
case k64:
+ r_src = Check64BitReg(r_src);
opcode = WIDE(kA64Str4rXxG);
expected_scale = 3;
break;
case kSingle: // Intentional fall-trough.
case k32: // Intentional fall-trough.
case kReference:
+ r_src = Check32BitReg(r_src);
opcode = kA64Str4rXxG;
expected_scale = 2;
break;
@@ -1026,6 +1030,10 @@ LIR* Arm64Mir2Lir::StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegSt
return store;
}
+LIR* Arm64Mir2Lir::StoreRefIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src) {
+ return StoreBaseIndexed(r_base, r_index, As32BitReg(r_src), 2, kReference);
+}
+
/*
* Load value from base + displacement. Optionally perform null check
* on base (which must have an associated s_reg and MIR). If not
@@ -1042,6 +1050,7 @@ LIR* Arm64Mir2Lir::LoadBaseDispBody(RegStorage r_base, int displacement, RegStor
case kDouble: // Intentional fall-through.
case kWord: // Intentional fall-through.
case k64:
+ r_dest = Check64BitReg(r_dest);
scale = 3;
if (r_dest.IsFloat()) {
DCHECK(r_dest.IsDouble());
@@ -1055,6 +1064,7 @@ LIR* Arm64Mir2Lir::LoadBaseDispBody(RegStorage r_base, int displacement, RegStor
case kSingle: // Intentional fall-through.
case k32: // Intentional fall-trough.
case kReference:
+ r_dest = Check32BitReg(r_dest);
scale = 2;
if (r_dest.IsFloat()) {
DCHECK(r_dest.IsSingle());
@@ -1106,18 +1116,27 @@ LIR* Arm64Mir2Lir::LoadBaseDispBody(RegStorage r_base, int displacement, RegStor
return load;
}
-LIR* Arm64Mir2Lir::LoadBaseDispVolatile(RegStorage r_base, int displacement, RegStorage r_dest,
- OpSize size) {
+LIR* Arm64Mir2Lir::LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
+ OpSize size, VolatileKind is_volatile) {
// LoadBaseDisp() will emit correct insn for atomic load on arm64
// assuming r_dest is correctly prepared using RegClassForFieldLoadStore().
- return LoadBaseDisp(r_base, displacement, r_dest, size);
-}
-LIR* Arm64Mir2Lir::LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
- OpSize size) {
- return LoadBaseDispBody(r_base, displacement, r_dest, size);
+ LIR* load = LoadBaseDispBody(r_base, displacement, r_dest, size);
+
+ if (UNLIKELY(is_volatile == kVolatile)) {
+ // Without context sensitive analysis, we must issue the most conservative barriers.
+ // In this case, either a load or store may follow so we issue both barriers.
+ GenMemBarrier(kLoadLoad);
+ GenMemBarrier(kLoadStore);
+ }
+
+ return load;
}
+LIR* Arm64Mir2Lir::LoadRefDisp(RegStorage r_base, int displacement, RegStorage r_dest,
+ VolatileKind is_volatile) {
+ return LoadBaseDisp(r_base, displacement, As32BitReg(r_dest), kReference, is_volatile);
+}
LIR* Arm64Mir2Lir::StoreBaseDispBody(RegStorage r_base, int displacement, RegStorage r_src,
OpSize size) {
@@ -1130,6 +1149,7 @@ LIR* Arm64Mir2Lir::StoreBaseDispBody(RegStorage r_base, int displacement, RegSto
case kDouble: // Intentional fall-through.
case kWord: // Intentional fall-through.
case k64:
+ r_src = Check64BitReg(r_src);
scale = 3;
if (r_src.IsFloat()) {
DCHECK(r_src.IsDouble());
@@ -1143,6 +1163,7 @@ LIR* Arm64Mir2Lir::StoreBaseDispBody(RegStorage r_base, int displacement, RegSto
case kSingle: // Intentional fall-through.
case k32: // Intentional fall-trough.
case kReference:
+ r_src = Check32BitReg(r_src);
scale = 2;
if (r_src.IsFloat()) {
DCHECK(r_src.IsSingle());
@@ -1188,16 +1209,29 @@ LIR* Arm64Mir2Lir::StoreBaseDispBody(RegStorage r_base, int displacement, RegSto
return store;
}
-LIR* Arm64Mir2Lir::StoreBaseDispVolatile(RegStorage r_base, int displacement, RegStorage r_src,
- OpSize size) {
+LIR* Arm64Mir2Lir::StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src,
+ OpSize size, VolatileKind is_volatile) {
+ if (UNLIKELY(is_volatile == kVolatile)) {
+ // There might have been a store before this volatile one so insert StoreStore barrier.
+ GenMemBarrier(kStoreStore);
+ }
+
// StoreBaseDisp() will emit correct insn for atomic store on arm64
// assuming r_dest is correctly prepared using RegClassForFieldLoadStore().
- return StoreBaseDisp(r_base, displacement, r_src, size);
+
+ LIR* store = StoreBaseDispBody(r_base, displacement, r_src, size);
+
+ if (UNLIKELY(is_volatile == kVolatile)) {
+ // A load might follow the volatile store so insert a StoreLoad barrier.
+ GenMemBarrier(kStoreLoad);
+ }
+
+ return store;
}
-LIR* Arm64Mir2Lir::StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src,
- OpSize size) {
- return StoreBaseDispBody(r_base, displacement, r_src, size);
+LIR* Arm64Mir2Lir::StoreRefDisp(RegStorage r_base, int displacement, RegStorage r_src,
+ VolatileKind is_volatile) {
+ return StoreBaseDisp(r_base, displacement, As32BitReg(r_src), kReference, is_volatile);
}
LIR* Arm64Mir2Lir::OpFpRegCopy(RegStorage r_dest, RegStorage r_src) {
diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc
index ec0fb43..5ff7380 100644
--- a/compiler/dex/quick/codegen_util.cc
+++ b/compiler/dex/quick/codegen_util.cc
@@ -79,6 +79,15 @@ void Mir2Lir::MarkSafepointPC(LIR* inst) {
DCHECK(safepoint_pc->u.m.def_mask->Equals(kEncodeAll));
}
+void Mir2Lir::MarkSafepointPCAfter(LIR* after) {
+ DCHECK(!after->flags.use_def_invalid);
+ after->u.m.def_mask = &kEncodeAll;
+ // As NewLIR0 uses Append, we need to create the LIR by hand.
+ LIR* safepoint_pc = RawLIR(current_dalvik_offset_, kPseudoSafepointPC);
+ InsertLIRAfter(after, safepoint_pc);
+ DCHECK(safepoint_pc->u.m.def_mask->Equals(kEncodeAll));
+}
+
/* Remove a LIR from the list. */
void Mir2Lir::UnlinkLIR(LIR* lir) {
if (UNLIKELY(lir == first_lir_insn_)) {
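
MarkSafepointPCAfter is introduced because the LIR returned for a volatile access may now be followed by barrier code emitted inside Load/StoreBaseDisp, so the safepoint (and the null-pointer-exception marking built on it, as the GenIGet/GenIPut hunks below show) must be spliced in right after the memory instruction itself rather than after last_lir_insn_. A standalone sketch of the underlying insert-after splice on a doubly linked instruction list, with a toy node type rather than ART's LIR:

    // Toy doubly linked node standing in for a LIR instruction.
    struct Node {
      int payload = 0;
      Node* prev = nullptr;
      Node* next = nullptr;
    };

    // Splice new_node in immediately after pos, as InsertLIRAfter does for LIR.
    void InsertAfter(Node* pos, Node* new_node) {
      new_node->prev = pos;
      new_node->next = pos->next;
      if (pos->next != nullptr) {
        pos->next->prev = new_node;
      }
      pos->next = new_node;
    }
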
diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc
index e36b592..b00cbeb 100644
--- a/compiler/dex/quick/gen_common.cc
+++ b/compiler/dex/quick/gen_common.cc
@@ -196,6 +196,15 @@ void Mir2Lir::MarkPossibleNullPointerException(int opt_flags) {
}
}
+void Mir2Lir::MarkPossibleNullPointerExceptionAfter(int opt_flags, LIR* after) {
+ if (!cu_->compiler_driver->GetCompilerOptions().GetExplicitNullChecks()) {
+ if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) {
+ return;
+ }
+ MarkSafepointPCAfter(after);
+ }
+}
+
void Mir2Lir::MarkPossibleStackOverflowException() {
if (!cu_->compiler_driver->GetCompilerOptions().GetExplicitStackOverflowChecks()) {
MarkSafepointPC(last_lir_insn_);
@@ -506,7 +515,7 @@ void Mir2Lir::GenFilledNewArray(CallInfo* info) {
for (int i = 0; i < elems; i++) {
RegLocation rl_arg = LoadValue(info->args[i], kCoreReg);
Store32Disp(TargetReg(kRet0),
- mirror::Array::DataOffset(component_size).Int32Value() + i * 4, rl_arg.reg);
+ mirror::Array::DataOffset(component_size).Int32Value() + i * 4, rl_arg.reg);
// If the LoadValue caused a temp to be allocated, free it
if (IsTemp(rl_arg.reg)) {
FreeTemp(rl_arg.reg);
@@ -575,7 +584,8 @@ void Mir2Lir::GenSput(MIR* mir, RegLocation rl_src, bool is_long_or_double,
// Fast path, static storage base is this method's class
RegLocation rl_method = LoadCurrMethod();
r_base = AllocTempRef();
- LoadRefDisp(rl_method.reg, mirror::ArtMethod::DeclaringClassOffset().Int32Value(), r_base);
+ LoadRefDisp(rl_method.reg, mirror::ArtMethod::DeclaringClassOffset().Int32Value(), r_base,
+ kNotVolatile);
if (IsTemp(rl_method.reg)) {
FreeTemp(rl_method.reg);
}
@@ -592,9 +602,10 @@ void Mir2Lir::GenSput(MIR* mir, RegLocation rl_src, bool is_long_or_double,
LoadCurrMethodDirect(r_method);
r_base = TargetReg(kArg0);
LockTemp(r_base);
- LoadRefDisp(r_method, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(), r_base);
+ LoadRefDisp(r_method, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(), r_base,
+ kNotVolatile);
int32_t offset_of_field = ObjArray::OffsetOfElement(field_info.StorageIndex()).Int32Value();
- LoadRefDisp(r_base, offset_of_field, r_base);
+ LoadRefDisp(r_base, offset_of_field, r_base, kNotVolatile);
// r_base now points at static storage (Class*) or NULL if the type is not yet resolved.
if (!field_info.IsInitialized() &&
(mir->optimization_flags & MIR_IGNORE_CLINIT_CHECK) == 0) {
@@ -626,14 +637,12 @@ void Mir2Lir::GenSput(MIR* mir, RegLocation rl_src, bool is_long_or_double,
} else {
rl_src = LoadValue(rl_src, reg_class);
}
- if (field_info.IsVolatile()) {
- // There might have been a store before this volatile one so insert StoreStore barrier.
- GenMemBarrier(kStoreStore);
- StoreBaseDispVolatile(r_base, field_info.FieldOffset().Int32Value(), rl_src.reg, store_size);
- // A load might follow the volatile store so insert a StoreLoad barrier.
- GenMemBarrier(kStoreLoad);
+ if (is_object) {
+ StoreRefDisp(r_base, field_info.FieldOffset().Int32Value(), rl_src.reg,
+ field_info.IsVolatile() ? kVolatile : kNotVolatile);
} else {
- StoreBaseDisp(r_base, field_info.FieldOffset().Int32Value(), rl_src.reg, store_size);
+ StoreBaseDisp(r_base, field_info.FieldOffset().Int32Value(), rl_src.reg, store_size,
+ field_info.IsVolatile() ? kVolatile : kNotVolatile);
}
if (is_object && !mir_graph_->IsConstantNullRef(rl_src)) {
MarkGCCard(rl_src.reg, r_base);
@@ -672,7 +681,8 @@ void Mir2Lir::GenSget(MIR* mir, RegLocation rl_dest,
// Fast path, static storage base is this method's class
RegLocation rl_method = LoadCurrMethod();
r_base = AllocTempRef();
- LoadRefDisp(rl_method.reg, mirror::ArtMethod::DeclaringClassOffset().Int32Value(), r_base);
+ LoadRefDisp(rl_method.reg, mirror::ArtMethod::DeclaringClassOffset().Int32Value(), r_base,
+ kNotVolatile);
} else {
// Medium path, static storage base in a different class which requires checks that the other
// class is initialized
@@ -685,9 +695,10 @@ void Mir2Lir::GenSget(MIR* mir, RegLocation rl_dest,
LoadCurrMethodDirect(r_method);
r_base = TargetReg(kArg0);
LockTemp(r_base);
- LoadRefDisp(r_method, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(), r_base);
+ LoadRefDisp(r_method, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(), r_base,
+ kNotVolatile);
int32_t offset_of_field = ObjArray::OffsetOfElement(field_info.StorageIndex()).Int32Value();
- LoadRefDisp(r_base, offset_of_field, r_base);
+ LoadRefDisp(r_base, offset_of_field, r_base, kNotVolatile);
// r_base now points at static storage (Class*) or NULL if the type is not yet resolved.
if (!field_info.IsInitialized() &&
(mir->optimization_flags & MIR_IGNORE_CLINIT_CHECK) == 0) {
@@ -717,14 +728,12 @@ void Mir2Lir::GenSget(MIR* mir, RegLocation rl_dest,
RegLocation rl_result = EvalLoc(rl_dest, reg_class, true);
int field_offset = field_info.FieldOffset().Int32Value();
- if (field_info.IsVolatile()) {
- LoadBaseDispVolatile(r_base, field_offset, rl_result.reg, load_size);
- // Without context sensitive analysis, we must issue the most conservative barriers.
- // In this case, either a load or store may follow so we issue both barriers.
- GenMemBarrier(kLoadLoad);
- GenMemBarrier(kLoadStore);
+ if (is_object) {
+ LoadRefDisp(r_base, field_offset, rl_result.reg, field_info.IsVolatile() ? kVolatile :
+ kNotVolatile);
} else {
- LoadBaseDisp(r_base, field_offset, rl_result.reg, load_size);
+ LoadBaseDisp(r_base, field_offset, rl_result.reg, load_size, field_info.IsVolatile() ?
+ kVolatile : kNotVolatile);
}
FreeTemp(r_base);
@@ -785,17 +794,15 @@ void Mir2Lir::GenIGet(MIR* mir, int opt_flags, OpSize size,
GenNullCheck(rl_obj.reg, opt_flags);
RegLocation rl_result = EvalLoc(rl_dest, reg_class, true);
int field_offset = field_info.FieldOffset().Int32Value();
- if (field_info.IsVolatile()) {
- LoadBaseDispVolatile(rl_obj.reg, field_offset, rl_result.reg, load_size);
- MarkPossibleNullPointerException(opt_flags);
- // Without context sensitive analysis, we must issue the most conservative barriers.
- // In this case, either a load or store may follow so we issue both barriers.
- GenMemBarrier(kLoadLoad);
- GenMemBarrier(kLoadStore);
+ LIR* load_lir;
+ if (is_object) {
+ load_lir = LoadRefDisp(rl_obj.reg, field_offset, rl_result.reg, field_info.IsVolatile() ?
+ kVolatile : kNotVolatile);
} else {
- LoadBaseDisp(rl_obj.reg, field_offset, rl_result.reg, load_size);
- MarkPossibleNullPointerException(opt_flags);
+ load_lir = LoadBaseDisp(rl_obj.reg, field_offset, rl_result.reg, load_size,
+ field_info.IsVolatile() ? kVolatile : kNotVolatile);
}
+ MarkPossibleNullPointerExceptionAfter(opt_flags, load_lir);
if (is_long_or_double) {
StoreValueWide(rl_dest, rl_result);
} else {
@@ -847,17 +854,15 @@ void Mir2Lir::GenIPut(MIR* mir, int opt_flags, OpSize size,
}
GenNullCheck(rl_obj.reg, opt_flags);
int field_offset = field_info.FieldOffset().Int32Value();
- if (field_info.IsVolatile()) {
- // There might have been a store before this volatile one so insert StoreStore barrier.
- GenMemBarrier(kStoreStore);
- StoreBaseDispVolatile(rl_obj.reg, field_offset, rl_src.reg, store_size);
- MarkPossibleNullPointerException(opt_flags);
- // A load might follow the volatile store so insert a StoreLoad barrier.
- GenMemBarrier(kStoreLoad);
+ LIR* store;
+ if (is_object) {
+ store = StoreRefDisp(rl_obj.reg, field_offset, rl_src.reg, field_info.IsVolatile() ?
+ kVolatile : kNotVolatile);
} else {
- StoreBaseDisp(rl_obj.reg, field_offset, rl_src.reg, store_size);
- MarkPossibleNullPointerException(opt_flags);
+ store = StoreBaseDisp(rl_obj.reg, field_offset, rl_src.reg, store_size,
+ field_info.IsVolatile() ? kVolatile : kNotVolatile);
}
+ MarkPossibleNullPointerExceptionAfter(opt_flags, store);
if (is_object && !mir_graph_->IsConstantNullRef(rl_src)) {
MarkGCCard(rl_src.reg, rl_obj.reg);
}
@@ -916,9 +921,9 @@ void Mir2Lir::GenConstClass(uint32_t type_idx, RegLocation rl_dest) {
// We're don't need access checks, load type from dex cache
int32_t dex_cache_offset =
mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value();
- LoadRefDisp(rl_method.reg, dex_cache_offset, res_reg);
+ LoadRefDisp(rl_method.reg, dex_cache_offset, res_reg, kNotVolatile);
int32_t offset_of_type = ClassArray::OffsetOfElement(type_idx).Int32Value();
- LoadRefDisp(res_reg, offset_of_type, rl_result.reg);
+ LoadRefDisp(res_reg, offset_of_type, rl_result.reg, kNotVolatile);
if (!cu_->compiler_driver->CanAssumeTypeIsPresentInDexCache(*cu_->dex_file,
type_idx) || SLOW_TYPE_PATH) {
// Slow path, at runtime test if type is null and if so initialize
@@ -989,10 +994,10 @@ void Mir2Lir::GenConstString(uint32_t string_idx, RegLocation rl_dest) {
LoadCurrMethodDirect(r_method);
}
LoadRefDisp(r_method, mirror::ArtMethod::DexCacheStringsOffset().Int32Value(),
- TargetReg(kArg0));
+ TargetReg(kArg0), kNotVolatile);
// Might call out to helper, which will return resolved string in kRet0
- LoadRefDisp(TargetReg(kArg0), offset_of_string, TargetReg(kRet0));
+ LoadRefDisp(TargetReg(kArg0), offset_of_string, TargetReg(kRet0), kNotVolatile);
LIR* fromfast = OpCmpImmBranch(kCondEq, TargetReg(kRet0), 0, NULL);
LIR* cont = NewLIR0(kPseudoTargetLabel);
@@ -1031,8 +1036,9 @@ void Mir2Lir::GenConstString(uint32_t string_idx, RegLocation rl_dest) {
RegLocation rl_method = LoadCurrMethod();
RegStorage res_reg = AllocTempRef();
RegLocation rl_result = EvalLoc(rl_dest, kRefReg, true);
- LoadRefDisp(rl_method.reg, mirror::ArtMethod::DexCacheStringsOffset().Int32Value(), res_reg);
- LoadRefDisp(res_reg, offset_of_string, rl_result.reg);
+ LoadRefDisp(rl_method.reg, mirror::ArtMethod::DexCacheStringsOffset().Int32Value(), res_reg,
+ kNotVolatile);
+ LoadRefDisp(res_reg, offset_of_string, rl_result.reg, kNotVolatile);
StoreValue(rl_dest, rl_result);
}
}
@@ -1133,14 +1139,17 @@ void Mir2Lir::GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx, Re
LoadCurrMethodDirect(check_class);
if (use_declaring_class) {
- LoadRefDisp(check_class, mirror::ArtMethod::DeclaringClassOffset().Int32Value(), check_class);
- LoadRefDisp(object.reg, mirror::Object::ClassOffset().Int32Value(), object_class);
+ LoadRefDisp(check_class, mirror::ArtMethod::DeclaringClassOffset().Int32Value(), check_class,
+ kNotVolatile);
+ LoadRefDisp(object.reg, mirror::Object::ClassOffset().Int32Value(), object_class,
+ kNotVolatile);
} else {
LoadRefDisp(check_class, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
- check_class);
- LoadRefDisp(object.reg, mirror::Object::ClassOffset().Int32Value(), object_class);
+ check_class, kNotVolatile);
+ LoadRefDisp(object.reg, mirror::Object::ClassOffset().Int32Value(), object_class,
+ kNotVolatile);
int32_t offset_of_type = ClassArray::OffsetOfElement(type_idx).Int32Value();
- LoadRefDisp(check_class, offset_of_type, check_class);
+ LoadRefDisp(check_class, offset_of_type, check_class, kNotVolatile);
}
LIR* ne_branchover = NULL;
@@ -1196,14 +1205,14 @@ void Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_know
} else if (use_declaring_class) {
LoadValueDirectFixed(rl_src, TargetReg(kArg0)); // kArg0 <= ref
LoadRefDisp(TargetReg(kArg1), mirror::ArtMethod::DeclaringClassOffset().Int32Value(),
- class_reg);
+ class_reg, kNotVolatile);
} else {
// Load dex cache entry into class_reg (kArg2)
LoadValueDirectFixed(rl_src, TargetReg(kArg0)); // kArg0 <= ref
LoadRefDisp(TargetReg(kArg1), mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
- class_reg);
+ class_reg, kNotVolatile);
int32_t offset_of_type = ClassArray::OffsetOfElement(type_idx).Int32Value();
- LoadRefDisp(class_reg, offset_of_type, class_reg);
+ LoadRefDisp(class_reg, offset_of_type, class_reg, kNotVolatile);
if (!can_assume_type_is_in_dex_cache) {
// Need to test presence of type in dex cache at runtime
LIR* hop_branch = OpCmpImmBranch(kCondNe, class_reg, 0, NULL);
@@ -1231,7 +1240,8 @@ void Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_know
/* load object->klass_ */
DCHECK_EQ(mirror::Object::ClassOffset().Int32Value(), 0);
- LoadRefDisp(TargetReg(kArg0), mirror::Object::ClassOffset().Int32Value(), TargetReg(kArg1));
+ LoadRefDisp(TargetReg(kArg0), mirror::Object::ClassOffset().Int32Value(), TargetReg(kArg1),
+ kNotVolatile);
/* kArg0 is ref, kArg1 is ref->klass_, kArg2 is class */
LIR* branchover = NULL;
if (type_known_final) {
@@ -1344,13 +1354,13 @@ void Mir2Lir::GenCheckCast(uint32_t insn_idx, uint32_t type_idx, RegLocation rl_
OpRegCopy(class_reg, TargetReg(kRet0)); // Align usage with fast path
} else if (use_declaring_class) {
LoadRefDisp(TargetReg(kArg1), mirror::ArtMethod::DeclaringClassOffset().Int32Value(),
- class_reg);
+ class_reg, kNotVolatile);
} else {
// Load dex cache entry into class_reg (kArg2)
LoadRefDisp(TargetReg(kArg1), mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
- class_reg);
+ class_reg, kNotVolatile);
int32_t offset_of_type = ClassArray::OffsetOfElement(type_idx).Int32Value();
- LoadRefDisp(class_reg, offset_of_type, class_reg);
+ LoadRefDisp(class_reg, offset_of_type, class_reg, kNotVolatile);
if (!cu_->compiler_driver->CanAssumeTypeIsPresentInDexCache(*cu_->dex_file, type_idx)) {
// Need to test presence of type in dex cache at runtime
LIR* hop_branch = OpCmpImmBranch(kCondEq, class_reg, 0, NULL);
@@ -1405,7 +1415,7 @@ void Mir2Lir::GenCheckCast(uint32_t insn_idx, uint32_t type_idx, RegLocation rl_
if (load_) {
m2l_->LoadRefDisp(m2l_->TargetReg(kArg0), mirror::Object::ClassOffset().Int32Value(),
- m2l_->TargetReg(kArg1));
+ m2l_->TargetReg(kArg1), kNotVolatile);
}
if (m2l_->cu_->target64) {
m2l_->CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(8, pCheckCast), m2l_->TargetReg(kArg2),
@@ -1436,7 +1446,8 @@ void Mir2Lir::GenCheckCast(uint32_t insn_idx, uint32_t type_idx, RegLocation rl_
LIR* branch1 = OpCmpImmBranch(kCondEq, TargetReg(kArg0), 0, NULL);
/* load object->klass_ */
DCHECK_EQ(mirror::Object::ClassOffset().Int32Value(), 0);
- LoadRefDisp(TargetReg(kArg0), mirror::Object::ClassOffset().Int32Value(), TargetReg(kArg1));
+ LoadRefDisp(TargetReg(kArg0), mirror::Object::ClassOffset().Int32Value(), TargetReg(kArg1),
+ kNotVolatile);
LIR* branch2 = OpCmpBranch(kCondNe, TargetReg(kArg1), class_reg, NULL);
LIR* cont = NewLIR0(kPseudoTargetLabel);
diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc
index 638c590..008ebfb 100644
--- a/compiler/dex/quick/gen_invoke.cc
+++ b/compiler/dex/quick/gen_invoke.cc
@@ -501,7 +501,7 @@ void Mir2Lir::FlushIns(RegLocation* ArgLocs, RegLocation rl_method) {
StoreValue(rl_method, rl_src);
// If Method* has been promoted, explicitly flush
if (rl_method.location == kLocPhysReg) {
- StoreRefDisp(TargetReg(kSp), 0, TargetReg(kArg0));
+ StoreRefDisp(TargetReg(kSp), 0, TargetReg(kArg0), kNotVolatile);
}
if (cu_->num_ins == 0) {
@@ -616,7 +616,8 @@ static int NextSDCallInsn(CompilationUnit* cu, CallInfo* info,
case 1: // Get method->dex_cache_resolved_methods_
cg->LoadRefDisp(cg->TargetReg(kArg0),
mirror::ArtMethod::DexCacheResolvedMethodsOffset().Int32Value(),
- cg->TargetReg(kArg0));
+ cg->TargetReg(kArg0),
+ kNotVolatile);
// Set up direct code if known.
if (direct_code != 0) {
if (direct_code != static_cast<uintptr_t>(-1)) {
@@ -631,7 +632,8 @@ static int NextSDCallInsn(CompilationUnit* cu, CallInfo* info,
CHECK_EQ(cu->dex_file, target_method.dex_file);
cg->LoadRefDisp(cg->TargetReg(kArg0),
ObjArray::OffsetOfElement(target_method.dex_method_index).Int32Value(),
- cg->TargetReg(kArg0));
+ cg->TargetReg(kArg0),
+ kNotVolatile);
break;
case 3: // Grab the code from the method*
if (cu->instruction_set != kX86 && cu->instruction_set != kX86_64) {
@@ -676,17 +678,20 @@ static int NextVCallInsn(CompilationUnit* cu, CallInfo* info,
cg->GenNullCheck(cg->TargetReg(kArg1), info->opt_flags);
// get this->klass_ [use kArg1, set kInvokeTgt]
cg->LoadRefDisp(cg->TargetReg(kArg1), mirror::Object::ClassOffset().Int32Value(),
- cg->TargetReg(kInvokeTgt));
+ cg->TargetReg(kInvokeTgt),
+ kNotVolatile);
cg->MarkPossibleNullPointerException(info->opt_flags);
break;
case 2: // Get this->klass_->vtable [usr kInvokeTgt, set kInvokeTgt]
cg->LoadRefDisp(cg->TargetReg(kInvokeTgt), mirror::Class::VTableOffset().Int32Value(),
- cg->TargetReg(kInvokeTgt));
+ cg->TargetReg(kInvokeTgt),
+ kNotVolatile);
break;
case 3: // Get target method [use kInvokeTgt, set kArg0]
cg->LoadRefDisp(cg->TargetReg(kInvokeTgt),
ObjArray::OffsetOfElement(method_idx).Int32Value(),
- cg->TargetReg(kArg0));
+ cg->TargetReg(kArg0),
+ kNotVolatile);
break;
case 4: // Get the compiled code address [uses kArg0, sets kInvokeTgt]
if (cu->instruction_set != kX86 && cu->instruction_set != kX86_64) {
@@ -731,19 +736,22 @@ static int NextInterfaceCallInsn(CompilationUnit* cu, CallInfo* info, int state,
cg->GenNullCheck(cg->TargetReg(kArg1), info->opt_flags);
// Get this->klass_ [use kArg1, set kInvokeTgt]
cg->LoadRefDisp(cg->TargetReg(kArg1), mirror::Object::ClassOffset().Int32Value(),
- cg->TargetReg(kInvokeTgt));
+ cg->TargetReg(kInvokeTgt),
+ kNotVolatile);
cg->MarkPossibleNullPointerException(info->opt_flags);
break;
case 3: // Get this->klass_->imtable [use kInvokeTgt, set kInvokeTgt]
// NOTE: native pointer.
cg->LoadRefDisp(cg->TargetReg(kInvokeTgt), mirror::Class::ImTableOffset().Int32Value(),
- cg->TargetReg(kInvokeTgt));
+ cg->TargetReg(kInvokeTgt),
+ kNotVolatile);
break;
case 4: // Get target method [use kInvokeTgt, set kArg0]
// NOTE: native pointer.
cg->LoadRefDisp(cg->TargetReg(kInvokeTgt),
ObjArray::OffsetOfElement(method_idx % ClassLinker::kImtSize).Int32Value(),
- cg->TargetReg(kArg0));
+ cg->TargetReg(kArg0),
+ kNotVolatile);
break;
case 5: // Get the compiled code address [use kArg0, set kInvokeTgt]
if (cu->instruction_set != kX86 && cu->instruction_set != kX86_64) {
@@ -967,7 +975,7 @@ int Mir2Lir::GenDalvikArgsNoRange(CallInfo* info,
{
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
if (rl_arg.wide) {
- StoreBaseDisp(TargetReg(kSp), outs_offset, arg_reg, k64);
+ StoreBaseDisp(TargetReg(kSp), outs_offset, arg_reg, k64, kNotVolatile);
next_use += 2;
} else {
Store32Disp(TargetReg(kSp), outs_offset, arg_reg);
@@ -1037,7 +1045,7 @@ int Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state,
loc = UpdateLocWide(loc);
if ((next_arg >= 2) && (loc.location == kLocPhysReg)) {
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
- StoreBaseDisp(TargetReg(kSp), SRegOffset(loc.s_reg_low), loc.reg, k64);
+ StoreBaseDisp(TargetReg(kSp), SRegOffset(loc.s_reg_low), loc.reg, k64, kNotVolatile);
}
next_arg += 2;
} else {
@@ -1307,7 +1315,7 @@ bool Mir2Lir::GenInlinedCharAt(CallInfo* info) {
reg_off = AllocTemp();
reg_ptr = AllocTempRef();
Load32Disp(rl_obj.reg, offset_offset, reg_off);
- LoadRefDisp(rl_obj.reg, value_offset, reg_ptr);
+ LoadRefDisp(rl_obj.reg, value_offset, reg_ptr, kNotVolatile);
}
if (rl_idx.is_const) {
OpRegImm(kOpAdd, reg_off, mir_graph_->ConstantValue(rl_idx.orig_sreg));
@@ -1672,7 +1680,7 @@ bool Mir2Lir::GenInlinedUnsafeGet(CallInfo* info,
} else {
RegStorage rl_temp_offset = AllocTemp();
OpRegRegReg(kOpAdd, rl_temp_offset, rl_object.reg, rl_offset.reg);
- LoadBaseDisp(rl_temp_offset, 0, rl_result.reg, k64);
+ LoadBaseDisp(rl_temp_offset, 0, rl_result.reg, k64, kNotVolatile);
FreeTemp(rl_temp_offset);
}
} else {
@@ -1719,7 +1727,7 @@ bool Mir2Lir::GenInlinedUnsafePut(CallInfo* info, bool is_long,
} else {
RegStorage rl_temp_offset = AllocTemp();
OpRegRegReg(kOpAdd, rl_temp_offset, rl_object.reg, rl_offset.reg);
- StoreBaseDisp(rl_temp_offset, 0, rl_value.reg, k64);
+ StoreBaseDisp(rl_temp_offset, 0, rl_value.reg, k64, kNotVolatile);
FreeTemp(rl_temp_offset);
}
} else {
diff --git a/compiler/dex/quick/gen_loadstore.cc b/compiler/dex/quick/gen_loadstore.cc
index d6f6ea1..bfb77fc 100644
--- a/compiler/dex/quick/gen_loadstore.cc
+++ b/compiler/dex/quick/gen_loadstore.cc
@@ -66,7 +66,7 @@ void Mir2Lir::Workaround7250540(RegLocation rl_dest, RegStorage zero_reg) {
} else {
// Lives in the frame, need to store.
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
- StoreBaseDisp(TargetReg(kSp), SRegOffset(rl_dest.s_reg_low), temp_reg, k32);
+ StoreBaseDisp(TargetReg(kSp), SRegOffset(rl_dest.s_reg_low), temp_reg, k32, kNotVolatile);
}
if (!zero_reg.Valid()) {
FreeTemp(temp_reg);
@@ -93,7 +93,7 @@ void Mir2Lir::LoadValueDirect(RegLocation rl_src, RegStorage r_dest) {
(rl_src.location == kLocCompilerTemp));
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
if (rl_src.ref) {
- LoadRefDisp(TargetReg(kSp), SRegOffset(rl_src.s_reg_low), r_dest);
+ LoadRefDisp(TargetReg(kSp), SRegOffset(rl_src.s_reg_low), r_dest, kNotVolatile);
} else {
Load32Disp(TargetReg(kSp), SRegOffset(rl_src.s_reg_low), r_dest);
}
@@ -126,7 +126,7 @@ void Mir2Lir::LoadValueDirectWide(RegLocation rl_src, RegStorage r_dest) {
DCHECK((rl_src.location == kLocDalvikFrame) ||
(rl_src.location == kLocCompilerTemp));
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
- LoadBaseDisp(TargetReg(kSp), SRegOffset(rl_src.s_reg_low), r_dest, k64);
+ LoadBaseDisp(TargetReg(kSp), SRegOffset(rl_src.s_reg_low), r_dest, k64, kNotVolatile);
}
}
@@ -215,7 +215,7 @@ void Mir2Lir::StoreValue(RegLocation rl_dest, RegLocation rl_src) {
def_start = last_lir_insn_;
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
if (rl_dest.ref) {
- StoreRefDisp(TargetReg(kSp), SRegOffset(rl_dest.s_reg_low), rl_dest.reg);
+ StoreRefDisp(TargetReg(kSp), SRegOffset(rl_dest.s_reg_low), rl_dest.reg, kNotVolatile);
} else {
Store32Disp(TargetReg(kSp), SRegOffset(rl_dest.s_reg_low), rl_dest.reg);
}
@@ -305,7 +305,7 @@ void Mir2Lir::StoreValueWide(RegLocation rl_dest, RegLocation rl_src) {
DCHECK_EQ((mir_graph_->SRegToVReg(rl_dest.s_reg_low)+1),
mir_graph_->SRegToVReg(GetSRegHi(rl_dest.s_reg_low)));
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
- StoreBaseDisp(TargetReg(kSp), SRegOffset(rl_dest.s_reg_low), rl_dest.reg, k64);
+ StoreBaseDisp(TargetReg(kSp), SRegOffset(rl_dest.s_reg_low), rl_dest.reg, k64, kNotVolatile);
MarkClean(rl_dest);
def_end = last_lir_insn_;
MarkDefWide(rl_dest, def_start, def_end);
@@ -369,7 +369,7 @@ void Mir2Lir::StoreFinalValueWide(RegLocation rl_dest, RegLocation rl_src) {
DCHECK_EQ((mir_graph_->SRegToVReg(rl_dest.s_reg_low)+1),
mir_graph_->SRegToVReg(GetSRegHi(rl_dest.s_reg_low)));
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
- StoreBaseDisp(TargetReg(kSp), SRegOffset(rl_dest.s_reg_low), rl_dest.reg, k64);
+ StoreBaseDisp(TargetReg(kSp), SRegOffset(rl_dest.s_reg_low), rl_dest.reg, k64, kNotVolatile);
MarkClean(rl_dest);
LIR *def_end = last_lir_insn_;
MarkDefWide(rl_dest, def_start, def_end);
diff --git a/compiler/dex/quick/mips/call_mips.cc b/compiler/dex/quick/mips/call_mips.cc
index e53105f..26ea6a8 100644
--- a/compiler/dex/quick/mips/call_mips.cc
+++ b/compiler/dex/quick/mips/call_mips.cc
@@ -264,9 +264,9 @@ void MipsMir2Lir::GenMoveException(RegLocation rl_dest) {
int ex_offset = Thread::ExceptionOffset<4>().Int32Value();
RegLocation rl_result = EvalLoc(rl_dest, kRefReg, true);
RegStorage reset_reg = AllocTempRef();
- LoadRefDisp(rs_rMIPS_SELF, ex_offset, rl_result.reg);
+ LoadRefDisp(rs_rMIPS_SELF, ex_offset, rl_result.reg, kNotVolatile);
LoadConstant(reset_reg, 0);
- StoreRefDisp(rs_rMIPS_SELF, ex_offset, reset_reg);
+ StoreRefDisp(rs_rMIPS_SELF, ex_offset, reset_reg, kNotVolatile);
FreeTemp(reset_reg);
StoreValue(rl_dest, rl_result);
}
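
The MIPS GenMoveException hunk is a two-step pattern: read the pending exception out of the Thread object, then null the slot, both marked kNotVolatile in the diff above. A minimal sketch with placeholder types (not ART's Thread layout):

// LoadRefDisp pulls Thread::exception_ into the result register and
// StoreRefDisp writes zero back; placeholder types for illustration only.
struct Throwable;
struct Thread { Throwable* exception_; };

Throwable* MoveException(Thread* self) {
  Throwable* result = self->exception_;  // LoadRefDisp(rs_rMIPS_SELF, ex_offset, ...)
  self->exception_ = nullptr;            // StoreRefDisp(rs_rMIPS_SELF, ex_offset, zero)
  return result;
}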
diff --git a/compiler/dex/quick/mips/codegen_mips.h b/compiler/dex/quick/mips/codegen_mips.h
index 571adac..c0ad916 100644
--- a/compiler/dex/quick/mips/codegen_mips.h
+++ b/compiler/dex/quick/mips/codegen_mips.h
@@ -33,20 +33,16 @@ class MipsMir2Lir FINAL : public Mir2Lir {
LIR* CheckSuspendUsingLoad() OVERRIDE;
RegStorage LoadHelper(ThreadOffset<4> offset) OVERRIDE;
RegStorage LoadHelper(ThreadOffset<8> offset) OVERRIDE;
- LIR* LoadBaseDispVolatile(RegStorage r_base, int displacement, RegStorage r_dest,
- OpSize size) OVERRIDE;
LIR* LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
- OpSize size) OVERRIDE;
+ OpSize size, VolatileKind is_volatile) OVERRIDE;
LIR* LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest, int scale,
OpSize size) OVERRIDE;
LIR* LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, int displacement,
RegStorage r_dest, OpSize size) OVERRIDE;
LIR* LoadConstantNoClobber(RegStorage r_dest, int value);
LIR* LoadConstantWide(RegStorage r_dest, int64_t value);
- LIR* StoreBaseDispVolatile(RegStorage r_base, int displacement, RegStorage r_src,
- OpSize size) OVERRIDE;
LIR* StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src,
- OpSize size) OVERRIDE;
+ OpSize size, VolatileKind is_volatile) OVERRIDE;
LIR* StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src, int scale,
OpSize size) OVERRIDE;
LIR* StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, int displacement,
diff --git a/compiler/dex/quick/mips/int_mips.cc b/compiler/dex/quick/mips/int_mips.cc
index beaf6bb..903a770 100644
--- a/compiler/dex/quick/mips/int_mips.cc
+++ b/compiler/dex/quick/mips/int_mips.cc
@@ -294,7 +294,7 @@ bool MipsMir2Lir::GenInlinedPeek(CallInfo* info, OpSize size) {
RegLocation rl_address = LoadValue(rl_src_address, kCoreReg);
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
DCHECK(size == kSignedByte);
- LoadBaseDisp(rl_address.reg, 0, rl_result.reg, size);
+ LoadBaseDisp(rl_address.reg, 0, rl_result.reg, size, kNotVolatile);
StoreValue(rl_dest, rl_result);
return true;
}
@@ -310,7 +310,7 @@ bool MipsMir2Lir::GenInlinedPoke(CallInfo* info, OpSize size) {
RegLocation rl_address = LoadValue(rl_src_address, kCoreReg);
DCHECK(size == kSignedByte);
RegLocation rl_value = LoadValue(rl_src_value, kCoreReg);
- StoreBaseDisp(rl_address.reg, 0, rl_value.reg, size);
+ StoreBaseDisp(rl_address.reg, 0, rl_value.reg, size, kNotVolatile);
return true;
}
@@ -524,7 +524,7 @@ void MipsMir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
GenArrayBoundsCheck(rl_index.reg, reg_len);
FreeTemp(reg_len);
}
- LoadBaseDisp(reg_ptr, 0, rl_result.reg, size);
+ LoadBaseDisp(reg_ptr, 0, rl_result.reg, size, kNotVolatile);
FreeTemp(reg_ptr);
StoreValueWide(rl_dest, rl_result);
@@ -602,7 +602,7 @@ void MipsMir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
FreeTemp(reg_len);
}
- StoreBaseDisp(reg_ptr, 0, rl_src.reg, size);
+ StoreBaseDisp(reg_ptr, 0, rl_src.reg, size, kNotVolatile);
} else {
rl_src = LoadValue(rl_src, reg_class);
if (needs_range_check) {
diff --git a/compiler/dex/quick/mips/utility_mips.cc b/compiler/dex/quick/mips/utility_mips.cc
index 01b25f9..b49f436 100644
--- a/compiler/dex/quick/mips/utility_mips.cc
+++ b/compiler/dex/quick/mips/utility_mips.cc
@@ -546,23 +546,31 @@ LIR* MipsMir2Lir::LoadBaseDispBody(RegStorage r_base, int displacement, RegStora
return load;
}
-LIR* MipsMir2Lir::LoadBaseDispVolatile(RegStorage r_base, int displacement, RegStorage r_dest,
- OpSize size) {
- DCHECK(size != k64 && size != kDouble);
- return LoadBaseDisp(r_base, displacement, r_dest, size);
-}
-
LIR* MipsMir2Lir::LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
- OpSize size) {
+ OpSize size, VolatileKind is_volatile) {
+ if (is_volatile == kVolatile) {
+ DCHECK(size != k64 && size != kDouble);
+ }
+
// TODO: base this on target.
if (size == kWord) {
size = k32;
}
+ LIR* load;
if (size == k64 || size == kDouble) {
- return LoadBaseDispBody(r_base, displacement, r_dest.GetLow(), r_dest.GetHigh(), size);
+ load = LoadBaseDispBody(r_base, displacement, r_dest.GetLow(), r_dest.GetHigh(), size);
} else {
- return LoadBaseDispBody(r_base, displacement, r_dest, RegStorage::InvalidReg(), size);
+ load = LoadBaseDispBody(r_base, displacement, r_dest, RegStorage::InvalidReg(), size);
}
+
+ if (UNLIKELY(is_volatile == kVolatile)) {
+ // Without context sensitive analysis, we must issue the most conservative barriers.
+ // In this case, either a load or store may follow so we issue both barriers.
+ GenMemBarrier(kLoadLoad);
+ GenMemBarrier(kLoadStore);
+ }
+
+ return load;
}
// FIXME: don't split r_dest into 2 containers.
@@ -648,23 +656,31 @@ LIR* MipsMir2Lir::StoreBaseDispBody(RegStorage r_base, int displacement,
return res;
}
-LIR* MipsMir2Lir::StoreBaseDispVolatile(RegStorage r_base, int displacement, RegStorage r_src,
- OpSize size) {
- DCHECK(size != k64 && size != kDouble);
- return StoreBaseDisp(r_base, displacement, r_src, size);
-}
-
LIR* MipsMir2Lir::StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src,
- OpSize size) {
+ OpSize size, VolatileKind is_volatile) {
+ if (is_volatile == kVolatile) {
+ DCHECK(size != k64 && size != kDouble);
+ // There might have been a store before this volatile one so insert StoreStore barrier.
+ GenMemBarrier(kStoreStore);
+ }
+
// TODO: base this on target.
if (size == kWord) {
size = k32;
}
+ LIR* store;
if (size == k64 || size == kDouble) {
- return StoreBaseDispBody(r_base, displacement, r_src.GetLow(), r_src.GetHigh(), size);
+ store = StoreBaseDispBody(r_base, displacement, r_src.GetLow(), r_src.GetHigh(), size);
} else {
- return StoreBaseDispBody(r_base, displacement, r_src, RegStorage::InvalidReg(), size);
+ store = StoreBaseDispBody(r_base, displacement, r_src, RegStorage::InvalidReg(), size);
}
+
+ if (UNLIKELY(is_volatile == kVolatile)) {
+ // A load might follow the volatile store so insert a StoreLoad barrier.
+ GenMemBarrier(kStoreLoad);
+ }
+
+ return store;
}
LIR* MipsMir2Lir::OpThreadMem(OpKind op, ThreadOffset<4> thread_offset) {
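
With the *Volatile entry points gone, the MIPS Load/StoreBaseDisp own the barrier placement: LoadLoad plus LoadStore after a volatile load, StoreStore before and StoreLoad after a volatile store, with the DCHECKs above excluding 64-bit volatile accesses on this port. A sketch of that ordering, using C++11 fences purely as stand-ins for GenMemBarrier (the backend emits MIPS sync instructions, not C++; the fence mapping below is a conservative analogy):

// Ordering sketch only: atomic_thread_fence stands in for GenMemBarrier and
// the raw volatile accesses stand in for the emitted lw/sw.
#include <atomic>
#include <cstdint>

int32_t LoadVolatile32(const int32_t* addr) {
  int32_t value = *static_cast<const volatile int32_t*>(addr);  // the load itself
  std::atomic_thread_fence(std::memory_order_acquire);          // ~ kLoadLoad + kLoadStore
  return value;
}

void StoreVolatile32(int32_t* addr, int32_t value) {
  std::atomic_thread_fence(std::memory_order_release);          // ~ kStoreStore (conservative)
  *static_cast<volatile int32_t*>(addr) = value;                // the store itself
  std::atomic_thread_fence(std::memory_order_seq_cst);          // ~ kStoreLoad
}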
diff --git a/compiler/dex/quick/mir_to_lir.cc b/compiler/dex/quick/mir_to_lir.cc
index 1fc4163..5d68187 100644
--- a/compiler/dex/quick/mir_to_lir.cc
+++ b/compiler/dex/quick/mir_to_lir.cc
@@ -92,7 +92,7 @@ RegStorage Mir2Lir::LoadArg(int in_position, RegisterClass reg_class, bool wide)
if (!reg_arg.Valid()) {
RegStorage new_reg =
wide ? AllocTypedTempWide(false, reg_class) : AllocTypedTemp(false, reg_class);
- LoadBaseDisp(TargetReg(kSp), offset, new_reg, wide ? k64 : k32);
+ LoadBaseDisp(TargetReg(kSp), offset, new_reg, wide ? k64 : k32, kNotVolatile);
return new_reg;
} else {
// Check if we need to copy the arg to a different reg_class.
@@ -120,7 +120,7 @@ RegStorage Mir2Lir::LoadArg(int in_position, RegisterClass reg_class, bool wide)
// If the low part is not in a reg, we allocate a pair. Otherwise, we just load to high reg.
if (!reg_arg_low.Valid()) {
RegStorage new_regs = AllocTypedTempWide(false, reg_class);
- LoadBaseDisp(TargetReg(kSp), offset, new_regs, k64);
+ LoadBaseDisp(TargetReg(kSp), offset, new_regs, k64, kNotVolatile);
return new_regs; // The reg_class is OK, we can return.
} else {
// Assume that no ABI allows splitting a wide fp reg between a narrow fp reg and memory,
@@ -193,7 +193,7 @@ void Mir2Lir::LoadArgDirect(int in_position, RegLocation rl_dest) {
if (reg.Valid()) {
OpRegCopy(rl_dest.reg, reg);
} else {
- LoadBaseDisp(TargetReg(kSp), offset, rl_dest.reg, k64);
+ LoadBaseDisp(TargetReg(kSp), offset, rl_dest.reg, k64, kNotVolatile);
}
return;
}
@@ -211,7 +211,7 @@ void Mir2Lir::LoadArgDirect(int in_position, RegLocation rl_dest) {
OpRegCopy(rl_dest.reg.GetHigh(), reg_arg_high);
Load32Disp(TargetReg(kSp), offset, rl_dest.reg.GetLow());
} else {
- LoadBaseDisp(TargetReg(kSp), offset, rl_dest.reg, k64);
+ LoadBaseDisp(TargetReg(kSp), offset, rl_dest.reg, k64, kNotVolatile);
}
}
}
@@ -243,14 +243,11 @@ bool Mir2Lir::GenSpecialIGet(MIR* mir, const InlineMethod& special) {
r_result = wide ? AllocTypedTempWide(rl_dest.fp, reg_class)
: AllocTypedTemp(rl_dest.fp, reg_class);
}
- if (data.is_volatile) {
- LoadBaseDispVolatile(reg_obj, data.field_offset, r_result, size);
- // Without context sensitive analysis, we must issue the most conservative barriers.
- // In this case, either a load or store may follow so we issue both barriers.
- GenMemBarrier(kLoadLoad);
- GenMemBarrier(kLoadStore);
+ if (ref) {
+ LoadRefDisp(reg_obj, data.field_offset, r_result, data.is_volatile ? kVolatile : kNotVolatile);
} else {
- LoadBaseDisp(reg_obj, data.field_offset, r_result, size);
+ LoadBaseDisp(reg_obj, data.field_offset, r_result, size, data.is_volatile ? kVolatile :
+ kNotVolatile);
}
if (r_result != rl_dest.reg) {
if (wide) {
@@ -288,14 +285,11 @@ bool Mir2Lir::GenSpecialIPut(MIR* mir, const InlineMethod& special) {
RegStorage reg_obj = LoadArg(data.object_arg, kRefReg);
RegisterClass reg_class = RegClassForFieldLoadStore(size, data.is_volatile);
RegStorage reg_src = LoadArg(data.src_arg, reg_class, wide);
- if (data.is_volatile) {
- // There might have been a store before this volatile one so insert StoreStore barrier.
- GenMemBarrier(kStoreStore);
- StoreBaseDispVolatile(reg_obj, data.field_offset, reg_src, size);
- // A load might follow the volatile store so insert a StoreLoad barrier.
- GenMemBarrier(kStoreLoad);
+ if (ref) {
+ StoreRefDisp(reg_obj, data.field_offset, reg_src, data.is_volatile ? kVolatile : kNotVolatile);
} else {
- StoreBaseDisp(reg_obj, data.field_offset, reg_src, size);
+ StoreBaseDisp(reg_obj, data.field_offset, reg_src, size, data.is_volatile ? kVolatile :
+ kNotVolatile);
}
if (ref) {
MarkGCCard(reg_src, reg_obj);
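
After this change GenSpecialIGet/IPut no longer emit barriers at the call site; they only pick the ref or plain overload and forward data.is_volatile as a VolatileKind. A condensed sketch of that selection logic, with simplified stand-in types for the register, size, and field-data structures:

// The caller chooses the overload and the volatility; the backend's
// Load/StoreBaseDisp implementation owns the barrier placement.
enum VolatileKind { kNotVolatile, kVolatile };
enum OpSize { k32, k64, kReference };

struct FieldData { int field_offset; bool is_volatile; };

template <typename Backend, typename Reg>
void EmitFieldGet(Backend* cg, const FieldData& data, bool ref,
                  Reg reg_obj, Reg r_result, OpSize size) {
  const VolatileKind vk = data.is_volatile ? kVolatile : kNotVolatile;
  if (ref) {
    cg->LoadRefDisp(reg_obj, data.field_offset, r_result, vk);
  } else {
    cg->LoadBaseDisp(reg_obj, data.field_offset, r_result, size, vk);
  }
}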
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
index f70087d..b07c85e 100644
--- a/compiler/dex/quick/mir_to_lir.h
+++ b/compiler/dex/quick/mir_to_lir.h
@@ -663,6 +663,7 @@ class Mir2Lir : public Backend {
virtual void Materialize();
virtual CompiledMethod* GetCompiledMethod();
void MarkSafepointPC(LIR* inst);
+ void MarkSafepointPCAfter(LIR* after);
void SetupResourceMasks(LIR* lir);
void SetMemRefType(LIR* lir, bool is_load, int mem_type);
void AnnotateDalvikRegAccess(LIR* lir, int reg_id, bool is_load, bool is64bit);
@@ -830,6 +831,7 @@ class Mir2Lir : public Backend {
void GenArrayBoundsCheck(int32_t index, RegStorage length);
LIR* GenNullCheck(RegStorage reg);
void MarkPossibleNullPointerException(int opt_flags);
+ void MarkPossibleNullPointerExceptionAfter(int opt_flags, LIR* after);
void MarkPossibleStackOverflowException();
void ForceImplicitNullCheck(RegStorage reg, int opt_flags);
LIR* GenImmedCheck(ConditionCode c_code, RegStorage reg, int imm_val, ThrowKind kind);
@@ -1007,15 +1009,20 @@ class Mir2Lir : public Backend {
virtual LIR* LoadConstant(RegStorage r_dest, int value);
// Natural word size.
virtual LIR* LoadWordDisp(RegStorage r_base, int displacement, RegStorage r_dest) {
- return LoadBaseDisp(r_base, displacement, r_dest, kWord);
+ return LoadBaseDisp(r_base, displacement, r_dest, kWord, kNotVolatile);
}
// Load 32 bits, regardless of target.
virtual LIR* Load32Disp(RegStorage r_base, int displacement, RegStorage r_dest) {
- return LoadBaseDisp(r_base, displacement, r_dest, k32);
+ return LoadBaseDisp(r_base, displacement, r_dest, k32, kNotVolatile);
}
// Load a reference at base + displacement and decompress into register.
- virtual LIR* LoadRefDisp(RegStorage r_base, int displacement, RegStorage r_dest) {
- return LoadBaseDisp(r_base, displacement, r_dest, kReference);
+ virtual LIR* LoadRefDisp(RegStorage r_base, int displacement, RegStorage r_dest,
+ VolatileKind is_volatile) {
+ return LoadBaseDisp(r_base, displacement, r_dest, kReference, is_volatile);
+ }
+ // Load a reference at base + index and decompress into register.
+ virtual LIR* LoadRefIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest) {
+ return LoadBaseIndexed(r_base, r_index, r_dest, 2, kReference);
}
// Load Dalvik value with 32-bit memory storage. If compressed object reference, decompress.
virtual RegLocation LoadValue(RegLocation rl_src, RegisterClass op_kind);
@@ -1033,15 +1040,20 @@ class Mir2Lir : public Backend {
virtual void LoadValueDirectWideFixed(RegLocation rl_src, RegStorage r_dest);
// Store an item of natural word size.
virtual LIR* StoreWordDisp(RegStorage r_base, int displacement, RegStorage r_src) {
- return StoreBaseDisp(r_base, displacement, r_src, kWord);
+ return StoreBaseDisp(r_base, displacement, r_src, kWord, kNotVolatile);
}
// Store an uncompressed reference into a compressed 32-bit container.
- virtual LIR* StoreRefDisp(RegStorage r_base, int displacement, RegStorage r_src) {
- return StoreBaseDisp(r_base, displacement, r_src, kReference);
+ virtual LIR* StoreRefDisp(RegStorage r_base, int displacement, RegStorage r_src,
+ VolatileKind is_volatile) {
+ return StoreBaseDisp(r_base, displacement, r_src, kReference, is_volatile);
+ }
+ // Store an uncompressed reference into a compressed 32-bit container by index.
+ virtual LIR* StoreRefIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src) {
+ return StoreBaseIndexed(r_base, r_index, r_src, 2, kReference);
}
// Store 32 bits, regardless of target.
virtual LIR* Store32Disp(RegStorage r_base, int displacement, RegStorage r_src) {
- return StoreBaseDisp(r_base, displacement, r_src, k32);
+ return StoreBaseDisp(r_base, displacement, r_src, k32, kNotVolatile);
}
/**
@@ -1144,20 +1156,16 @@ class Mir2Lir : public Backend {
virtual RegStorage LoadHelper(ThreadOffset<4> offset) = 0;
virtual RegStorage LoadHelper(ThreadOffset<8> offset) = 0;
- virtual LIR* LoadBaseDispVolatile(RegStorage r_base, int displacement, RegStorage r_dest,
- OpSize size) = 0;
virtual LIR* LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
- OpSize size) = 0;
+ OpSize size, VolatileKind is_volatile) = 0;
virtual LIR* LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest,
int scale, OpSize size) = 0;
virtual LIR* LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale,
int displacement, RegStorage r_dest, OpSize size) = 0;
virtual LIR* LoadConstantNoClobber(RegStorage r_dest, int value) = 0;
virtual LIR* LoadConstantWide(RegStorage r_dest, int64_t value) = 0;
- virtual LIR* StoreBaseDispVolatile(RegStorage r_base, int displacement, RegStorage r_src,
- OpSize size) = 0;
virtual LIR* StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src,
- OpSize size) = 0;
+ OpSize size, VolatileKind is_volatile) = 0;
virtual LIR* StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src,
int scale, OpSize size) = 0;
virtual LIR* StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale,
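
In the header, the Word/32/Ref convenience wrappers now all funnel into the two VolatileKind-taking primitives, and the new indexed ref helpers hard-code a scale of 2 because heap references are 4-byte compressed values. The delegation shape, with stand-in types in place of LIR*, RegStorage, and the real class:

// Only the shape of the wrapper layer is the point here; types are stand-ins.
enum OpSize { k32, k64, kWord, kReference };
enum VolatileKind { kNotVolatile, kVolatile };
using Reg = int;

struct CodegenSketch {
  virtual ~CodegenSketch() = default;
  // Per-backend primitives (the only places that know about barriers):
  virtual void LoadBaseDisp(Reg base, int disp, Reg dest, OpSize size, VolatileKind vk) = 0;
  virtual void LoadBaseIndexed(Reg base, Reg index, Reg dest, int scale, OpSize size) = 0;
  // Shared sugar, mirroring mir_to_lir.h:
  void Load32Disp(Reg base, int disp, Reg dest) {
    LoadBaseDisp(base, disp, dest, k32, kNotVolatile);
  }
  void LoadRefDisp(Reg base, int disp, Reg dest, VolatileKind vk) {
    LoadBaseDisp(base, disp, dest, kReference, vk);
  }
  void LoadRefIndexed(Reg base, Reg index, Reg dest) {
    LoadBaseIndexed(base, index, dest, 2, kReference);  // scale 2: 4-byte elements
  }
};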
diff --git a/compiler/dex/quick/ralloc_util.cc b/compiler/dex/quick/ralloc_util.cc
index 5bb0ee0..60eebe4 100644
--- a/compiler/dex/quick/ralloc_util.cc
+++ b/compiler/dex/quick/ralloc_util.cc
@@ -735,7 +735,7 @@ void Mir2Lir::FlushRegWide(RegStorage reg) {
}
int v_reg = mir_graph_->SRegToVReg(info1->SReg());
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
- StoreBaseDisp(TargetReg(kSp), VRegOffset(v_reg), reg, k64);
+ StoreBaseDisp(TargetReg(kSp), VRegOffset(v_reg), reg, k64, kNotVolatile);
}
} else {
RegisterInfo* info = GetRegInfo(reg);
@@ -743,7 +743,7 @@ void Mir2Lir::FlushRegWide(RegStorage reg) {
info->SetIsDirty(false);
int v_reg = mir_graph_->SRegToVReg(info->SReg());
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
- StoreBaseDisp(TargetReg(kSp), VRegOffset(v_reg), reg, k64);
+ StoreBaseDisp(TargetReg(kSp), VRegOffset(v_reg), reg, k64, kNotVolatile);
}
}
}
@@ -755,7 +755,7 @@ void Mir2Lir::FlushReg(RegStorage reg) {
info->SetIsDirty(false);
int v_reg = mir_graph_->SRegToVReg(info->SReg());
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
- StoreBaseDisp(TargetReg(kSp), VRegOffset(v_reg), reg, kWord);
+ StoreBaseDisp(TargetReg(kSp), VRegOffset(v_reg), reg, kWord, kNotVolatile);
}
}
diff --git a/compiler/dex/quick/x86/call_x86.cc b/compiler/dex/quick/x86/call_x86.cc
index 28195ab..425caec 100644
--- a/compiler/dex/quick/x86/call_x86.cc
+++ b/compiler/dex/quick/x86/call_x86.cc
@@ -295,7 +295,8 @@ void X86Mir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) {
setup_method_address_[0] = NewLIR1(kX86StartOfMethod, rs_rX86_ARG0.GetReg());
int displacement = SRegOffset(base_of_code_->s_reg_low);
// Native pointer - must be natural word size.
- setup_method_address_[1] = StoreBaseDisp(rs_rX86_SP, displacement, rs_rX86_ARG0, Gen64Bit() ? k64 : k32);
+ setup_method_address_[1] = StoreBaseDisp(rs_rX86_SP, displacement, rs_rX86_ARG0,
+ Gen64Bit() ? k64 : k32, kNotVolatile);
}
FreeTemp(rs_rX86_ARG0);
diff --git a/compiler/dex/quick/x86/codegen_x86.h b/compiler/dex/quick/x86/codegen_x86.h
index d482e58..70382c7 100644
--- a/compiler/dex/quick/x86/codegen_x86.h
+++ b/compiler/dex/quick/x86/codegen_x86.h
@@ -68,20 +68,16 @@ class X86Mir2Lir : public Mir2Lir {
LIR* CheckSuspendUsingLoad() OVERRIDE;
RegStorage LoadHelper(ThreadOffset<4> offset) OVERRIDE;
RegStorage LoadHelper(ThreadOffset<8> offset) OVERRIDE;
- LIR* LoadBaseDispVolatile(RegStorage r_base, int displacement, RegStorage r_dest,
- OpSize size) OVERRIDE;
LIR* LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
- OpSize size) OVERRIDE;
+ OpSize size, VolatileKind is_volatile) OVERRIDE;
LIR* LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest, int scale,
OpSize size) OVERRIDE;
LIR* LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, int displacement,
RegStorage r_dest, OpSize size) OVERRIDE;
LIR* LoadConstantNoClobber(RegStorage r_dest, int value);
LIR* LoadConstantWide(RegStorage r_dest, int64_t value);
- LIR* StoreBaseDispVolatile(RegStorage r_base, int displacement, RegStorage r_src,
- OpSize size) OVERRIDE;
LIR* StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src,
- OpSize size) OVERRIDE;
+ OpSize size, VolatileKind is_volatile) OVERRIDE;
LIR* StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src, int scale,
OpSize size) OVERRIDE;
LIR* StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, int displacement,
diff --git a/compiler/dex/quick/x86/fp_x86.cc b/compiler/dex/quick/x86/fp_x86.cc
index 5082d60..f854adb 100644
--- a/compiler/dex/quick/x86/fp_x86.cc
+++ b/compiler/dex/quick/x86/fp_x86.cc
@@ -144,7 +144,7 @@ void X86Mir2Lir::GenLongToFP(RegLocation rl_dest, RegLocation rl_src, bool is_do
} else {
// It must have been register promoted if it is not a temp but is still in physical
// register. Since we need it to be in memory to convert, we place it there now.
- StoreBaseDisp(TargetReg(kSp), src_v_reg_offset, rl_src.reg, k64);
+ StoreBaseDisp(TargetReg(kSp), src_v_reg_offset, rl_src.reg, k64, kNotVolatile);
}
}
@@ -178,7 +178,7 @@ void X86Mir2Lir::GenLongToFP(RegLocation rl_dest, RegLocation rl_src, bool is_do
*/
rl_result = EvalLoc(rl_dest, kFPReg, true);
if (is_double) {
- LoadBaseDisp(TargetReg(kSp), dest_v_reg_offset, rl_result.reg, k64);
+ LoadBaseDisp(TargetReg(kSp), dest_v_reg_offset, rl_result.reg, k64, kNotVolatile);
StoreFinalValueWide(rl_dest, rl_result);
} else {
@@ -363,7 +363,8 @@ void X86Mir2Lir::GenRemFP(RegLocation rl_dest, RegLocation rl_src1, RegLocation
} else {
// It must have been register promoted if it is not a temp but is still in physical
// register. Since we need it to be in memory to convert, we place it there now.
- StoreBaseDisp(TargetReg(kSp), src1_v_reg_offset, rl_src1.reg, is_double ? k64 : k32);
+ StoreBaseDisp(TargetReg(kSp), src1_v_reg_offset, rl_src1.reg, is_double ? k64 : k32,
+ kNotVolatile);
}
}
@@ -373,7 +374,8 @@ void X86Mir2Lir::GenRemFP(RegLocation rl_dest, RegLocation rl_src1, RegLocation
FlushSpecificReg(reg_info);
ResetDef(rl_src2.reg);
} else {
- StoreBaseDisp(TargetReg(kSp), src2_v_reg_offset, rl_src2.reg, is_double ? k64 : k32);
+ StoreBaseDisp(TargetReg(kSp), src2_v_reg_offset, rl_src2.reg, is_double ? k64 : k32,
+ kNotVolatile);
}
}
@@ -433,7 +435,7 @@ void X86Mir2Lir::GenRemFP(RegLocation rl_dest, RegLocation rl_src1, RegLocation
if (rl_result.location == kLocPhysReg) {
rl_result = EvalLoc(rl_dest, kFPReg, true);
if (is_double) {
- LoadBaseDisp(TargetReg(kSp), dest_v_reg_offset, rl_result.reg, k64);
+ LoadBaseDisp(TargetReg(kSp), dest_v_reg_offset, rl_result.reg, k64, kNotVolatile);
StoreFinalValueWide(rl_dest, rl_result);
} else {
Load32Disp(TargetReg(kSp), dest_v_reg_offset, rl_result.reg);
diff --git a/compiler/dex/quick/x86/int_x86.cc b/compiler/dex/quick/x86/int_x86.cc
index 2f914c1..350cfb8 100644
--- a/compiler/dex/quick/x86/int_x86.cc
+++ b/compiler/dex/quick/x86/int_x86.cc
@@ -754,7 +754,7 @@ bool X86Mir2Lir::GenInlinedPeek(CallInfo* info, OpSize size) {
RegLocation rl_address = LoadValue(rl_src_address, kCoreReg);
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
// Unaligned access is allowed on x86.
- LoadBaseDisp(rl_address.reg, 0, rl_result.reg, size);
+ LoadBaseDisp(rl_address.reg, 0, rl_result.reg, size, kNotVolatile);
if (size == k64) {
StoreValueWide(rl_dest, rl_result);
} else {
@@ -772,12 +772,12 @@ bool X86Mir2Lir::GenInlinedPoke(CallInfo* info, OpSize size) {
if (size == k64) {
// Unaligned access is allowed on x86.
RegLocation rl_value = LoadValueWide(rl_src_value, kCoreReg);
- StoreBaseDisp(rl_address.reg, 0, rl_value.reg, size);
+ StoreBaseDisp(rl_address.reg, 0, rl_value.reg, size, kNotVolatile);
} else {
DCHECK(size == kSignedByte || size == kSignedHalf || size == k32);
// Unaligned access is allowed on x86.
RegLocation rl_value = LoadValue(rl_src_value, kCoreReg);
- StoreBaseDisp(rl_address.reg, 0, rl_value.reg, size);
+ StoreBaseDisp(rl_address.reg, 0, rl_value.reg, size, kNotVolatile);
}
return true;
}
@@ -1138,7 +1138,7 @@ void X86Mir2Lir::GenImulMemImm(RegStorage dest, int sreg, int displacement, int
NewLIR2(kX86Xor32RR, dest.GetReg(), dest.GetReg());
break;
case 1:
- LoadBaseDisp(rs_rX86_SP, displacement, dest, k32);
+ LoadBaseDisp(rs_rX86_SP, displacement, dest, k32, kNotVolatile);
break;
default:
m = NewLIR4(IS_SIMM8(val) ? kX86Imul32RMI8 : kX86Imul32RMI, dest.GetReg(),
@@ -1294,7 +1294,8 @@ void X86Mir2Lir::GenMulLong(Instruction::Code, RegLocation rl_dest, RegLocation
if (src1_in_reg) {
NewLIR2(kX86Mov32RR, rs_r1.GetReg(), rl_src1.reg.GetHighReg());
} else {
- LoadBaseDisp(rs_rX86_SP, SRegOffset(rl_src1.s_reg_low) + HIWORD_OFFSET, rs_r1, k32);
+ LoadBaseDisp(rs_rX86_SP, SRegOffset(rl_src1.s_reg_low) + HIWORD_OFFSET, rs_r1, k32,
+ kNotVolatile);
}
if (is_square) {
@@ -1317,7 +1318,8 @@ void X86Mir2Lir::GenMulLong(Instruction::Code, RegLocation rl_dest, RegLocation
if (src2_in_reg) {
NewLIR2(kX86Mov32RR, rs_r0.GetReg(), rl_src2.reg.GetHighReg());
} else {
- LoadBaseDisp(rs_rX86_SP, SRegOffset(rl_src2.s_reg_low) + HIWORD_OFFSET, rs_r0, k32);
+ LoadBaseDisp(rs_rX86_SP, SRegOffset(rl_src2.s_reg_low) + HIWORD_OFFSET, rs_r0, k32,
+ kNotVolatile);
}
// EAX <- EAX * 1L (2H * 1L)
@@ -1350,7 +1352,8 @@ void X86Mir2Lir::GenMulLong(Instruction::Code, RegLocation rl_dest, RegLocation
if (src2_in_reg) {
NewLIR2(kX86Mov32RR, rs_r0.GetReg(), rl_src2.reg.GetLowReg());
} else {
- LoadBaseDisp(rs_rX86_SP, SRegOffset(rl_src2.s_reg_low) + LOWORD_OFFSET, rs_r0, k32);
+ LoadBaseDisp(rs_rX86_SP, SRegOffset(rl_src2.s_reg_low) + LOWORD_OFFSET, rs_r0, k32,
+ kNotVolatile);
}
// EDX:EAX <- 2L * 1L (double precision)
@@ -2289,21 +2292,21 @@ void X86Mir2Lir::GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx,
if (rl_method.location == kLocPhysReg) {
if (use_declaring_class) {
LoadRefDisp(rl_method.reg, mirror::ArtMethod::DeclaringClassOffset().Int32Value(),
- check_class);
+ check_class, kNotVolatile);
} else {
LoadRefDisp(rl_method.reg, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
- check_class);
- LoadRefDisp(check_class, offset_of_type, check_class);
+ check_class, kNotVolatile);
+ LoadRefDisp(check_class, offset_of_type, check_class, kNotVolatile);
}
} else {
LoadCurrMethodDirect(check_class);
if (use_declaring_class) {
LoadRefDisp(check_class, mirror::ArtMethod::DeclaringClassOffset().Int32Value(),
- check_class);
+ check_class, kNotVolatile);
} else {
LoadRefDisp(check_class, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
- check_class);
- LoadRefDisp(check_class, offset_of_type, check_class);
+ check_class, kNotVolatile);
+ LoadRefDisp(check_class, offset_of_type, check_class, kNotVolatile);
}
}
@@ -2350,16 +2353,16 @@ void X86Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_k
} else if (use_declaring_class) {
LoadValueDirectFixed(rl_src, TargetReg(kArg0));
LoadRefDisp(TargetReg(kArg1), mirror::ArtMethod::DeclaringClassOffset().Int32Value(),
- class_reg);
+ class_reg, kNotVolatile);
} else {
// Load dex cache entry into class_reg (kArg2).
LoadValueDirectFixed(rl_src, TargetReg(kArg0));
LoadRefDisp(TargetReg(kArg1), mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
- class_reg);
+ class_reg, kNotVolatile);
int32_t offset_of_type =
mirror::Array::DataOffset(sizeof(mirror::HeapReference<mirror::Class*>)).Int32Value() +
(sizeof(mirror::HeapReference<mirror::Class*>) * type_idx);
- LoadRefDisp(class_reg, offset_of_type, class_reg);
+ LoadRefDisp(class_reg, offset_of_type, class_reg, kNotVolatile);
if (!can_assume_type_is_in_dex_cache) {
// Need to test presence of type in dex cache at runtime.
LIR* hop_branch = OpCmpImmBranch(kCondNe, class_reg, 0, NULL);
@@ -2392,7 +2395,8 @@ void X86Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_k
/* Load object->klass_. */
DCHECK_EQ(mirror::Object::ClassOffset().Int32Value(), 0);
- LoadRefDisp(TargetReg(kArg0), mirror::Object::ClassOffset().Int32Value(), TargetReg(kArg1));
+ LoadRefDisp(TargetReg(kArg0), mirror::Object::ClassOffset().Int32Value(), TargetReg(kArg1),
+ kNotVolatile);
/* kArg0 is ref, kArg1 is ref->klass_, kArg2 is class. */
LIR* branchover = nullptr;
if (type_known_final) {
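
The instanceof hunks are again plain reference chases: load either the method's declaring class or its resolved-types cache entry for type_idx, then compare against the object's klass_. A hypothetical model with placeholder layouts (not the real mirror classes):

// Placeholder model of the chain GenInstanceofFinal walks with
// LoadRefDisp(..., kNotVolatile); names and layouts are illustrative only.
struct Class;
struct Object    { Class* klass_; };
struct TypeCache { Class* resolved_[16]; };
struct ArtMethodSketch {
  Class*     declaring_class_;
  TypeCache* dex_cache_resolved_types_;
};

bool IsInstanceOfFinalClass(const ArtMethodSketch* m, const Object* obj,
                            int type_idx, bool use_declaring_class) {
  Class* check_class = use_declaring_class
      ? m->declaring_class_
      : m->dex_cache_resolved_types_->resolved_[type_idx];
  // A final class has no subclasses, so an exact pointer compare suffices.
  return obj != nullptr && obj->klass_ == check_class;
}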
diff --git a/compiler/dex/quick/x86/target_x86.cc b/compiler/dex/quick/x86/target_x86.cc
index 078dd5a..e369d26 100644
--- a/compiler/dex/quick/x86/target_x86.cc
+++ b/compiler/dex/quick/x86/target_x86.cc
@@ -1866,7 +1866,7 @@ void X86Mir2Lir::FlushIns(RegLocation* ArgLocs, RegLocation rl_method) {
StoreValue(rl_method, rl_src);
// If Method* has been promoted, explicitly flush
if (rl_method.location == kLocPhysReg) {
- StoreRefDisp(TargetReg(kSp), 0, TargetReg(kArg0));
+ StoreRefDisp(TargetReg(kSp), 0, TargetReg(kArg0), kNotVolatile);
}
if (cu_->num_ins == 0) {
@@ -1916,11 +1916,11 @@ void X86Mir2Lir::FlushIns(RegLocation* ArgLocs, RegLocation rl_method) {
}
if (need_flush) {
if (t_loc->wide && t_loc->fp) {
- StoreBaseDisp(TargetReg(kSp), SRegOffset(start_vreg + i), reg, k64);
+ StoreBaseDisp(TargetReg(kSp), SRegOffset(start_vreg + i), reg, k64, kNotVolatile);
// Increment i to skip the next one
i++;
} else if (t_loc->wide && !t_loc->fp) {
- StoreBaseDisp(TargetReg(kSp), SRegOffset(start_vreg + i), reg, k64);
+ StoreBaseDisp(TargetReg(kSp), SRegOffset(start_vreg + i), reg, k64, kNotVolatile);
// Increment i to skip the next one
i++;
} else {
@@ -2018,14 +2018,14 @@ int X86Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state,
loc = UpdateLocWide(loc);
if (loc.location == kLocPhysReg) {
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
- StoreBaseDisp(TargetReg(kSp), SRegOffset(loc.s_reg_low), loc.reg, k64);
+ StoreBaseDisp(TargetReg(kSp), SRegOffset(loc.s_reg_low), loc.reg, k64, kNotVolatile);
}
next_arg += 2;
} else {
loc = UpdateLoc(loc);
if (loc.location == kLocPhysReg) {
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
- StoreBaseDisp(TargetReg(kSp), SRegOffset(loc.s_reg_low), loc.reg, k32);
+ StoreBaseDisp(TargetReg(kSp), SRegOffset(loc.s_reg_low), loc.reg, k32, kNotVolatile);
}
next_arg++;
}
@@ -2161,18 +2161,18 @@ int X86Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state,
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
if (rl_arg.wide) {
if (rl_arg.location == kLocPhysReg) {
- StoreBaseDisp(TargetReg(kSp), out_offset, rl_arg.reg, k64);
+ StoreBaseDisp(TargetReg(kSp), out_offset, rl_arg.reg, k64, kNotVolatile);
} else {
LoadValueDirectWideFixed(rl_arg, regWide);
- StoreBaseDisp(TargetReg(kSp), out_offset, regWide, k64);
+ StoreBaseDisp(TargetReg(kSp), out_offset, regWide, k64, kNotVolatile);
}
i++;
} else {
if (rl_arg.location == kLocPhysReg) {
- StoreBaseDisp(TargetReg(kSp), out_offset, rl_arg.reg, k32);
+ StoreBaseDisp(TargetReg(kSp), out_offset, rl_arg.reg, k32, kNotVolatile);
} else {
LoadValueDirectFixed(rl_arg, regSingle);
- StoreBaseDisp(TargetReg(kSp), out_offset, regSingle, k32);
+ StoreBaseDisp(TargetReg(kSp), out_offset, regSingle, k32, kNotVolatile);
}
}
}
diff --git a/compiler/dex/quick/x86/utility_x86.cc b/compiler/dex/quick/x86/utility_x86.cc
index ac5162e..0352808 100644
--- a/compiler/dex/quick/x86/utility_x86.cc
+++ b/compiler/dex/quick/x86/utility_x86.cc
@@ -585,7 +585,7 @@ LIR* X86Mir2Lir::LoadConstantWide(RegStorage r_dest, int64_t value) {
// value.
ScopedMemRefType mem_ref_type(this, ResourceMask::kLiteral);
res = LoadBaseDisp(rl_method.reg, 256 /* bogus */, RegStorage::FloatSolo64(low_reg_val),
- kDouble);
+ kDouble, kNotVolatile);
res->target = data_target;
res->flags.fixup = kFixupLoad;
store_method_addr_used_ = true;
@@ -756,17 +756,22 @@ LIR* X86Mir2Lir::LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStora
return LoadBaseIndexedDisp(r_base, r_index, scale, 0, r_dest, size);
}
-LIR* X86Mir2Lir::LoadBaseDispVolatile(RegStorage r_base, int displacement, RegStorage r_dest,
- OpSize size) {
+LIR* X86Mir2Lir::LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
+ OpSize size, VolatileKind is_volatile) {
// LoadBaseDisp() will emit correct insn for atomic load on x86
// assuming r_dest is correctly prepared using RegClassForFieldLoadStore().
- return LoadBaseDisp(r_base, displacement, r_dest, size);
-}
-LIR* X86Mir2Lir::LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
- OpSize size) {
- return LoadBaseIndexedDisp(r_base, RegStorage::InvalidReg(), 0, displacement, r_dest,
- size);
+ LIR* load = LoadBaseIndexedDisp(r_base, RegStorage::InvalidReg(), 0, displacement, r_dest,
+ size);
+
+ if (UNLIKELY(is_volatile == kVolatile)) {
+ // Without context sensitive analysis, we must issue the most conservative barriers.
+ // In this case, either a load or store may follow so we issue both barriers.
+ GenMemBarrier(kLoadLoad);
+ GenMemBarrier(kLoadStore);
+ }
+
+ return load;
}
LIR* X86Mir2Lir::StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale,
@@ -854,20 +859,28 @@ LIR* X86Mir2Lir::StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int
/* store value at base + scaled index. */
LIR* X86Mir2Lir::StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src,
- int scale, OpSize size) {
+ int scale, OpSize size) {
return StoreBaseIndexedDisp(r_base, r_index, scale, 0, r_src, size);
}
-LIR* X86Mir2Lir::StoreBaseDispVolatile(RegStorage r_base, int displacement,
- RegStorage r_src, OpSize size) {
+LIR* X86Mir2Lir::StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src, OpSize size,
+ VolatileKind is_volatile) {
+ if (UNLIKELY(is_volatile == kVolatile)) {
+ // There might have been a store before this volatile one so insert StoreStore barrier.
+ GenMemBarrier(kStoreStore);
+ }
+
// StoreBaseDisp() will emit correct insn for atomic store on x86
// assuming r_dest is correctly prepared using RegClassForFieldLoadStore().
- return StoreBaseDisp(r_base, displacement, r_src, size);
-}
-LIR* X86Mir2Lir::StoreBaseDisp(RegStorage r_base, int displacement,
- RegStorage r_src, OpSize size) {
- return StoreBaseIndexedDisp(r_base, RegStorage::InvalidReg(), 0, displacement, r_src, size);
+ LIR* store = StoreBaseIndexedDisp(r_base, RegStorage::InvalidReg(), 0, displacement, r_src, size);
+
+ if (UNLIKELY(is_volatile == kVolatile)) {
+ // A load might follow the volatile store so insert a StoreLoad barrier.
+ GenMemBarrier(kStoreLoad);
+ }
+
+ return store;
}
LIR* X86Mir2Lir::OpCmpMemImmBranch(ConditionCode cond, RegStorage temp_reg, RegStorage base_reg,
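
For x86 the notable part is what is not emitted: an aligned mov of the sizes RegClassForFieldLoadStore allows is already atomic, and on x86's strong memory model only store-to-load reordering needs a fence, so a volatile load adds no fence instruction while a volatile store pays for the kStoreLoad barrier (an mfence or a locked operation). As a rough model of that behaviour, using std::atomic as a stand-in for the generated code rather than the emitted LIR:

// The acquire load compiles to a plain mov; the seq_cst store compiles to
// mov + mfence (or an xchg), matching the single hardware barrier added above.
#include <atomic>
#include <cstdint>

int32_t LoadVolatileX86(const std::atomic<int32_t>& addr) {
  return addr.load(std::memory_order_acquire);
}

void StoreVolatileX86(std::atomic<int32_t>& addr, int32_t value) {
  addr.store(value, std::memory_order_seq_cst);
}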