Diffstat (limited to 'compiler/dex/quick/gen_common.cc')
-rw-r--r--  compiler/dex/quick/gen_common.cc | 129
1 file changed, 59 insertions(+), 70 deletions(-)
diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc
index b00cbeb..e36b592 100644
--- a/compiler/dex/quick/gen_common.cc
+++ b/compiler/dex/quick/gen_common.cc
@@ -196,15 +196,6 @@ void Mir2Lir::MarkPossibleNullPointerException(int opt_flags) {
}
}
-void Mir2Lir::MarkPossibleNullPointerExceptionAfter(int opt_flags, LIR* after) {
- if (!cu_->compiler_driver->GetCompilerOptions().GetExplicitNullChecks()) {
- if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) {
- return;
- }
- MarkSafepointPCAfter(after);
- }
-}
-
void Mir2Lir::MarkPossibleStackOverflowException() {
if (!cu_->compiler_driver->GetCompilerOptions().GetExplicitStackOverflowChecks()) {
MarkSafepointPC(last_lir_insn_);
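For context on the markers above: with explicit null checks disabled, the compiler emits no compare-and-branch; the load itself faults on null and a signal handler maps the recorded safepoint PC back to a NullPointerException. A minimal stand-alone sketch of that idea, assuming none of ART's fault_handler machinery:

    #include <csignal>
    #include <cstdlib>
    #include <unistd.h>

    // Illustrative only: a real handler would look up the faulting PC in the
    // safepoint table (what MarkSafepointPC records) and raise
    // java.lang.NullPointerException instead of exiting.
    static void HandleSegv(int) {
      const char msg[] = "implicit null check fired\n";
      write(STDERR_FILENO, msg, sizeof(msg) - 1);  // async-signal-safe
      std::_Exit(1);
    }

    int main() {
      std::signal(SIGSEGV, HandleSegv);
      volatile int* p = nullptr;
      return *p;  // the faulting load stands in for the compiled null check
    }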
@@ -515,7 +506,7 @@ void Mir2Lir::GenFilledNewArray(CallInfo* info) {
for (int i = 0; i < elems; i++) {
RegLocation rl_arg = LoadValue(info->args[i], kCoreReg);
Store32Disp(TargetReg(kRet0),
- mirror::Array::DataOffset(component_size).Int32Value() + i * 4, rl_arg.reg);
+ mirror::Array::DataOffset(component_size).Int32Value() + i * 4, rl_arg.reg);
// If the LoadValue caused a temp to be allocated, free it
if (IsTemp(rl_arg.reg)) {
FreeTemp(rl_arg.reg);
@@ -584,8 +575,7 @@ void Mir2Lir::GenSput(MIR* mir, RegLocation rl_src, bool is_long_or_double,
// Fast path, static storage base is this method's class
RegLocation rl_method = LoadCurrMethod();
r_base = AllocTempRef();
- LoadRefDisp(rl_method.reg, mirror::ArtMethod::DeclaringClassOffset().Int32Value(), r_base,
- kNotVolatile);
+ LoadRefDisp(rl_method.reg, mirror::ArtMethod::DeclaringClassOffset().Int32Value(), r_base);
if (IsTemp(rl_method.reg)) {
FreeTemp(rl_method.reg);
}
@@ -602,10 +592,9 @@ void Mir2Lir::GenSput(MIR* mir, RegLocation rl_src, bool is_long_or_double,
LoadCurrMethodDirect(r_method);
r_base = TargetReg(kArg0);
LockTemp(r_base);
- LoadRefDisp(r_method, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(), r_base,
- kNotVolatile);
+ LoadRefDisp(r_method, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(), r_base);
int32_t offset_of_field = ObjArray::OffsetOfElement(field_info.StorageIndex()).Int32Value();
- LoadRefDisp(r_base, offset_of_field, r_base, kNotVolatile);
+ LoadRefDisp(r_base, offset_of_field, r_base);
// r_base now points at static storage (Class*) or NULL if the type is not yet resolved.
if (!field_info.IsInitialized() &&
(mir->optimization_flags & MIR_IGNORE_CLINIT_CHECK) == 0) {
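The two back-to-back LoadRefDisp calls above are the usual dex-cache walk: method, then resolved-types array, then entry. Roughly, in plain C++ (field names here are stand-ins, not ART's real object layout):

    #include <cstddef>

    struct ArtMethodLike { void** dex_cache_resolved_types; };

    // LoadRefDisp #1 pulls the resolved-types array out of the method;
    // LoadRefDisp #2 indexes it at OffsetOfElement(StorageIndex()).
    inline void* LoadResolvedType(ArtMethodLike* method, size_t storage_index) {
      void** types = method->dex_cache_resolved_types;
      return types[storage_index];  // NULL if the type is not yet resolved
    }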
@@ -637,12 +626,14 @@ void Mir2Lir::GenSput(MIR* mir, RegLocation rl_src, bool is_long_or_double,
} else {
rl_src = LoadValue(rl_src, reg_class);
}
- if (is_object) {
- StoreRefDisp(r_base, field_info.FieldOffset().Int32Value(), rl_src.reg,
- field_info.IsVolatile() ? kVolatile : kNotVolatile);
+ if (field_info.IsVolatile()) {
+ // There might have been a store before this volatile one so insert StoreStore barrier.
+ GenMemBarrier(kStoreStore);
+ StoreBaseDispVolatile(r_base, field_info.FieldOffset().Int32Value(), rl_src.reg, store_size);
+ // A load might follow the volatile store so insert a StoreLoad barrier.
+ GenMemBarrier(kStoreLoad);
} else {
- StoreBaseDisp(r_base, field_info.FieldOffset().Int32Value(), rl_src.reg, store_size,
- field_info.IsVolatile() ? kVolatile : kNotVolatile);
+ StoreBaseDisp(r_base, field_info.FieldOffset().Int32Value(), rl_src.reg, store_size);
}
if (is_object && !mir_graph_->IsConstantNullRef(rl_src)) {
MarkGCCard(rl_src.reg, r_base);
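The volatile-store sequence restored above (kStoreStore fence, store, kStoreLoad fence) maps onto C++11 fences roughly as follows; this is an analogy, not what Mir2Lir actually emits:

    #include <atomic>
    #include <cstdint>

    inline void VolatileStoreSketch(std::atomic<int32_t>& field, int32_t value) {
      // A release fence is at least as strong as kStoreStore: earlier stores
      // cannot be reordered past the volatile store.
      std::atomic_thread_fence(std::memory_order_release);
      field.store(value, std::memory_order_relaxed);  // StoreBaseDispVolatile
      // A seq_cst fence provides the kStoreLoad ordering against later loads.
      std::atomic_thread_fence(std::memory_order_seq_cst);
    }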
@@ -681,8 +672,7 @@ void Mir2Lir::GenSget(MIR* mir, RegLocation rl_dest,
// Fast path, static storage base is this method's class
RegLocation rl_method = LoadCurrMethod();
r_base = AllocTempRef();
- LoadRefDisp(rl_method.reg, mirror::ArtMethod::DeclaringClassOffset().Int32Value(), r_base,
- kNotVolatile);
+ LoadRefDisp(rl_method.reg, mirror::ArtMethod::DeclaringClassOffset().Int32Value(), r_base);
} else {
// Medium path, static storage base in a different class which requires checks that the other
// class is initialized
@@ -695,10 +685,9 @@ void Mir2Lir::GenSget(MIR* mir, RegLocation rl_dest,
LoadCurrMethodDirect(r_method);
r_base = TargetReg(kArg0);
LockTemp(r_base);
- LoadRefDisp(r_method, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(), r_base,
- kNotVolatile);
+ LoadRefDisp(r_method, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(), r_base);
int32_t offset_of_field = ObjArray::OffsetOfElement(field_info.StorageIndex()).Int32Value();
- LoadRefDisp(r_base, offset_of_field, r_base, kNotVolatile);
+ LoadRefDisp(r_base, offset_of_field, r_base);
// r_base now points at static storage (Class*) or NULL if the type is not yet resolved.
if (!field_info.IsInitialized() &&
(mir->optimization_flags & MIR_IGNORE_CLINIT_CHECK) == 0) {
@@ -728,12 +717,14 @@ void Mir2Lir::GenSget(MIR* mir, RegLocation rl_dest,
RegLocation rl_result = EvalLoc(rl_dest, reg_class, true);
int field_offset = field_info.FieldOffset().Int32Value();
- if (is_object) {
- LoadRefDisp(r_base, field_offset, rl_result.reg, field_info.IsVolatile() ? kVolatile :
- kNotVolatile);
+ if (field_info.IsVolatile()) {
+ LoadBaseDispVolatile(r_base, field_offset, rl_result.reg, load_size);
+ // Without context sensitive analysis, we must issue the most conservative barriers.
+ // In this case, either a load or store may follow so we issue both barriers.
+ GenMemBarrier(kLoadLoad);
+ GenMemBarrier(kLoadStore);
} else {
- LoadBaseDisp(r_base, field_offset, rl_result.reg, load_size, field_info.IsVolatile() ?
- kVolatile : kNotVolatile);
+ LoadBaseDisp(r_base, field_offset, rl_result.reg, load_size);
}
FreeTemp(r_base);
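On the load side, issuing kLoadLoad plus kLoadStore after the load is what the comment calls the conservative choice; together they approximate acquire semantics. A C++11 analogue, again only a sketch:

    #include <atomic>
    #include <cstdint>

    inline int32_t VolatileLoadSketch(const std::atomic<int32_t>& field) {
      int32_t value = field.load(std::memory_order_relaxed);  // LoadBaseDispVolatile
      // An acquire fence after the load covers both kLoadLoad and kLoadStore:
      // later loads and stores cannot move above the volatile load.
      std::atomic_thread_fence(std::memory_order_acquire);
      return value;
    }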
@@ -794,15 +785,17 @@ void Mir2Lir::GenIGet(MIR* mir, int opt_flags, OpSize size,
GenNullCheck(rl_obj.reg, opt_flags);
RegLocation rl_result = EvalLoc(rl_dest, reg_class, true);
int field_offset = field_info.FieldOffset().Int32Value();
- LIR* load_lir;
- if (is_object) {
- load_lir = LoadRefDisp(rl_obj.reg, field_offset, rl_result.reg, field_info.IsVolatile() ?
- kVolatile : kNotVolatile);
+ if (field_info.IsVolatile()) {
+ LoadBaseDispVolatile(rl_obj.reg, field_offset, rl_result.reg, load_size);
+ MarkPossibleNullPointerException(opt_flags);
+ // Without context sensitive analysis, we must issue the most conservative barriers.
+ // In this case, either a load or store may follow so we issue both barriers.
+ GenMemBarrier(kLoadLoad);
+ GenMemBarrier(kLoadStore);
} else {
- load_lir = LoadBaseDisp(rl_obj.reg, field_offset, rl_result.reg, load_size,
- field_info.IsVolatile() ? kVolatile : kNotVolatile);
+ LoadBaseDisp(rl_obj.reg, field_offset, rl_result.reg, load_size);
+ MarkPossibleNullPointerException(opt_flags);
}
- MarkPossibleNullPointerExceptionAfter(opt_flags, load_lir);
if (is_long_or_double) {
StoreValueWide(rl_dest, rl_result);
} else {
@@ -854,15 +847,17 @@ void Mir2Lir::GenIPut(MIR* mir, int opt_flags, OpSize size,
}
GenNullCheck(rl_obj.reg, opt_flags);
int field_offset = field_info.FieldOffset().Int32Value();
- LIR* store;
- if (is_object) {
- store = StoreRefDisp(rl_obj.reg, field_offset, rl_src.reg, field_info.IsVolatile() ?
- kVolatile : kNotVolatile);
+ if (field_info.IsVolatile()) {
+ // There might have been a store before this volatile one so insert StoreStore barrier.
+ GenMemBarrier(kStoreStore);
+ StoreBaseDispVolatile(rl_obj.reg, field_offset, rl_src.reg, store_size);
+ MarkPossibleNullPointerException(opt_flags);
+ // A load might follow the volatile store so insert a StoreLoad barrier.
+ GenMemBarrier(kStoreLoad);
} else {
- store = StoreBaseDisp(rl_obj.reg, field_offset, rl_src.reg, store_size,
- field_info.IsVolatile() ? kVolatile : kNotVolatile);
+ StoreBaseDisp(rl_obj.reg, field_offset, rl_src.reg, store_size);
+ MarkPossibleNullPointerException(opt_flags);
}
- MarkPossibleNullPointerExceptionAfter(opt_flags, store);
if (is_object && !mir_graph_->IsConstantNullRef(rl_src)) {
MarkGCCard(rl_src.reg, rl_obj.reg);
}
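The MarkGCCard in the object-store paths dirties the card covering the holding object so the GC rescans it for the new reference. In outline (the shift and dirty value mirror ART's card table, but take them as assumptions here):

    #include <cstddef>
    #include <cstdint>

    static constexpr size_t kCardShiftSketch = 10;     // 1 KiB cards (assumption)
    static constexpr uint8_t kCardDirtySketch = 0x70;  // "dirty" marker (assumption)

    // biased_card_base is pre-offset so that indexing by (address >> shift)
    // lands on the right card; the call is skipped above when the stored
    // value is a constant null reference.
    inline void MarkCardSketch(uint8_t* biased_card_base, const void* holder) {
      biased_card_base[reinterpret_cast<uintptr_t>(holder) >> kCardShiftSketch] =
          kCardDirtySketch;
    }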
@@ -921,9 +916,9 @@ void Mir2Lir::GenConstClass(uint32_t type_idx, RegLocation rl_dest) {
// We don't need access checks, load type from dex cache
int32_t dex_cache_offset =
mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value();
- LoadRefDisp(rl_method.reg, dex_cache_offset, res_reg, kNotVolatile);
+ LoadRefDisp(rl_method.reg, dex_cache_offset, res_reg);
int32_t offset_of_type = ClassArray::OffsetOfElement(type_idx).Int32Value();
- LoadRefDisp(res_reg, offset_of_type, rl_result.reg, kNotVolatile);
+ LoadRefDisp(res_reg, offset_of_type, rl_result.reg);
if (!cu_->compiler_driver->CanAssumeTypeIsPresentInDexCache(*cu_->dex_file,
type_idx) || SLOW_TYPE_PATH) {
// Slow path, at runtime test if type is null and if so initialize
@@ -994,10 +989,10 @@ void Mir2Lir::GenConstString(uint32_t string_idx, RegLocation rl_dest) {
LoadCurrMethodDirect(r_method);
}
LoadRefDisp(r_method, mirror::ArtMethod::DexCacheStringsOffset().Int32Value(),
- TargetReg(kArg0), kNotVolatile);
+ TargetReg(kArg0));
// Might call out to helper, which will return resolved string in kRet0
- LoadRefDisp(TargetReg(kArg0), offset_of_string, TargetReg(kRet0), kNotVolatile);
+ LoadRefDisp(TargetReg(kArg0), offset_of_string, TargetReg(kRet0));
LIR* fromfast = OpCmpImmBranch(kCondEq, TargetReg(kRet0), 0, NULL);
LIR* cont = NewLIR0(kPseudoTargetLabel);
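The branch structure in this hunk is the usual fast/slow split: load the cached String, branch to a runtime helper when it is null, and converge at the cont label. Sketched in C++ (ResolveStringSlow is a hypothetical stand-in for the resolution entrypoint):

    #include <cstddef>

    static void* ResolveStringSlow(size_t /*string_idx*/) {
      return nullptr;  // stub standing in for the runtime resolution call
    }

    inline void* ConstStringSketch(void** dex_cache_strings, size_t string_idx) {
      void* s = dex_cache_strings[string_idx];  // fast path: cached load
      if (s == nullptr) {                       // fromfast: OpCmpImmBranch(kCondEq, ..., 0)
        s = ResolveStringSlow(string_idx);      // helper returns in kRet0
      }
      return s;                                 // cont: kPseudoTargetLabel
    }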
@@ -1036,9 +1031,8 @@ void Mir2Lir::GenConstString(uint32_t string_idx, RegLocation rl_dest) {
RegLocation rl_method = LoadCurrMethod();
RegStorage res_reg = AllocTempRef();
RegLocation rl_result = EvalLoc(rl_dest, kRefReg, true);
- LoadRefDisp(rl_method.reg, mirror::ArtMethod::DexCacheStringsOffset().Int32Value(), res_reg,
- kNotVolatile);
- LoadRefDisp(res_reg, offset_of_string, rl_result.reg, kNotVolatile);
+ LoadRefDisp(rl_method.reg, mirror::ArtMethod::DexCacheStringsOffset().Int32Value(), res_reg);
+ LoadRefDisp(res_reg, offset_of_string, rl_result.reg);
StoreValue(rl_dest, rl_result);
}
}
@@ -1139,17 +1133,14 @@ void Mir2Lir::GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx, Re
LoadCurrMethodDirect(check_class);
if (use_declaring_class) {
- LoadRefDisp(check_class, mirror::ArtMethod::DeclaringClassOffset().Int32Value(), check_class,
- kNotVolatile);
- LoadRefDisp(object.reg, mirror::Object::ClassOffset().Int32Value(), object_class,
- kNotVolatile);
+ LoadRefDisp(check_class, mirror::ArtMethod::DeclaringClassOffset().Int32Value(), check_class);
+ LoadRefDisp(object.reg, mirror::Object::ClassOffset().Int32Value(), object_class);
} else {
LoadRefDisp(check_class, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
- check_class, kNotVolatile);
- LoadRefDisp(object.reg, mirror::Object::ClassOffset().Int32Value(), object_class,
- kNotVolatile);
+ check_class);
+ LoadRefDisp(object.reg, mirror::Object::ClassOffset().Int32Value(), object_class);
int32_t offset_of_type = ClassArray::OffsetOfElement(type_idx).Int32Value();
- LoadRefDisp(check_class, offset_of_type, check_class, kNotVolatile);
+ LoadRefDisp(check_class, offset_of_type, check_class);
}
LIR* ne_branchover = NULL;
@@ -1205,14 +1196,14 @@ void Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_know
} else if (use_declaring_class) {
LoadValueDirectFixed(rl_src, TargetReg(kArg0)); // kArg0 <= ref
LoadRefDisp(TargetReg(kArg1), mirror::ArtMethod::DeclaringClassOffset().Int32Value(),
- class_reg, kNotVolatile);
+ class_reg);
} else {
// Load dex cache entry into class_reg (kArg2)
LoadValueDirectFixed(rl_src, TargetReg(kArg0)); // kArg0 <= ref
LoadRefDisp(TargetReg(kArg1), mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
- class_reg, kNotVolatile);
+ class_reg);
int32_t offset_of_type = ClassArray::OffsetOfElement(type_idx).Int32Value();
- LoadRefDisp(class_reg, offset_of_type, class_reg, kNotVolatile);
+ LoadRefDisp(class_reg, offset_of_type, class_reg);
if (!can_assume_type_is_in_dex_cache) {
// Need to test presence of type in dex cache at runtime
LIR* hop_branch = OpCmpImmBranch(kCondNe, class_reg, 0, NULL);
@@ -1240,8 +1231,7 @@ void Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_know
/* load object->klass_ */
DCHECK_EQ(mirror::Object::ClassOffset().Int32Value(), 0);
- LoadRefDisp(TargetReg(kArg0), mirror::Object::ClassOffset().Int32Value(), TargetReg(kArg1),
- kNotVolatile);
+ LoadRefDisp(TargetReg(kArg0), mirror::Object::ClassOffset().Int32Value(), TargetReg(kArg1));
/* kArg0 is ref, kArg1 is ref->klass_, kArg2 is class */
LIR* branchover = NULL;
if (type_known_final) {
@@ -1354,13 +1344,13 @@ void Mir2Lir::GenCheckCast(uint32_t insn_idx, uint32_t type_idx, RegLocation rl_
OpRegCopy(class_reg, TargetReg(kRet0)); // Align usage with fast path
} else if (use_declaring_class) {
LoadRefDisp(TargetReg(kArg1), mirror::ArtMethod::DeclaringClassOffset().Int32Value(),
- class_reg, kNotVolatile);
+ class_reg);
} else {
// Load dex cache entry into class_reg (kArg2)
LoadRefDisp(TargetReg(kArg1), mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
- class_reg, kNotVolatile);
+ class_reg);
int32_t offset_of_type = ClassArray::OffsetOfElement(type_idx).Int32Value();
- LoadRefDisp(class_reg, offset_of_type, class_reg, kNotVolatile);
+ LoadRefDisp(class_reg, offset_of_type, class_reg);
if (!cu_->compiler_driver->CanAssumeTypeIsPresentInDexCache(*cu_->dex_file, type_idx)) {
// Need to test presence of type in dex cache at runtime
LIR* hop_branch = OpCmpImmBranch(kCondEq, class_reg, 0, NULL);
@@ -1415,7 +1405,7 @@ void Mir2Lir::GenCheckCast(uint32_t insn_idx, uint32_t type_idx, RegLocation rl_
if (load_) {
m2l_->LoadRefDisp(m2l_->TargetReg(kArg0), mirror::Object::ClassOffset().Int32Value(),
- m2l_->TargetReg(kArg1), kNotVolatile);
+ m2l_->TargetReg(kArg1));
}
if (m2l_->cu_->target64) {
m2l_->CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(8, pCheckCast), m2l_->TargetReg(kArg2),
@@ -1446,8 +1436,7 @@ void Mir2Lir::GenCheckCast(uint32_t insn_idx, uint32_t type_idx, RegLocation rl_
LIR* branch1 = OpCmpImmBranch(kCondEq, TargetReg(kArg0), 0, NULL);
/* load object->klass_ */
DCHECK_EQ(mirror::Object::ClassOffset().Int32Value(), 0);
- LoadRefDisp(TargetReg(kArg0), mirror::Object::ClassOffset().Int32Value(), TargetReg(kArg1),
- kNotVolatile);
+ LoadRefDisp(TargetReg(kArg0), mirror::Object::ClassOffset().Int32Value(), TargetReg(kArg1));
LIR* branch2 = OpCmpBranch(kCondNe, TargetReg(kArg1), class_reg, NULL);
LIR* cont = NewLIR0(kPseudoTargetLabel);
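The final hunk is the check-cast fast path: a null reference passes trivially (branch1), otherwise load object->klass_ (the DCHECK pins its offset at 0) and compare it with the resolved class (branch2), falling into the slow path only on mismatch. As a sketch with stand-in types:

    struct ClassLike {};
    struct ObjectLike { ClassLike* klass; };  // ClassOffset() == 0 per the DCHECK

    // Returns true when the cast succeeds on the fast path; the compiled code
    // branches to a runtime helper (pCheckCast) on the false case instead.
    inline bool CheckCastFastPath(const ObjectLike* ref, const ClassLike* expected) {
      if (ref == nullptr) return true;  // branch1: null always passes check-cast
      return ref->klass == expected;    // branch2: exact-class fast path
    }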