-rw-r--r--  compiler/dex/quick/arm/call_arm.cc  29
-rw-r--r--  compiler/dex/quick/arm/codegen_arm.h  1
-rw-r--r--  compiler/dex/quick/arm/int_arm.cc  18
-rw-r--r--  compiler/dex/quick/arm/target_arm.cc  7
-rw-r--r--  compiler/dex/quick/gen_common.cc  118
-rw-r--r--  compiler/dex/quick/gen_invoke.cc  24
-rw-r--r--  compiler/dex/quick/mips/codegen_mips.h  1
-rw-r--r--  compiler/dex/quick/mips/int_mips.cc  4
-rw-r--r--  compiler/dex/quick/mips/target_mips.cc  8
-rw-r--r--  compiler/dex/quick/mir_to_lir.cc  2
-rw-r--r--  compiler/dex/quick/mir_to_lir.h  6
-rw-r--r--  compiler/dex/quick/x86/codegen_x86.h  1
-rw-r--r--  compiler/dex/quick/x86/int_x86.cc  4
-rw-r--r--  compiler/dex/quick/x86/target_x86.cc  7
-rw-r--r--  runtime/Android.mk  15
-rw-r--r--  runtime/arch/arm/fault_handler_arm.cc  148
-rw-r--r--  runtime/arch/mips/fault_handler_mips.cc  46
-rw-r--r--  runtime/arch/x86/fault_handler_x86.cc  46
-rw-r--r--  runtime/arch/x86_64/fault_handler_x86_64.cc  46
-rw-r--r--  runtime/class_linker.cc  32
-rw-r--r--  runtime/fault_handler.cc  181
-rw-r--r--  runtime/fault_handler.h  91
-rw-r--r--  runtime/gc/space/image_space.cc  31
-rw-r--r--  runtime/mirror/art_method.cc  10
-rw-r--r--  runtime/mirror/art_method.h  3
-rw-r--r--  runtime/oat.cc  2
-rw-r--r--  runtime/parsed_options.cc  82
-rw-r--r--  runtime/parsed_options.h  5
-rw-r--r--  runtime/runtime.cc  30
-rw-r--r--  runtime/runtime.h  16
-rw-r--r--  runtime/stack.cc  4
-rw-r--r--  runtime/stack.h  2
-rw-r--r--  runtime/thread.cc  4
-rw-r--r--  runtime/thread.h  22
-rw-r--r--  runtime/thread_list.cc  1
35 files changed, 978 insertions, 69 deletions
diff --git a/compiler/dex/quick/arm/call_arm.cc b/compiler/dex/quick/arm/call_arm.cc
index 0fce5bb..bba3d40 100644
--- a/compiler/dex/quick/arm/call_arm.cc
+++ b/compiler/dex/quick/arm/call_arm.cc
@@ -214,8 +214,9 @@ void ArmMir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src) {
GenMemBarrier(kLoadLoad);
} else {
// Explicit null-check as slow-path is entered using an IT.
- GenNullCheck(rl_src.s_reg_low, r0, opt_flags);
+ GenNullCheck(r0, opt_flags);
LoadWordDisp(rARM_SELF, Thread::ThinLockIdOffset().Int32Value(), r2);
+ MarkPossibleNullPointerException(opt_flags);
NewLIR3(kThumb2Ldrex, r1, r0, mirror::Object::MonitorOffset().Int32Value() >> 2);
OpRegImm(kOpCmp, r1, 0);
OpIT(kCondEq, "");
@@ -273,8 +274,9 @@ void ArmMir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src) {
GenMemBarrier(kStoreLoad);
} else {
// Explicit null-check as slow-path is entered using an IT.
- GenNullCheck(rl_src.s_reg_low, r0, opt_flags);
+ GenNullCheck(r0, opt_flags);
LoadWordDisp(r0, mirror::Object::MonitorOffset().Int32Value(), r1); // Get lock
+ MarkPossibleNullPointerException(opt_flags);
LoadWordDisp(rARM_SELF, Thread::ThinLockIdOffset().Int32Value(), r2);
LoadConstantNoClobber(r3, 0);
// Is lock unheld on lock or held by us (==thread_id) on unlock?
@@ -340,8 +342,10 @@ void ArmMir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) {
Thread::kStackOverflowReservedBytes));
NewLIR0(kPseudoMethodEntry);
if (!skip_overflow_check) {
- /* Load stack limit */
- LoadWordDisp(rARM_SELF, Thread::StackEndOffset().Int32Value(), r12);
+ if (Runtime::Current()->ExplicitStackOverflowChecks()) {
+ /* Load stack limit */
+ LoadWordDisp(rARM_SELF, Thread::StackEndOffset().Int32Value(), r12);
+ }
}
/* Spill core callee saves */
NewLIR1(kThumb2Push, core_spill_mask_);
@@ -355,9 +359,20 @@ void ArmMir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) {
NewLIR1(kThumb2VPushCS, num_fp_spills_);
}
if (!skip_overflow_check) {
- OpRegRegImm(kOpSub, rARM_LR, rARM_SP, frame_size_ - (spill_count * 4));
- GenRegRegCheck(kCondUlt, rARM_LR, r12, kThrowStackOverflow);
- OpRegCopy(rARM_SP, rARM_LR); // Establish stack
+ if (Runtime::Current()->ExplicitStackOverflowChecks()) {
+ OpRegRegImm(kOpSub, rARM_LR, rARM_SP, frame_size_ - (spill_count * 4));
+ GenRegRegCheck(kCondUlt, rARM_LR, r12, kThrowStackOverflow);
+ OpRegCopy(rARM_SP, rARM_LR); // Establish stack
+ } else {
+ // Implicit stack overflow check.
+ // Generate a load from [sp, #-framesize]. If this is in the stack
+ // redzone we will get a segmentation fault.
+ uint32_t full_frame_size = frame_size_ - (spill_count * 4);
+
+ OpRegImm(kOpSub, rARM_SP, full_frame_size);
+ LoadWordDisp(rARM_SP, 0, rARM_LR);
+ MarkPossibleStackOverflowException();
+ }
} else {
OpRegImm(kOpSub, rARM_SP, frame_size_ - (spill_count * 4));
}
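
The implicit stack overflow check above relies on the protected region below the stack limit: the prologue moves SP down by the whole frame and then loads from the new SP, so a frame that lands in the red zone faults immediately and the fault handler turns the SIGSEGV into a StackOverflowError. A minimal C++ sketch of that idea (illustrative names, not the emitted ARM code):

// Conceptual equivalent of "sub sp, sp, #framesize" followed by "ldr lr, [sp, #0]".
inline void ImplicitStackOverflowProbe(uintptr_t& sp, uint32_t full_frame_size) {
  sp -= full_frame_size;                                         // Establish the new frame.
  volatile uintptr_t probe = *reinterpret_cast<uintptr_t*>(sp);  // Faults if the frame is in the red zone.
  (void)probe;
}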
diff --git a/compiler/dex/quick/arm/codegen_arm.h b/compiler/dex/quick/arm/codegen_arm.h
index 2c0cead..6e72c80 100644
--- a/compiler/dex/quick/arm/codegen_arm.h
+++ b/compiler/dex/quick/arm/codegen_arm.h
@@ -30,6 +30,7 @@ class ArmMir2Lir : public Mir2Lir {
bool SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div, RegLocation rl_src,
RegLocation rl_dest, int lit);
int LoadHelper(ThreadOffset offset);
+ LIR* CheckSuspendUsingLoad() OVERRIDE;
LIR* LoadBaseDisp(int rBase, int displacement, int r_dest, OpSize size, int s_reg);
LIR* LoadBaseDispWide(int rBase, int displacement, int r_dest_lo, int r_dest_hi,
int s_reg);
diff --git a/compiler/dex/quick/arm/int_arm.cc b/compiler/dex/quick/arm/int_arm.cc
index fb2096f..d22219a 100644
--- a/compiler/dex/quick/arm/int_arm.cc
+++ b/compiler/dex/quick/arm/int_arm.cc
@@ -938,7 +938,7 @@ void ArmMir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
}
/* null object? */
- GenNullCheck(rl_array.s_reg_low, rl_array.reg.GetReg(), opt_flags);
+ GenNullCheck(rl_array.reg.GetReg(), opt_flags);
bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
int reg_len = INVALID_REG;
@@ -946,6 +946,9 @@ void ArmMir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
reg_len = AllocTemp();
/* Get len */
LoadWordDisp(rl_array.reg.GetReg(), len_offset, reg_len);
+ MarkPossibleNullPointerException(opt_flags);
+ } else {
+ ForceImplicitNullCheck(rl_array.reg.GetReg(), opt_flags);
}
if (rl_dest.wide || rl_dest.fp || constant_index) {
int reg_ptr;
@@ -969,13 +972,16 @@ void ArmMir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
FreeTemp(reg_len);
}
if (rl_dest.wide) {
- LoadBaseDispWide(reg_ptr, data_offset, rl_result.reg.GetReg(), rl_result.reg.GetHighReg(), INVALID_SREG);
+ LoadBaseDispWide(reg_ptr, data_offset, rl_result.reg.GetReg(), rl_result.reg.GetHighReg(),
+ INVALID_SREG);
+ MarkPossibleNullPointerException(opt_flags);
if (!constant_index) {
FreeTemp(reg_ptr);
}
StoreValueWide(rl_dest, rl_result);
} else {
LoadBaseDisp(reg_ptr, data_offset, rl_result.reg.GetReg(), size, INVALID_SREG);
+ MarkPossibleNullPointerException(opt_flags);
if (!constant_index) {
FreeTemp(reg_ptr);
}
@@ -993,6 +999,7 @@ void ArmMir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
FreeTemp(reg_len);
}
LoadBaseIndexed(reg_ptr, rl_index.reg.GetReg(), rl_result.reg.GetReg(), scale, size);
+ MarkPossibleNullPointerException(opt_flags);
FreeTemp(reg_ptr);
StoreValue(rl_dest, rl_result);
}
@@ -1038,7 +1045,7 @@ void ArmMir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
}
/* null object? */
- GenNullCheck(rl_array.s_reg_low, rl_array.reg.GetReg(), opt_flags);
+ GenNullCheck(rl_array.reg.GetReg(), opt_flags);
bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
int reg_len = INVALID_REG;
@@ -1047,6 +1054,9 @@ void ArmMir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
// NOTE: max live temps(4) here.
/* Get len */
LoadWordDisp(rl_array.reg.GetReg(), len_offset, reg_len);
+ MarkPossibleNullPointerException(opt_flags);
+ } else {
+ ForceImplicitNullCheck(rl_array.reg.GetReg(), opt_flags);
}
/* at this point, reg_ptr points to array, 2 live temps */
if (rl_src.wide || rl_src.fp || constant_index) {
@@ -1073,6 +1083,7 @@ void ArmMir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
} else {
StoreBaseDisp(reg_ptr, data_offset, rl_src.reg.GetReg(), size);
}
+ MarkPossibleNullPointerException(opt_flags);
} else {
/* reg_ptr -> array data */
OpRegRegImm(kOpAdd, reg_ptr, rl_array.reg.GetReg(), data_offset);
@@ -1083,6 +1094,7 @@ void ArmMir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
}
StoreBaseIndexed(reg_ptr, rl_index.reg.GetReg(), rl_src.reg.GetReg(),
scale, size);
+ MarkPossibleNullPointerException(opt_flags);
}
if (allocated_reg_ptr_temp) {
FreeTemp(reg_ptr);
diff --git a/compiler/dex/quick/arm/target_arm.cc b/compiler/dex/quick/arm/target_arm.cc
index 01d669b..7f8656a 100644
--- a/compiler/dex/quick/arm/target_arm.cc
+++ b/compiler/dex/quick/arm/target_arm.cc
@@ -727,6 +727,13 @@ int ArmMir2Lir::LoadHelper(ThreadOffset offset) {
return rARM_LR;
}
+LIR* ArmMir2Lir::CheckSuspendUsingLoad() {
+ int tmp = r0;
+ LoadWordDisp(rARM_SELF, Thread::ThreadSuspendTriggerOffset().Int32Value(), tmp);
+ LIR* load2 = LoadWordDisp(tmp, 0, tmp);
+ return load2;
+}
+
uint64_t ArmMir2Lir::GetTargetInstFlags(int opcode) {
DCHECK(!IsPseudoLirOp(opcode));
return ArmMir2Lir::EncodingMap[opcode].flags;
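
CheckSuspendUsingLoad emits two dependent loads: the first reads suspend_trigger_ from the Thread register (r9), the second dereferences that value. TriggerSuspend() sets the trigger to null, so the second load faults and the SuspensionHandler takes over. A hedged C++ analogue (hypothetical helper, not the generated code):

// Sketch of the implicit suspend poll; |suspend_trigger_slot| stands in for [r9, #offset].
inline void ImplicitSuspendPoll(uintptr_t** suspend_trigger_slot) {
  uintptr_t* trigger = *suspend_trigger_slot;  // ldr r0, [r9, #suspend_trigger_offset]
  volatile uintptr_t value = *trigger;         // ldr r0, [r0, #0]; SIGSEGV when trigger == nullptr
  (void)value;
}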
diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc
index 1c5f6a0..2384c7f 100644
--- a/compiler/dex/quick/gen_common.cc
+++ b/compiler/dex/quick/gen_common.cc
@@ -66,12 +66,45 @@ LIR* Mir2Lir::GenImmedCheck(ConditionCode c_code, int reg, int imm_val, ThrowKin
return branch;
}
+
/* Perform null-check on a register. */
-LIR* Mir2Lir::GenNullCheck(int s_reg, int m_reg, int opt_flags) {
- if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) {
- return NULL;
+LIR* Mir2Lir::GenNullCheck(int m_reg, int opt_flags) {
+ if (Runtime::Current()->ExplicitNullChecks()) {
+ if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) {
+ return NULL;
+ }
+ return GenImmedCheck(kCondEq, m_reg, 0, kThrowNullPointer);
+ }
+ return nullptr;
+}
+
+void Mir2Lir::MarkPossibleNullPointerException(int opt_flags) {
+ if (!Runtime::Current()->ExplicitNullChecks()) {
+ if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) {
+ return;
+ }
+ MarkSafepointPC(last_lir_insn_);
+ }
+}
+
+void Mir2Lir::MarkPossibleStackOverflowException() {
+ if (!Runtime::Current()->ExplicitStackOverflowChecks()) {
+ MarkSafepointPC(last_lir_insn_);
+ }
+}
+
+void Mir2Lir::ForceImplicitNullCheck(int reg, int opt_flags) {
+ if (!Runtime::Current()->ExplicitNullChecks()) {
+ if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) {
+ return;
+ }
+ // Force an implicit null check by performing a memory operation (load) from the given
+ // register with offset 0. This will cause a signal if the register contains 0 (null).
+ int tmp = AllocTemp();
+ LIR* load = LoadWordDisp(reg, 0, tmp);
+ FreeTemp(tmp);
+ MarkSafepointPC(load);
}
- return GenImmedCheck(kCondEq, m_reg, 0, kThrowNullPointer);
}
/* Perform check on two registers */
@@ -680,12 +713,14 @@ void Mir2Lir::GenIGet(MIR* mir, int opt_flags, OpSize size,
rl_obj = LoadValue(rl_obj, kCoreReg);
if (is_long_or_double) {
DCHECK(rl_dest.wide);
- GenNullCheck(rl_obj.s_reg_low, rl_obj.reg.GetReg(), opt_flags);
+ GenNullCheck(rl_obj.reg.GetReg(), opt_flags);
if (cu_->instruction_set == kX86) {
rl_result = EvalLoc(rl_dest, reg_class, true);
- GenNullCheck(rl_obj.s_reg_low, rl_obj.reg.GetReg(), opt_flags);
+ GenNullCheck(rl_obj.reg.GetReg(), opt_flags);
LoadBaseDispWide(rl_obj.reg.GetReg(), field_info.FieldOffset().Int32Value(),
- rl_result.reg.GetReg(), rl_result.reg.GetHighReg(), rl_obj.s_reg_low);
+ rl_result.reg.GetReg(),
+ rl_result.reg.GetHighReg(), rl_obj.s_reg_low);
+ MarkPossibleNullPointerException(opt_flags);
if (field_info.IsVolatile()) {
GenMemBarrier(kLoadLoad);
}
@@ -703,9 +738,10 @@ void Mir2Lir::GenIGet(MIR* mir, int opt_flags, OpSize size,
StoreValueWide(rl_dest, rl_result);
} else {
rl_result = EvalLoc(rl_dest, reg_class, true);
- GenNullCheck(rl_obj.s_reg_low, rl_obj.reg.GetReg(), opt_flags);
+ GenNullCheck(rl_obj.reg.GetReg(), opt_flags);
LoadBaseDisp(rl_obj.reg.GetReg(), field_info.FieldOffset().Int32Value(),
rl_result.reg.GetReg(), kWord, rl_obj.s_reg_low);
+ MarkPossibleNullPointerException(opt_flags);
if (field_info.IsVolatile()) {
GenMemBarrier(kLoadLoad);
}
@@ -739,25 +775,27 @@ void Mir2Lir::GenIPut(MIR* mir, int opt_flags, OpSize size,
if (is_long_or_double) {
int reg_ptr;
rl_src = LoadValueWide(rl_src, kAnyReg);
- GenNullCheck(rl_obj.s_reg_low, rl_obj.reg.GetReg(), opt_flags);
+ GenNullCheck(rl_obj.reg.GetReg(), opt_flags);
reg_ptr = AllocTemp();
OpRegRegImm(kOpAdd, reg_ptr, rl_obj.reg.GetReg(), field_info.FieldOffset().Int32Value());
if (field_info.IsVolatile()) {
GenMemBarrier(kStoreStore);
}
StoreBaseDispWide(reg_ptr, 0, rl_src.reg.GetReg(), rl_src.reg.GetHighReg());
+ MarkPossibleNullPointerException(opt_flags);
if (field_info.IsVolatile()) {
GenMemBarrier(kLoadLoad);
}
FreeTemp(reg_ptr);
} else {
rl_src = LoadValue(rl_src, reg_class);
- GenNullCheck(rl_obj.s_reg_low, rl_obj.reg.GetReg(), opt_flags);
+ GenNullCheck(rl_obj.reg.GetReg(), opt_flags);
if (field_info.IsVolatile()) {
GenMemBarrier(kStoreStore);
}
StoreBaseDisp(rl_obj.reg.GetReg(), field_info.FieldOffset().Int32Value(),
- rl_src.reg.GetReg(), kWord);
+ rl_src.reg.GetReg(), kWord);
+ MarkPossibleNullPointerException(opt_flags);
if (field_info.IsVolatile()) {
GenMemBarrier(kLoadLoad);
}
@@ -1929,31 +1967,53 @@ void Mir2Lir::GenConversionCall(ThreadOffset func_offset,
/* Check if we need to check for pending suspend request */
void Mir2Lir::GenSuspendTest(int opt_flags) {
- if (NO_SUSPEND || (opt_flags & MIR_IGNORE_SUSPEND_CHECK)) {
- return;
+ if (Runtime::Current()->ExplicitSuspendChecks()) {
+ if (NO_SUSPEND || (opt_flags & MIR_IGNORE_SUSPEND_CHECK)) {
+ return;
+ }
+ FlushAllRegs();
+ LIR* branch = OpTestSuspend(NULL);
+ LIR* ret_lab = NewLIR0(kPseudoTargetLabel);
+ LIR* target = RawLIR(current_dalvik_offset_, kPseudoSuspendTarget, WrapPointer(ret_lab),
+ current_dalvik_offset_);
+ branch->target = target;
+ suspend_launchpads_.Insert(target);
+ } else {
+ if (NO_SUSPEND || (opt_flags & MIR_IGNORE_SUSPEND_CHECK)) {
+ return;
+ }
+ FlushAllRegs(); // TODO: needed?
+ LIR* inst = CheckSuspendUsingLoad();
+ MarkSafepointPC(inst);
}
- FlushAllRegs();
- LIR* branch = OpTestSuspend(NULL);
- LIR* ret_lab = NewLIR0(kPseudoTargetLabel);
- LIR* target = RawLIR(current_dalvik_offset_, kPseudoSuspendTarget, WrapPointer(ret_lab),
- current_dalvik_offset_);
- branch->target = target;
- suspend_launchpads_.Insert(target);
}
/* Check if we need to check for pending suspend request */
void Mir2Lir::GenSuspendTestAndBranch(int opt_flags, LIR* target) {
- if (NO_SUSPEND || (opt_flags & MIR_IGNORE_SUSPEND_CHECK)) {
+ if (Runtime::Current()->ExplicitSuspendChecks()) {
+ if (NO_SUSPEND || (opt_flags & MIR_IGNORE_SUSPEND_CHECK)) {
+ OpUnconditionalBranch(target);
+ return;
+ }
+ OpTestSuspend(target);
+ LIR* launch_pad =
+ RawLIR(current_dalvik_offset_, kPseudoSuspendTarget, WrapPointer(target),
+ current_dalvik_offset_);
+ FlushAllRegs();
+ OpUnconditionalBranch(launch_pad);
+ suspend_launchpads_.Insert(launch_pad);
+ } else {
+ // For the implicit suspend check, just perform the trigger
+ // load and branch to the target.
+ if (NO_SUSPEND || (opt_flags & MIR_IGNORE_SUSPEND_CHECK)) {
+ OpUnconditionalBranch(target);
+ return;
+ }
+ FlushAllRegs();
+ LIR* inst = CheckSuspendUsingLoad();
+ MarkSafepointPC(inst);
OpUnconditionalBranch(target);
- return;
}
- OpTestSuspend(target);
- LIR* launch_pad =
- RawLIR(current_dalvik_offset_, kPseudoSuspendTarget, WrapPointer(target),
- current_dalvik_offset_);
- FlushAllRegs();
- OpUnconditionalBranch(launch_pad);
- suspend_launchpads_.Insert(launch_pad);
}
/* Call out to helper assembly routine that will null check obj and then lock it. */
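
After this change the backends follow a common pattern: call GenNullCheck (which only emits a compare-and-throw when explicit null checks are enabled), perform the dereferencing memory operation, then call MarkPossibleNullPointerException so that instruction gets a safepoint PC the fault handler can map back to a dex PC. A sketch of that ordering with placeholder names (EmitFieldLoad is hypothetical, not part of this change):

// Illustrative only: how a backend sequences an instance-field load now.
void EmitFieldLoad(Mir2Lir* cg, int obj_reg, int offset, int dest_reg, int opt_flags) {
  cg->GenNullCheck(obj_reg, opt_flags);             // No-op when null checks are implicit.
  cg->LoadWordDisp(obj_reg, offset, dest_reg);      // The load that may fault on a null object.
  cg->MarkPossibleNullPointerException(opt_flags);  // Record a safepoint for the signal handler.
}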
diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc
index 5d2886e..859a033 100644
--- a/compiler/dex/quick/gen_invoke.cc
+++ b/compiler/dex/quick/gen_invoke.cc
@@ -489,10 +489,11 @@ static int NextVCallInsn(CompilationUnit* cu, CallInfo* info,
break;
}
case 1: // Is "this" null? [use kArg1]
- cg->GenNullCheck(info->args[0].s_reg_low, cg->TargetReg(kArg1), info->opt_flags);
+ cg->GenNullCheck(cg->TargetReg(kArg1), info->opt_flags);
// get this->klass_ [use kArg1, set kInvokeTgt]
cg->LoadWordDisp(cg->TargetReg(kArg1), mirror::Object::ClassOffset().Int32Value(),
cg->TargetReg(kInvokeTgt));
+ cg->MarkPossibleNullPointerException(info->opt_flags);
break;
case 2: // Get this->klass_->vtable [usr kInvokeTgt, set kInvokeTgt]
cg->LoadWordDisp(cg->TargetReg(kInvokeTgt), mirror::Class::VTableOffset().Int32Value(),
@@ -543,10 +544,11 @@ static int NextInterfaceCallInsn(CompilationUnit* cu, CallInfo* info, int state,
break;
}
case 2: // Is "this" null? [use kArg1]
- cg->GenNullCheck(info->args[0].s_reg_low, cg->TargetReg(kArg1), info->opt_flags);
+ cg->GenNullCheck(cg->TargetReg(kArg1), info->opt_flags);
// Get this->klass_ [use kArg1, set kInvokeTgt]
cg->LoadWordDisp(cg->TargetReg(kArg1), mirror::Object::ClassOffset().Int32Value(),
cg->TargetReg(kInvokeTgt));
+ cg->MarkPossibleNullPointerException(info->opt_flags);
break;
case 3: // Get this->klass_->imtable [use kInvokeTgt, set kInvokeTgt]
cg->LoadWordDisp(cg->TargetReg(kInvokeTgt), mirror::Class::ImTableOffset().Int32Value(),
@@ -753,7 +755,7 @@ int Mir2Lir::GenDalvikArgsNoRange(CallInfo* info,
type, skip_this);
if (pcrLabel) {
- *pcrLabel = GenNullCheck(info->args[0].s_reg_low, TargetReg(kArg1), info->opt_flags);
+ *pcrLabel = GenNullCheck(TargetReg(kArg1), info->opt_flags);
}
return call_state;
}
@@ -957,7 +959,7 @@ int Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state,
call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
direct_code, direct_method, type);
if (pcrLabel) {
- *pcrLabel = GenNullCheck(info->args[0].s_reg_low, TargetReg(kArg1), info->opt_flags);
+ *pcrLabel = GenNullCheck(TargetReg(kArg1), info->opt_flags);
}
return call_state;
}
@@ -1004,7 +1006,7 @@ bool Mir2Lir::GenInlinedCharAt(CallInfo* info) {
rl_idx = LoadValue(rl_idx, kCoreReg);
}
int reg_max;
- GenNullCheck(rl_obj.s_reg_low, rl_obj.reg.GetReg(), info->opt_flags);
+ GenNullCheck(rl_obj.reg.GetReg(), info->opt_flags);
bool range_check = (!(info->opt_flags & MIR_IGNORE_RANGE_CHECK));
LIR* range_check_branch = nullptr;
int reg_off = INVALID_REG;
@@ -1015,8 +1017,10 @@ bool Mir2Lir::GenInlinedCharAt(CallInfo* info) {
if (range_check) {
reg_max = AllocTemp();
LoadWordDisp(rl_obj.reg.GetReg(), count_offset, reg_max);
+ MarkPossibleNullPointerException(info->opt_flags);
}
LoadWordDisp(rl_obj.reg.GetReg(), offset_offset, reg_off);
+ MarkPossibleNullPointerException(info->opt_flags);
LoadWordDisp(rl_obj.reg.GetReg(), value_offset, reg_ptr);
if (range_check) {
// Set up a launch pad to allow retry in case of bounds violation */
@@ -1082,8 +1086,10 @@ bool Mir2Lir::GenInlinedStringIsEmptyOrLength(CallInfo* info, bool is_empty) {
rl_obj = LoadValue(rl_obj, kCoreReg);
RegLocation rl_dest = InlineTarget(info);
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
- GenNullCheck(rl_obj.s_reg_low, rl_obj.reg.GetReg(), info->opt_flags);
- LoadWordDisp(rl_obj.reg.GetReg(), mirror::String::CountOffset().Int32Value(), rl_result.reg.GetReg());
+ GenNullCheck(rl_obj.reg.GetReg(), info->opt_flags);
+ LoadWordDisp(rl_obj.reg.GetReg(), mirror::String::CountOffset().Int32Value(),
+ rl_result.reg.GetReg());
+ MarkPossibleNullPointerException(info->opt_flags);
if (is_empty) {
// dst = (dst == 0);
if (cu_->instruction_set == kThumb2) {
@@ -1281,7 +1287,7 @@ bool Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) {
LoadValueDirectFixed(rl_start, reg_start);
}
int r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(pIndexOf));
- GenNullCheck(rl_obj.s_reg_low, reg_ptr, info->opt_flags);
+ GenNullCheck(reg_ptr, info->opt_flags);
LIR* high_code_point_branch =
rl_char.is_const ? nullptr : OpCmpImmBranch(kCondGt, reg_char, 0xFFFF, nullptr);
// NOTE: not a safepoint
@@ -1319,7 +1325,7 @@ bool Mir2Lir::GenInlinedStringCompareTo(CallInfo* info) {
LoadValueDirectFixed(rl_cmp, reg_cmp);
int r_tgt = (cu_->instruction_set != kX86) ?
LoadHelper(QUICK_ENTRYPOINT_OFFSET(pStringCompareTo)) : 0;
- GenNullCheck(rl_this.s_reg_low, reg_this, info->opt_flags);
+ GenNullCheck(reg_this, info->opt_flags);
info->opt_flags |= MIR_IGNORE_NULL_CHECK; // Record that we've null checked.
// TUNING: check if rl_cmp.s_reg_low is already null checked
LIR* cmp_null_check_branch = OpCmpImmBranch(kCondEq, reg_cmp, 0, nullptr);
diff --git a/compiler/dex/quick/mips/codegen_mips.h b/compiler/dex/quick/mips/codegen_mips.h
index 61eb68d..28ebe0e 100644
--- a/compiler/dex/quick/mips/codegen_mips.h
+++ b/compiler/dex/quick/mips/codegen_mips.h
@@ -30,6 +30,7 @@ class MipsMir2Lir : public Mir2Lir {
bool SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div, RegLocation rl_src,
RegLocation rl_dest, int lit);
int LoadHelper(ThreadOffset offset);
+ LIR* CheckSuspendUsingLoad() OVERRIDE;
LIR* LoadBaseDisp(int rBase, int displacement, int r_dest, OpSize size, int s_reg);
LIR* LoadBaseDispWide(int rBase, int displacement, int r_dest_lo, int r_dest_hi,
int s_reg);
diff --git a/compiler/dex/quick/mips/int_mips.cc b/compiler/dex/quick/mips/int_mips.cc
index fec801b..9fcc8bb 100644
--- a/compiler/dex/quick/mips/int_mips.cc
+++ b/compiler/dex/quick/mips/int_mips.cc
@@ -471,7 +471,7 @@ void MipsMir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
}
/* null object? */
- GenNullCheck(rl_array.s_reg_low, rl_array.reg.GetReg(), opt_flags);
+ GenNullCheck(rl_array.reg.GetReg(), opt_flags);
int reg_ptr = AllocTemp();
bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
@@ -548,7 +548,7 @@ void MipsMir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
}
/* null object? */
- GenNullCheck(rl_array.s_reg_low, rl_array.reg.GetReg(), opt_flags);
+ GenNullCheck(rl_array.reg.GetReg(), opt_flags);
bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
int reg_len = INVALID_REG;
diff --git a/compiler/dex/quick/mips/target_mips.cc b/compiler/dex/quick/mips/target_mips.cc
index 4f495ee..b7fb2f4 100644
--- a/compiler/dex/quick/mips/target_mips.cc
+++ b/compiler/dex/quick/mips/target_mips.cc
@@ -512,6 +512,14 @@ int MipsMir2Lir::LoadHelper(ThreadOffset offset) {
return r_T9;
}
+LIR* MipsMir2Lir::CheckSuspendUsingLoad() {
+ int tmp = AllocTemp();
+ LoadWordDisp(rMIPS_SELF, Thread::ThreadSuspendTriggerOffset().Int32Value(), tmp);
+ LIR *inst = LoadWordDisp(tmp, 0, tmp);
+ FreeTemp(tmp);
+ return inst;
+}
+
void MipsMir2Lir::SpillCoreRegs() {
if (num_core_spills_ == 0) {
return;
diff --git a/compiler/dex/quick/mir_to_lir.cc b/compiler/dex/quick/mir_to_lir.cc
index f93a5e3..538c292 100644
--- a/compiler/dex/quick/mir_to_lir.cc
+++ b/compiler/dex/quick/mir_to_lir.cc
@@ -437,7 +437,7 @@ void Mir2Lir::CompileDalvikInstruction(MIR* mir, BasicBlock* bb, LIR* label_list
int len_offset;
len_offset = mirror::Array::LengthOffset().Int32Value();
rl_src[0] = LoadValue(rl_src[0], kCoreReg);
- GenNullCheck(rl_src[0].s_reg_low, rl_src[0].reg.GetReg(), opt_flags);
+ GenNullCheck(rl_src[0].reg.GetReg(), opt_flags);
rl_result = EvalLoc(rl_dest, kCoreReg, true);
LoadWordDisp(rl_src[0].reg.GetReg(), len_offset, rl_result.reg.GetReg());
StoreValue(rl_dest, rl_result);
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
index e2326bb..048b157 100644
--- a/compiler/dex/quick/mir_to_lir.h
+++ b/compiler/dex/quick/mir_to_lir.h
@@ -546,7 +546,10 @@ class Mir2Lir : public Backend {
LIR* GenCheck(ConditionCode c_code, ThrowKind kind);
LIR* GenImmedCheck(ConditionCode c_code, int reg, int imm_val,
ThrowKind kind);
- LIR* GenNullCheck(int s_reg, int m_reg, int opt_flags);
+ LIR* GenNullCheck(int m_reg, int opt_flags);
+ void MarkPossibleNullPointerException(int opt_flags);
+ void MarkPossibleStackOverflowException();
+ void ForceImplicitNullCheck(int reg, int opt_flags);
LIR* GenRegRegCheck(ConditionCode c_code, int reg1, int reg2,
ThrowKind kind);
void GenCompareAndBranch(Instruction::Code opcode, RegLocation rl_src1,
@@ -796,6 +799,7 @@ class Mir2Lir : public Backend {
virtual bool SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div,
RegLocation rl_src, RegLocation rl_dest, int lit) = 0;
virtual int LoadHelper(ThreadOffset offset) = 0;
+ virtual LIR* CheckSuspendUsingLoad() = 0;
virtual LIR* LoadBaseDisp(int rBase, int displacement, int r_dest, OpSize size, int s_reg) = 0;
virtual LIR* LoadBaseDispWide(int rBase, int displacement, int r_dest_lo, int r_dest_hi,
int s_reg) = 0;
diff --git a/compiler/dex/quick/x86/codegen_x86.h b/compiler/dex/quick/x86/codegen_x86.h
index 8269898..275a2d9 100644
--- a/compiler/dex/quick/x86/codegen_x86.h
+++ b/compiler/dex/quick/x86/codegen_x86.h
@@ -30,6 +30,7 @@ class X86Mir2Lir : public Mir2Lir {
bool SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div, RegLocation rl_src,
RegLocation rl_dest, int lit);
int LoadHelper(ThreadOffset offset);
+ LIR* CheckSuspendUsingLoad() OVERRIDE;
LIR* LoadBaseDisp(int rBase, int displacement, int r_dest, OpSize size, int s_reg);
LIR* LoadBaseDispWide(int rBase, int displacement, int r_dest_lo, int r_dest_hi,
int s_reg);
diff --git a/compiler/dex/quick/x86/int_x86.cc b/compiler/dex/quick/x86/int_x86.cc
index d7eeeac..a67c43c 100644
--- a/compiler/dex/quick/x86/int_x86.cc
+++ b/compiler/dex/quick/x86/int_x86.cc
@@ -1298,7 +1298,7 @@ void X86Mir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
}
/* null object? */
- GenNullCheck(rl_array.s_reg_low, rl_array.reg.GetReg(), opt_flags);
+ GenNullCheck(rl_array.reg.GetReg(), opt_flags);
if (!(opt_flags & MIR_IGNORE_RANGE_CHECK)) {
if (constant_index) {
@@ -1352,7 +1352,7 @@ void X86Mir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
}
/* null object? */
- GenNullCheck(rl_array.s_reg_low, rl_array.reg.GetReg(), opt_flags);
+ GenNullCheck(rl_array.reg.GetReg(), opt_flags);
if (!(opt_flags & MIR_IGNORE_RANGE_CHECK)) {
if (constant_index) {
diff --git a/compiler/dex/quick/x86/target_x86.cc b/compiler/dex/quick/x86/target_x86.cc
index c70efa4..7b2be0e 100644
--- a/compiler/dex/quick/x86/target_x86.cc
+++ b/compiler/dex/quick/x86/target_x86.cc
@@ -552,6 +552,11 @@ int X86Mir2Lir::LoadHelper(ThreadOffset offset) {
return INVALID_REG;
}
+LIR* X86Mir2Lir::CheckSuspendUsingLoad() {
+ LOG(FATAL) << "Unexpected use of CheckSuspendUsingLoad in x86";
+ return nullptr;
+}
+
uint64_t X86Mir2Lir::GetTargetInstFlags(int opcode) {
DCHECK(!IsPseudoLirOp(opcode));
return X86Mir2Lir::EncodingMap[opcode].flags;
@@ -975,7 +980,7 @@ bool X86Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) {
// Is the string non-NULL?
LoadValueDirectFixed(rl_obj, rDX);
- GenNullCheck(rl_obj.s_reg_low, rDX, info->opt_flags);
+ GenNullCheck(rDX, info->opt_flags);
info->opt_flags |= MIR_IGNORE_NULL_CHECK; // Record that we've null checked.
// Does the character fit in 16 bits?
diff --git a/runtime/Android.mk b/runtime/Android.mk
index e094850..983b48f 100644
--- a/runtime/Android.mk
+++ b/runtime/Android.mk
@@ -139,6 +139,7 @@ LIBART_COMMON_SRC_FILES := \
trace.cc \
transaction.cc \
profiler.cc \
+ fault_handler.cc \
utf.cc \
utils.cc \
verifier/dex_gc_map.cc \
@@ -207,7 +208,8 @@ LIBART_TARGET_SRC_FILES_arm := \
arch/arm/portable_entrypoints_arm.S \
arch/arm/quick_entrypoints_arm.S \
arch/arm/arm_sdiv.S \
- arch/arm/thread_arm.cc
+ arch/arm/thread_arm.cc \
+ arch/arm/fault_handler_arm.cc
LIBART_TARGET_SRC_FILES_x86 := \
arch/x86/context_x86.cc \
@@ -215,7 +217,8 @@ LIBART_TARGET_SRC_FILES_x86 := \
arch/x86/jni_entrypoints_x86.S \
arch/x86/portable_entrypoints_x86.S \
arch/x86/quick_entrypoints_x86.S \
- arch/x86/thread_x86.cc
+ arch/x86/thread_x86.cc \
+ arch/x86/fault_handler_x86.cc
LIBART_TARGET_SRC_FILES_x86_64 := \
arch/x86_64/context_x86_64.cc \
@@ -224,7 +227,8 @@ LIBART_TARGET_SRC_FILES_x86_64 := \
arch/x86_64/portable_entrypoints_x86_64.S \
arch/x86_64/quick_entrypoints_x86_64.S \
arch/x86_64/thread_x86_64.cc \
- monitor_pool.cc
+ monitor_pool.cc \
+ arch/x86_64/fault_handler_x86_64.cc
LIBART_TARGET_SRC_FILES_mips := \
@@ -233,7 +237,8 @@ LIBART_TARGET_SRC_FILES_mips := \
arch/mips/jni_entrypoints_mips.S \
arch/mips/portable_entrypoints_mips.S \
arch/mips/quick_entrypoints_mips.S \
- arch/mips/thread_mips.cc
+ arch/mips/thread_mips.cc \
+ arch/mips/fault_handler_mips.cc
ifeq ($(TARGET_ARCH),arm64)
$(info TODOArm64: $(LOCAL_PATH)/Android.mk Add Arm64 specific runtime files)
@@ -263,6 +268,7 @@ LIBART_HOST_SRC_FILES += \
arch/x86_64/portable_entrypoints_x86_64.S \
arch/x86_64/quick_entrypoints_x86_64.S \
arch/x86_64/thread_x86_64.cc \
+ arch/x86_64/fault_handler_x86_64.cc \
monitor_pool.cc
else
LIBART_HOST_SRC_FILES += \
@@ -271,6 +277,7 @@ LIBART_HOST_SRC_FILES += \
arch/x86/jni_entrypoints_x86.S \
arch/x86/portable_entrypoints_x86.S \
arch/x86/quick_entrypoints_x86.S \
+ arch/x86/fault_handler_x86.cc \
arch/x86/thread_x86.cc
endif
else # HOST_ARCH != x86
diff --git a/runtime/arch/arm/fault_handler_arm.cc b/runtime/arch/arm/fault_handler_arm.cc
new file mode 100644
index 0000000..c748ce9
--- /dev/null
+++ b/runtime/arch/arm/fault_handler_arm.cc
@@ -0,0 +1,148 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#include "fault_handler.h"
+#include <sys/ucontext.h>
+#include "base/macros.h"
+#include "globals.h"
+#include "base/logging.h"
+#include "base/hex_dump.h"
+#include "thread.h"
+#include "thread-inl.h"
+
+//
+// ARM specific fault handler functions.
+//
+
+namespace art {
+
+extern "C" void art_quick_throw_null_pointer_exception();
+extern "C" void art_quick_test_suspend();
+
+void FaultManager::GetMethodAndReturnPC(void* context, uintptr_t& method, uintptr_t& return_pc) {
+ struct ucontext *uc = (struct ucontext *)context;
+ struct sigcontext *sc = reinterpret_cast<struct sigcontext*>(&uc->uc_mcontext);
+ uintptr_t* sp = reinterpret_cast<uint32_t*>(sc->arm_sp);
+ if (sp == nullptr) {
+ return;
+ }
+
+ // Work out the return PC. This will be the address of the instruction
+ // following the faulting ldr/str instruction. This is in thumb mode so
+ // the instruction might be a 16 or 32 bit one. Also, the GC map always
+ // has the bottom bit of the PC set so we also need to set that.
+
+ // Need to work out the size of the instruction that caused the exception.
+ uint8_t* ptr = reinterpret_cast<uint8_t*>(sc->arm_pc);
+
+ uint16_t instr = ptr[0] | ptr[1] << 8;
+ bool is_32bit = ((instr & 0xF000) == 0xF000) || ((instr & 0xF800) == 0xE800);
+ uint32_t instr_size = is_32bit ? 4 : 2;
+
+ // The method is at the top of the stack.
+ method = sp[0];
+
+ return_pc = (sc->arm_pc + instr_size) | 1;
+}
+
+bool NullPointerHandler::Action(int sig, siginfo_t* info, void* context) {
+ // The code that looks for the catch location needs to know the value of the
+ // ARM PC at the point of call. For Null checks we insert a GC map that is immediately after
+ // the load/store instruction that might cause the fault. However the mapping table has
+ // the low bits set for thumb mode so we need to set the bottom bit for the LR
+ // register in order to find the mapping.
+
+ // Need to work out the size of the instruction that caused the exception.
+ struct ucontext *uc = (struct ucontext *)context;
+ struct sigcontext *sc = reinterpret_cast<struct sigcontext*>(&uc->uc_mcontext);
+ uint8_t* ptr = reinterpret_cast<uint8_t*>(sc->arm_pc);
+
+ uint16_t instr = ptr[0] | ptr[1] << 8;
+ bool is_32bit = ((instr & 0xF000) == 0xF000) || ((instr & 0xF800) == 0xE800);
+ uint32_t instr_size = is_32bit ? 4 : 2;
+ sc->arm_lr = (sc->arm_pc + instr_size) | 1; // LR needs to point to gc map location
+ sc->arm_pc = reinterpret_cast<uintptr_t>(art_quick_throw_null_pointer_exception);
+ LOG(DEBUG) << "Generating null pointer exception";
+ return true;
+}
+
+// A suspend check is done using the following instruction sequence:
+// 0xf723c0b2: f8d902c0 ldr.w r0, [r9, #704] ; suspend_trigger_
+// .. some intervening instruction
+// 0xf723c0b6: 6800 ldr r0, [r0, #0]
+
+// The offset from r9 is Thread::ThreadSuspendTriggerOffset().
+// To check for a suspend check, we examine the instructions that caused
+// the fault (at PC-4 and PC).
+bool SuspensionHandler::Action(int sig, siginfo_t* info, void* context) {
+ // These are the instructions to check for. The first one is the ldr r0,[r9,#xxx]
+ // where xxx is the offset of the suspend trigger.
+ uint32_t checkinst1 = 0xf8d90000 + Thread::ThreadSuspendTriggerOffset().Int32Value();
+ uint16_t checkinst2 = 0x6800;
+
+ struct ucontext *uc = (struct ucontext *)context;
+ struct sigcontext *sc = reinterpret_cast<struct sigcontext*>(&uc->uc_mcontext);
+ uint8_t* ptr2 = reinterpret_cast<uint8_t*>(sc->arm_pc);
+ uint8_t* ptr1 = ptr2 - 4;
+ LOG(DEBUG) << "checking suspend";
+
+ uint16_t inst2 = ptr2[0] | ptr2[1] << 8;
+ LOG(DEBUG) << "inst2: " << std::hex << inst2 << " checkinst2: " << checkinst2;
+ if (inst2 != checkinst2) {
+ // Second instruction is not good, not ours.
+ return false;
+ }
+
+ // The first instruction can be a little bit further up the stream due to load hoisting
+ // in the compiler.
+ uint8_t* limit = ptr1 - 40; // Compiler will hoist to a max of 20 instructions.
+ bool found = false;
+ while (ptr1 > limit) {
+ uint32_t inst1 = ((ptr1[0] | ptr1[1] << 8) << 16) | (ptr1[2] | ptr1[3] << 8);
+ LOG(DEBUG) << "inst1: " << std::hex << inst1 << " checkinst1: " << checkinst1;
+ if (inst1 == checkinst1) {
+ found = true;
+ break;
+ }
+ ptr1 -= 2; // Min instruction size is 2 bytes.
+ }
+ if (found) {
+ LOG(DEBUG) << "suspend check match";
+ // This is a suspend check. Arrange for the signal handler to return to
+ // art_quick_test_suspend. Also set LR so that after the suspend check it
+ // will resume the instruction (current PC + 2). PC points to the
+ // ldr r0,[r0,#0] instruction (r0 will be 0, set by the trigger).
+
+ // NB: remember that we need to set the bottom bit of the LR register
+ // to switch to thumb mode.
+ LOG(DEBUG) << "arm lr: " << std::hex << sc->arm_lr;
+ LOG(DEBUG) << "arm pc: " << std::hex << sc->arm_pc;
+ sc->arm_lr = sc->arm_pc + 3; // +2 + 1 (for thumb)
+ sc->arm_pc = reinterpret_cast<uintptr_t>(art_quick_test_suspend);
+
+ // Now remove the suspend trigger that caused this fault.
+ Thread::Current()->RemoveSuspendTrigger();
+ LOG(DEBUG) << "removed suspend trigger invoking test suspend";
+ return true;
+ }
+ return false;
+}
+
+bool StackOverflowHandler::Action(int sig, siginfo_t* info, void* context) {
+ return false;
+}
+} // namespace art
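
Both handlers above need the width of the faulting Thumb instruction in order to compute the address of the following instruction (the GC map / return location). The encoding test is small enough to restate on its own; this is the same check the handler code performs:

// A 32-bit Thumb-2 instruction begins with a halfword whose top bits are
// 0b11101, 0b11110 or 0b11111; anything else is a 16-bit instruction.
static uint32_t ThumbInstructionSize(const uint8_t* pc) {
  uint16_t first_halfword = pc[0] | (pc[1] << 8);  // Little-endian halfword at the fault PC.
  bool is_32bit = ((first_halfword & 0xF000) == 0xF000) ||
                  ((first_halfword & 0xF800) == 0xE800);
  return is_32bit ? 4u : 2u;
}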
diff --git a/runtime/arch/mips/fault_handler_mips.cc b/runtime/arch/mips/fault_handler_mips.cc
new file mode 100644
index 0000000..8d494c1
--- /dev/null
+++ b/runtime/arch/mips/fault_handler_mips.cc
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#include "fault_handler.h"
+#include <sys/ucontext.h>
+#include "base/macros.h"
+#include "globals.h"
+#include "base/logging.h"
+#include "base/hex_dump.h"
+
+
+//
+// Mips specific fault handler functions.
+//
+
+namespace art {
+
+void FaultManager::GetMethodAndReturnPC(void* context, uintptr_t& method, uintptr_t& return_pc) {
+}
+
+bool NullPointerHandler::Action(int sig, siginfo_t* info, void* context) {
+ return false;
+}
+
+bool SuspensionHandler::Action(int sig, siginfo_t* info, void* context) {
+ return false;
+}
+
+bool StackOverflowHandler::Action(int sig, siginfo_t* info, void* context) {
+ return false;
+}
+} // namespace art
diff --git a/runtime/arch/x86/fault_handler_x86.cc b/runtime/arch/x86/fault_handler_x86.cc
new file mode 100644
index 0000000..171a541
--- /dev/null
+++ b/runtime/arch/x86/fault_handler_x86.cc
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#include "fault_handler.h"
+#include <sys/ucontext.h>
+#include "base/macros.h"
+#include "globals.h"
+#include "base/logging.h"
+#include "base/hex_dump.h"
+
+
+//
+// X86 specific fault handler functions.
+//
+
+namespace art {
+
+void FaultManager::GetMethodAndReturnPC(void* context, uintptr_t& method, uintptr_t& return_pc) {
+}
+
+bool NullPointerHandler::Action(int sig, siginfo_t* info, void* context) {
+ return false;
+}
+
+bool SuspensionHandler::Action(int sig, siginfo_t* info, void* context) {
+ return false;
+}
+
+bool StackOverflowHandler::Action(int sig, siginfo_t* info, void* context) {
+ return false;
+}
+} // namespace art
diff --git a/runtime/arch/x86_64/fault_handler_x86_64.cc b/runtime/arch/x86_64/fault_handler_x86_64.cc
new file mode 100644
index 0000000..3ef19fb
--- /dev/null
+++ b/runtime/arch/x86_64/fault_handler_x86_64.cc
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#include "fault_handler.h"
+#include <sys/ucontext.h>
+#include "base/macros.h"
+#include "globals.h"
+#include "base/logging.h"
+#include "base/hex_dump.h"
+
+
+//
+// X86_64 specific fault handler functions.
+//
+
+namespace art {
+
+void FaultManager::GetMethodAndReturnPC(void* context, uintptr_t& method, uintptr_t& return_pc) {
+}
+
+bool NullPointerHandler::Action(int sig, siginfo_t* info, void* context) {
+ return false;
+}
+
+bool SuspensionHandler::Action(int sig, siginfo_t* info, void* context) {
+ return false;
+}
+
+bool StackOverflowHandler::Action(int sig, siginfo_t* info, void* context) {
+ return false;
+}
+} // namespace art
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index b709da3..b8d1493 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -573,6 +573,38 @@ bool ClassLinker::GenerateOatFile(const char* dex_filename,
argv.push_back("-classpath");
argv.push_back("--runtime-arg");
argv.push_back(Runtime::Current()->GetClassPathString());
+
+ argv.push_back("--runtime-arg");
+ std::string checkstr = "-implicit-checks";
+
+ int nchecks = 0;
+ char checksep = ':';
+
+ if (!Runtime::Current()->ExplicitNullChecks()) {
+ checkstr += checksep;
+ checksep = ',';
+ checkstr += "null";
+ ++nchecks;
+ }
+ if (!Runtime::Current()->ExplicitSuspendChecks()) {
+ checkstr += checksep;
+ checksep = ',';
+ checkstr += "suspend";
+ ++nchecks;
+ }
+
+ if (!Runtime::Current()->ExplicitStackOverflowChecks()) {
+ checkstr += checksep;
+ checksep = ',';
+ checkstr += "stack";
+ ++nchecks;
+ }
+
+ if (nchecks == 0) {
+ checkstr += ":none";
+ }
+ argv.push_back(checkstr);
+
if (!kIsTargetBuild) {
argv.push_back("--host");
}
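
For reference, examples of the runtime argument this block passes to dex2oat, following the building logic above: with everything explicit (the default) the result is "-implicit-checks:none"; with only null checks implicit it is "-implicit-checks:null"; with all three implicit it is "-implicit-checks:null,suspend,stack". A self-contained sketch of the same string construction (BuildImplicitChecksArg is a hypothetical helper, not part of this change):

#include <string>

std::string BuildImplicitChecksArg(bool implicit_null, bool implicit_suspend, bool implicit_stack) {
  std::string arg = "-implicit-checks";
  char sep = ':';
  if (implicit_null)    { arg += sep; arg += "null";    sep = ','; }
  if (implicit_suspend) { arg += sep; arg += "suspend"; sep = ','; }
  if (implicit_stack)   { arg += sep; arg += "stack";   sep = ','; }
  if (sep == ':') {
    arg += ":none";  // Nothing was implicit.
  }
  return arg;
}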
diff --git a/runtime/fault_handler.cc b/runtime/fault_handler.cc
new file mode 100644
index 0000000..6399c0d
--- /dev/null
+++ b/runtime/fault_handler.cc
@@ -0,0 +1,181 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "fault_handler.h"
+#include <sys/mman.h>
+#include <sys/ucontext.h>
+#include "base/macros.h"
+#include "globals.h"
+#include "base/logging.h"
+#include "base/hex_dump.h"
+#include "thread.h"
+#include "mirror/art_method-inl.h"
+#include "mirror/class-inl.h"
+#include "mirror/dex_cache.h"
+#include "mirror/object_array-inl.h"
+#include "mirror/object-inl.h"
+#include "object_utils.h"
+#include "scoped_thread_state_change.h"
+#include "verify_object-inl.h"
+
+namespace art {
+// Static fault manager object accessed by the signal handler.
+FaultManager fault_manager;
+
+// Signal handler called on SIGSEGV.
+static void art_fault_handler(int sig, siginfo_t* info, void* context) {
+ fault_manager.HandleFault(sig, info, context);
+}
+
+FaultManager::FaultManager() {
+ sigaction(SIGSEGV, nullptr, &oldaction_);
+}
+
+FaultManager::~FaultManager() {
+ sigaction(SIGSEGV, &oldaction_, nullptr); // Restore old handler.
+}
+
+void FaultManager::Init() {
+ struct sigaction action;
+ action.sa_sigaction = art_fault_handler;
+ sigemptyset(&action.sa_mask);
+ action.sa_flags = SA_SIGINFO | SA_ONSTACK;
+ action.sa_restorer = nullptr;
+ sigaction(SIGSEGV, &action, &oldaction_);
+}
+
+void FaultManager::HandleFault(int sig, siginfo_t* info, void* context) {
+ bool handled = false;
+ if (IsInGeneratedCode(context)) {
+ for (auto& handler : handlers_) {
+ handled = handler->Action(sig, info, context);
+ if (handled) {
+ return;
+ }
+ }
+ }
+
+ if (!handled) {
+ LOG(INFO) << "Caught unknown SIGSEGV in ART fault handler";
+ oldaction_.sa_sigaction(sig, info, context);
+ }
+}
+
+void FaultManager::AddHandler(FaultHandler* handler) {
+ handlers_.push_back(handler);
+}
+
+void FaultManager::RemoveHandler(FaultHandler* handler) {
+ for (Handlers::iterator i = handlers_.begin(); i != handlers_.end(); ++i) {
+ FaultHandler* h = *i;
+ if (h == handler) {
+ handlers_.erase(i);
+ return;
+ }
+ }
+}
+
+
+// This function is called within the signal handler. It checks that
+// the mutator_lock is held (shared). No annotalysis is done.
+bool FaultManager::IsInGeneratedCode(void *context) {
+ // We can only be running Java code in the current thread if it
+ // is in Runnable state.
+ Thread* thread = Thread::Current();
+ if (thread == nullptr) {
+ return false;
+ }
+
+ ThreadState state = thread->GetState();
+ if (state != kRunnable) {
+ return false;
+ }
+
+ // Current thread is runnable.
+ // Make sure it has the mutator lock.
+ if (!Locks::mutator_lock_->IsSharedHeld(thread)) {
+ return false;
+ }
+
+ uintptr_t potential_method = 0;
+ uintptr_t return_pc = 0;
+
+ // Get the architecture specific method address and return address. These
+ // are in architecture specific files in arch/<arch>/fault_handler_<arch>.cc
+ GetMethodAndReturnPC(context, /*out*/potential_method, /*out*/return_pc);
+
+ // If we don't have a potential method, we're outta here.
+ if (potential_method == 0) {
+ return false;
+ }
+
+ // Verify that the potential method is indeed a method.
+ // TODO: check the GC maps to make sure it's an object.
+
+ mirror::Object* method_obj =
+ reinterpret_cast<mirror::Object*>(potential_method);
+
+ // Check that the class pointer inside the object is not null and is aligned.
+ mirror::Class* cls = method_obj->GetClass<kVerifyNone>();
+ if (cls == nullptr) {
+ return false;
+ }
+ if (!IsAligned<kObjectAlignment>(cls)) {
+ return false;
+ }
+
+
+ if (!VerifyClassClass(cls)) {
+ return false;
+ }
+
+ // Now make sure the class is a mirror::ArtMethod.
+ if (!cls->IsArtMethodClass()) {
+ return false;
+ }
+
+ // We can be certain that this is a method now. Check if we have a GC map
+ // at the return PC address.
+ mirror::ArtMethod* method =
+ reinterpret_cast<mirror::ArtMethod*>(potential_method);
+ return method->ToDexPc(return_pc, false) != DexFile::kDexNoIndex;
+}
+
+//
+// Null pointer fault handler
+//
+
+NullPointerHandler::NullPointerHandler(FaultManager* manager) {
+ manager->AddHandler(this);
+}
+
+//
+// Suspension fault handler
+//
+
+SuspensionHandler::SuspensionHandler(FaultManager* manager) {
+ manager->AddHandler(this);
+}
+
+//
+// Stack overflow fault handler
+//
+
+StackOverflowHandler::StackOverflowHandler(FaultManager* manager) {
+ manager->AddHandler(this);
+}
+} // namespace art
+
diff --git a/runtime/fault_handler.h b/runtime/fault_handler.h
new file mode 100644
index 0000000..9fe6e9a
--- /dev/null
+++ b/runtime/fault_handler.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#ifndef ART_RUNTIME_FAULT_HANDLER_H_
+#define ART_RUNTIME_FAULT_HANDLER_H_
+
+#include <signal.h>
+#include <vector>
+#include <setjmp.h>
+#include <stdint.h>
+
+#include "base/mutex.h" // For annotalysis.
+
+namespace art {
+class FaultHandler;
+
+class FaultManager {
+ public:
+ FaultManager();
+ ~FaultManager();
+
+ void Init();
+
+ void HandleFault(int sig, siginfo_t* info, void* context);
+ void AddHandler(FaultHandler* handler);
+ void RemoveHandler(FaultHandler* handler);
+
+ private:
+ bool IsInGeneratedCode(void *context) NO_THREAD_SAFETY_ANALYSIS;
+ void GetMethodAndReturnPC(void* context, uintptr_t& method, uintptr_t& return_pc);
+
+ typedef std::vector<FaultHandler*> Handlers;
+ Handlers handlers_;
+ struct sigaction oldaction_;
+};
+
+class FaultHandler {
+ public:
+ FaultHandler() : manager_(nullptr) {}
+ explicit FaultHandler(FaultManager* manager) : manager_(manager) {}
+ virtual ~FaultHandler() {}
+
+ virtual bool Action(int sig, siginfo_t* siginfo, void* context) = 0;
+ protected:
+ FaultManager* const manager_;
+};
+
+class NullPointerHandler FINAL : public FaultHandler {
+ public:
+ NullPointerHandler() {}
+ explicit NullPointerHandler(FaultManager* manager);
+
+ bool Action(int sig, siginfo_t* siginfo, void* context) OVERRIDE;
+};
+
+class SuspensionHandler FINAL : public FaultHandler {
+ public:
+ SuspensionHandler() {}
+ explicit SuspensionHandler(FaultManager* manager);
+
+ bool Action(int sig, siginfo_t* siginfo, void* context) OVERRIDE;
+};
+
+class StackOverflowHandler FINAL : public FaultHandler {
+ public:
+ StackOverflowHandler() {}
+ explicit StackOverflowHandler(FaultManager* manager);
+
+ bool Action(int sig, siginfo_t* siginfo, void* context) OVERRIDE;
+};
+
+// Statically allocated so that the signal handler can get access to it.
+extern FaultManager fault_manager;
+
+} // namespace art
+#endif // ART_RUNTIME_FAULT_HANDLER_H_
+
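
Handlers register themselves with the FaultManager passed to their constructor; HandleFault then asks each registered handler in turn until one claims the fault, falling back to the previous SIGSEGV action otherwise. A hypothetical extra handler (not part of this change) would follow the same shape:

class MyHandler FINAL : public FaultHandler {
 public:
  explicit MyHandler(FaultManager* manager) : FaultHandler(manager) {
    manager->AddHandler(this);  // Same registration pattern as the built-in handlers.
  }

  bool Action(int sig, siginfo_t* siginfo, void* context) OVERRIDE {
    return false;  // Not ours; let the next handler (or the old signal handler) run.
  }
};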
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index ca5b5a9..5480639 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -67,6 +67,37 @@ static bool GenerateImage(const std::string& image_file_name, std::string* error
arg_vector.push_back("--runtime-arg");
arg_vector.push_back("-Xmx64m");
+ arg_vector.push_back("--runtime-arg");
+ std::string checkstr = "-implicit-checks";
+ int nchecks = 0;
+ char checksep = ':';
+
+ if (!Runtime::Current()->ExplicitNullChecks()) {
+ checkstr += checksep;
+ checksep = ',';
+ checkstr += "null";
+ ++nchecks;
+ }
+ if (!Runtime::Current()->ExplicitSuspendChecks()) {
+ checkstr += checksep;
+ checksep = ',';
+ checkstr += "suspend";
+ ++nchecks;
+ }
+
+ if (!Runtime::Current()->ExplicitStackOverflowChecks()) {
+ checkstr += checksep;
+ checksep = ',';
+ checkstr += "stack";
+ ++nchecks;
+ }
+
+ if (nchecks == 0) {
+ checkstr += ":none";
+ }
+
+ arg_vector.push_back(checkstr);
+
for (size_t i = 0; i < boot_class_path.size(); i++) {
arg_vector.push_back(std::string("--dex-file=") + boot_class_path[i]);
}
diff --git a/runtime/mirror/art_method.cc b/runtime/mirror/art_method.cc
index fe27992..e8a0891 100644
--- a/runtime/mirror/art_method.cc
+++ b/runtime/mirror/art_method.cc
@@ -157,7 +157,7 @@ uintptr_t ArtMethod::NativePcOffset(const uintptr_t pc) {
return pc - reinterpret_cast<uintptr_t>(code);
}
-uint32_t ArtMethod::ToDexPc(const uintptr_t pc) {
+uint32_t ArtMethod::ToDexPc(const uintptr_t pc, bool abort_on_failure) {
if (IsPortableCompiled()) {
// Portable doesn't use the machine pc, we just use dex pc instead.
return static_cast<uint32_t>(pc);
@@ -183,9 +183,11 @@ uint32_t ArtMethod::ToDexPc(const uintptr_t pc) {
return cur.DexPc();
}
}
- LOG(FATAL) << "Failed to find Dex offset for PC offset " << reinterpret_cast<void*>(sought_offset)
- << "(PC " << reinterpret_cast<void*>(pc) << ", code=" << code
- << ") in " << PrettyMethod(this);
+ if (abort_on_failure) {
+ LOG(FATAL) << "Failed to find Dex offset for PC offset " << reinterpret_cast<void*>(sought_offset)
+ << "(PC " << reinterpret_cast<void*>(pc) << ", code=" << code
+ << ") in " << PrettyMethod(this);
+ }
return DexFile::kDexNoIndex;
}
diff --git a/runtime/mirror/art_method.h b/runtime/mirror/art_method.h
index a9da66c..84a3eb6 100644
--- a/runtime/mirror/art_method.h
+++ b/runtime/mirror/art_method.h
@@ -402,7 +402,8 @@ class MANAGED ArtMethod : public Object {
uintptr_t NativePcOffset(const uintptr_t pc) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Converts a native PC to a dex PC.
- uint32_t ToDexPc(const uintptr_t pc) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ uint32_t ToDexPc(const uintptr_t pc, bool abort_on_failure = true)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Converts a dex PC to a native PC.
uintptr_t ToNativePc(const uint32_t dex_pc) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/oat.cc b/runtime/oat.cc
index 454786d..d04514f 100644
--- a/runtime/oat.cc
+++ b/runtime/oat.cc
@@ -22,7 +22,7 @@
namespace art {
const uint8_t OatHeader::kOatMagic[] = { 'o', 'a', 't', '\n' };
-const uint8_t OatHeader::kOatVersion[] = { '0', '1', '7', '\0' };
+const uint8_t OatHeader::kOatVersion[] = { '0', '1', '8', '\0' };
OatHeader::OatHeader() {
memset(this, 0, sizeof(*this));
diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc
index 37db462..5717689 100644
--- a/runtime/parsed_options.cc
+++ b/runtime/parsed_options.cc
@@ -15,6 +15,9 @@
*/
#include "parsed_options.h"
+#ifdef HAVE_ANDROID_OS
+#include "cutils/properties.h"
+#endif
#include "debugger.h"
#include "monitor.h"
@@ -191,6 +194,36 @@ bool ParsedOptions::Parse(const Runtime::Options& options, bool ignore_unrecogni
profile_backoff_coefficient_ = 2.0;
profile_clock_source_ = kDefaultProfilerClockSource;
+ // Default to explicit checks. Switch them off with -implicit-checks:check1,check2,...
+ // or with setprop dalvik.vm.implicit_checks check1,check2,...
+#ifdef HAVE_ANDROID_OS
+ {
+ char buf[PROP_VALUE_MAX];
+ property_get("dalvik.vm.implicit_checks", buf, "none");
+ std::string checks(buf);
+ std::vector<std::string> checkvec;
+ Split(checks, ',', checkvec);
+ for (auto& str : checkvec) {
+ std::string val = Trim(str);
+ if (val == "none") {
+ explicit_checks_ = kExplicitNullCheck | kExplicitSuspendCheck |
+ kExplicitStackOverflowCheck;
+ } else if (val == "null") {
+ explicit_checks_ &= ~kExplicitNullCheck;
+ } else if (val == "suspend") {
+ explicit_checks_ &= ~kExplicitSuspendCheck;
+ } else if (val == "stack") {
+ explicit_checks_ &= ~kExplicitStackOverflowCheck;
+ } else if (val == "all") {
+ explicit_checks_ = 0;
+ }
+ }
+ }
+#else
+ explicit_checks_ = kExplicitNullCheck | kExplicitSuspendCheck |
+ kExplicitStackOverflowCheck;
+#endif
+
for (size_t i = 0; i < options.size(); ++i) {
if (true && options[0].first == "-Xzygote") {
LOG(INFO) << "option[" << i << "]=" << options[i].first;
@@ -470,6 +503,54 @@ bool ParsedOptions::Parse(const Runtime::Options& options, bool ignore_unrecogni
if (!ParseDouble(option, ':', 1.0, 10.0, &profile_backoff_coefficient_)) {
return false;
}
+ } else if (StartsWith(option, "-implicit-checks:")) {
+ std::string checks;
+ if (!ParseStringAfterChar(option, ':', &checks)) {
+ return false;
+ }
+ std::vector<std::string> checkvec;
+ Split(checks, ',', checkvec);
+ for (auto& str : checkvec) {
+ std::string val = Trim(str);
+ if (val == "none") {
+ explicit_checks_ = kExplicitNullCheck | kExplicitSuspendCheck |
+ kExplicitStackOverflowCheck;
+ } else if (val == "null") {
+ explicit_checks_ &= ~kExplicitNullCheck;
+ } else if (val == "suspend") {
+ explicit_checks_ &= ~kExplicitSuspendCheck;
+ } else if (val == "stack") {
+ explicit_checks_ &= ~kExplicitStackOverflowCheck;
+ } else if (val == "all") {
+ explicit_checks_ = 0;
+ } else {
+ return false;
+ }
+ }
+ } else if (StartsWith(option, "-explicit-checks:")) {
+ std::string checks;
+ if (!ParseStringAfterChar(option, ':', &checks)) {
+ return false;
+ }
+ std::vector<std::string> checkvec;
+ Split(checks, ',', checkvec);
+ for (auto& str : checkvec) {
+ std::string val = Trim(str);
+ if (val == "none") {
+ explicit_checks_ = 0;
+ } else if (val == "null") {
+ explicit_checks_ |= kExplicitNullCheck;
+ } else if (val == "suspend") {
+ explicit_checks_ |= kExplicitSuspendCheck;
+ } else if (val == "stack") {
+ explicit_checks_ |= kExplicitStackOverflowCheck;
+ } else if (val == "all") {
+ explicit_checks_ = kExplicitNullCheck | kExplicitSuspendCheck |
+ kExplicitStackOverflowCheck;
+ } else {
+ return false;
+ }
+ }
} else if (option == "-Xcompiler-option") {
i++;
if (i == options.size()) {
@@ -488,6 +569,7 @@ bool ParsedOptions::Parse(const Runtime::Options& options, bool ignore_unrecogni
StartsWith(option, "-da:") ||
StartsWith(option, "-enableassertions:") ||
StartsWith(option, "-disableassertions:") ||
+ (option == "--runtime-arg") ||
(option == "-esa") ||
(option == "-dsa") ||
(option == "-enablesystemassertions") ||
diff --git a/runtime/parsed_options.h b/runtime/parsed_options.h
index f07bba1..d6516a8 100644
--- a/runtime/parsed_options.h
+++ b/runtime/parsed_options.h
@@ -81,6 +81,11 @@ class ParsedOptions {
double profile_backoff_coefficient_;
ProfilerClockSource profile_clock_source_;
+ static constexpr uint32_t kExplicitNullCheck = 1;
+ static constexpr uint32_t kExplicitSuspendCheck = 2;
+ static constexpr uint32_t kExplicitStackOverflowCheck = 4;
+ uint32_t explicit_checks_;
+
private:
ParsedOptions() {}
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index d1c8370..1555bf2 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -123,7 +123,10 @@ Runtime::Runtime()
system_thread_group_(nullptr),
system_class_loader_(nullptr),
dump_gc_performance_on_shutdown_(false),
- preinitialization_transaction(nullptr) {
+ preinitialization_transaction(nullptr),
+ null_pointer_handler_(nullptr),
+ suspend_handler_(nullptr),
+ stack_overflow_handler_(nullptr) {
for (int i = 0; i < Runtime::kLastCalleeSaveType; i++) {
callee_save_methods_[i] = nullptr;
}
@@ -170,6 +173,10 @@ Runtime::~Runtime() {
// TODO: acquire a static mutex on Runtime to avoid racing.
CHECK(instance_ == nullptr || instance_ == this);
instance_ = nullptr;
+
+ delete null_pointer_handler_;
+ delete suspend_handler_;
+ delete stack_overflow_handler_;
}
struct AbortState {
@@ -515,6 +522,27 @@ bool Runtime::Init(const Options& raw_options, bool ignore_unrecognized) {
GetInstrumentation()->ForceInterpretOnly();
}
+ if (options->explicit_checks_ != (ParsedOptions::kExplicitSuspendCheck |
+ ParsedOptions::kExplicitNullCheck |
+ ParsedOptions::kExplicitStackOverflowCheck)) {
+ // Initialize the fault manager.
+ fault_manager.Init();
+
+ // These need to be in a specific order. The null pointer check must be
+ // the last in the list.
+ if ((options->explicit_checks_ & ParsedOptions::kExplicitSuspendCheck) == 0) {
+ suspend_handler_ = new SuspensionHandler(&fault_manager);
+ }
+
+ if ((options->explicit_checks_ & ParsedOptions::kExplicitStackOverflowCheck) == 0) {
+ stack_overflow_handler_ = new StackOverflowHandler(&fault_manager);
+ }
+
+ if ((options->explicit_checks_ & ParsedOptions::kExplicitNullCheck) == 0) {
+ null_pointer_handler_ = new NullPointerHandler(&fault_manager);
+ }
+ }
+
heap_ = new gc::Heap(options->heap_initial_size_,
options->heap_growth_limit_,
options->heap_min_free_,
diff --git a/runtime/runtime.h b/runtime/runtime.h
index 109f031..eeaaa2b 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -36,6 +36,7 @@
#include "object_callbacks.h"
#include "runtime_stats.h"
#include "safe_map.h"
+#include "fault_handler.h"
namespace art {
@@ -404,6 +405,18 @@ class Runtime {
return fault_message_;
}
+ bool ExplicitNullChecks() const {
+ return null_pointer_handler_ == nullptr;
+ }
+
+ bool ExplicitSuspendChecks() const {
+ return suspend_handler_ == nullptr;
+ }
+
+ bool ExplicitStackOverflowChecks() const {
+ return stack_overflow_handler_ == nullptr;
+ }
+
private:
static void InitPlatformSignalHandlers();
@@ -536,6 +549,9 @@ class Runtime {
// Transaction used for pre-initializing classes at compilation time.
Transaction* preinitialization_transaction;
+ NullPointerHandler* null_pointer_handler_;
+ SuspensionHandler* suspend_handler_;
+ StackOverflowHandler* stack_overflow_handler_;
DISALLOW_COPY_AND_ASSIGN(Runtime);
};
diff --git a/runtime/stack.cc b/runtime/stack.cc
index 26b4de3..f397afa 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -92,11 +92,11 @@ StackVisitor::StackVisitor(Thread* thread, Context* context)
DCHECK(thread == Thread::Current() || thread->IsSuspended()) << *thread;
}
-uint32_t StackVisitor::GetDexPc() const {
+uint32_t StackVisitor::GetDexPc(bool abort_on_failure) const {
if (cur_shadow_frame_ != NULL) {
return cur_shadow_frame_->GetDexPC();
} else if (cur_quick_frame_ != NULL) {
- return GetMethod()->ToDexPc(cur_quick_frame_pc_);
+ return GetMethod()->ToDexPc(cur_quick_frame_pc_, abort_on_failure);
} else {
return 0;
}
diff --git a/runtime/stack.h b/runtime/stack.h
index f840f67..4ee5de1 100644
--- a/runtime/stack.h
+++ b/runtime/stack.h
@@ -520,7 +520,7 @@ class StackVisitor {
return cur_shadow_frame_ != nullptr;
}
- uint32_t GetDexPc() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ uint32_t GetDexPc(bool abort_on_failure = true) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
mirror::Object* GetThisObject() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/thread.cc b/runtime/thread.cc
index fbdf95f..f4b9d9a 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -302,6 +302,7 @@ void Thread::Init(ThreadList* thread_list, JavaVMExt* java_vm) {
SetUpAlternateSignalStack();
InitCpu();
InitTlsEntryPoints();
+ RemoveSuspendTrigger();
InitCardTable();
InitTid();
// Set pthread_self_ ahead of pthread_setspecific, that makes Thread::Current function, this
@@ -576,6 +577,7 @@ void Thread::ModifySuspendCount(Thread* self, int delta, bool for_debugger) {
AtomicClearFlag(kSuspendRequest);
} else {
AtomicSetFlag(kSuspendRequest);
+ TriggerSuspend();
}
}
@@ -643,6 +645,7 @@ bool Thread::RequestCheckpoint(Closure* function) {
checkpoint_functions_[available_checkpoint] = nullptr;
} else {
CHECK_EQ(ReadFlag(kCheckpointRequest), true);
+ TriggerSuspend();
}
return succeeded == 0;
}
@@ -1774,6 +1777,7 @@ void Thread::DumpThreadOffset(std::ostream& os, uint32_t offset, size_t size_of_
// DO_THREAD_OFFSET(top_of_managed_stack_);
// DO_THREAD_OFFSET(top_of_managed_stack_pc_);
DO_THREAD_OFFSET(top_sirt_);
+ DO_THREAD_OFFSET(suspend_trigger_);
#undef DO_THREAD_OFFSET
size_t entry_point_count = arraysize(gThreadEntryPointInfo);
diff --git a/runtime/thread.h b/runtime/thread.h
index eaffc3e..264a927 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -433,6 +433,10 @@ class PACKED(4) Thread {
return ThreadOffset(OFFSETOF_MEMBER(Thread, state_and_flags_));
}
+ static ThreadOffset ThreadSuspendTriggerOffset() {
+ return ThreadOffset(OFFSETOF_MEMBER(Thread, suspend_trigger_));
+ }
+
// Size of stack less any space reserved for stack overflow
size_t GetStackSize() const {
return stack_size_ - (stack_end_ - stack_begin_);
@@ -824,6 +828,10 @@ class PACKED(4) Thread {
PortableEntryPoints portable_entrypoints_;
QuickEntryPoints quick_entrypoints_;
+ // Setting this to 0 will trigger a SEGV and thus a suspend check. It is normally
+ // set to the address of itself.
+ uintptr_t* suspend_trigger_;
+
// How many times has our pthread key's destructor been called?
uint32_t thread_exit_check_count_;
@@ -838,6 +846,20 @@ class PACKED(4) Thread {
mirror::Object* AllocTlab(size_t bytes);
void SetTlab(byte* start, byte* end);
+ // Remove the suspend trigger for this thread by making the suspend_trigger_ TLS value
+ // equal to a valid pointer.
+ // TODO: does this need to be atomic? I don't think so.
+ void RemoveSuspendTrigger() {
+ suspend_trigger_ = reinterpret_cast<uintptr_t*>(&suspend_trigger_);
+ }
+
+ // Trigger a suspend check by making the suspend_trigger_ TLS value an invalid pointer.
+ // The next time a suspend check is done, it will load from the value at this address
+ // and trigger a SIGSEGV.
+ void TriggerSuspend() {
+ suspend_trigger_ = nullptr;
+ }
+
// Thread-local rosalloc runs. There are 34 size brackets in rosalloc
// runs (RosAlloc::kNumOfSizeBrackets). We can't refer to the
// RosAlloc class due to a header file circular dependency issue.
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index ac5750b..ec610e1 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -753,6 +753,7 @@ void ThreadList::Register(Thread* self) {
self->debug_suspend_count_ = debug_suspend_all_count_;
if (self->suspend_count_ > 0) {
self->AtomicSetFlag(kSuspendRequest);
+ self->TriggerSuspend();
}
CHECK(!Contains(self));
list_.push_back(self);