diff options
author | Andreas Gampe <agampe@google.com> | 2014-06-18 17:01:15 -0700 |
---|---|---|
committer | Andreas Gampe <agampe@google.com> | 2014-06-18 17:11:51 -0700 |
commit | 7cd26f355ba83be75b72ed628ed5ee84a3245c4f (patch) | |
tree | 94152cdd06143bec8c5491dba354cb78214b48c3 /compiler | |
parent | 0c29909cbde112bc9c04da4ce81421e1a0b39f36 (diff) | |
download | art-7cd26f355ba83be75b72ed628ed5ee84a3245c4f.zip art-7cd26f355ba83be75b72ed628ed5ee84a3245c4f.tar.gz art-7cd26f355ba83be75b72ed628ed5ee84a3245c4f.tar.bz2 |
ART: Target-dependent stack overflow, less check elision
Refactor the separate stack overflow reserved sizes from thread.h
into instruction_set.h and make sure they're used in the compiler.
Refactor the decision on when to elide stack overflow checks:
especially with large interpreter stack frames, it is not a good
idea to elide checks when the frame size is even close to the
reserved size. Checks are currently enforced when the frame size is
>= 2KB, while it is guaranteed that frame sizes of 1KB and below
will elide the checks (numbers from experience).
Bug: 15728765
Change-Id: I016bfd3d8218170cbccbd123ed5e2203db167c06
Diffstat (limited to 'compiler')
-rw-r--r-- | compiler/dex/quick/arm/call_arm.cc | 10 | ||||
-rw-r--r-- | compiler/dex/quick/arm64/call_arm64.cc | 10 | ||||
-rw-r--r-- | compiler/dex/quick/mips/call_mips.cc | 3 | ||||
-rw-r--r-- | compiler/dex/quick/mir_to_lir.h | 31 | ||||
-rw-r--r-- | compiler/dex/quick/x86/call_x86.cc | 4 |
5 files changed, 44 insertions, 14 deletions
diff --git a/compiler/dex/quick/arm/call_arm.cc b/compiler/dex/quick/arm/call_arm.cc index 5466abd..2bdf3e4 100644 --- a/compiler/dex/quick/arm/call_arm.cc +++ b/compiler/dex/quick/arm/call_arm.cc @@ -356,11 +356,11 @@ void ArmMir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) { * We can safely skip the stack overflow check if we're * a leaf *and* our frame size < fudge factor. */ - bool skip_overflow_check = (mir_graph_->MethodIsLeaf() && - (static_cast<size_t>(frame_size_) < - Thread::kStackOverflowReservedBytes)); + bool skip_overflow_check = mir_graph_->MethodIsLeaf() && !IsLargeFrame(frame_size_, kArm); NewLIR0(kPseudoMethodEntry); - bool large_frame = (static_cast<size_t>(frame_size_) > Thread::kStackOverflowReservedUsableBytes); + constexpr size_t kStackOverflowReservedUsableBytes = kArmStackOverflowReservedBytes - + Thread::kStackOverflowSignalReservedBytes; + bool large_frame = (static_cast<size_t>(frame_size_) > kStackOverflowReservedUsableBytes); if (!skip_overflow_check) { if (Runtime::Current()->ExplicitStackOverflowChecks()) { if (!large_frame) { @@ -381,7 +381,7 @@ void ArmMir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) { // This is done before the callee save instructions to avoid any possibility // of these overflowing. This uses r12 and that's never saved in a callee // save. 
- OpRegRegImm(kOpSub, rs_r12, rs_rARM_SP, Thread::kStackOverflowReservedBytes); + OpRegRegImm(kOpSub, rs_r12, rs_rARM_SP, kArmStackOverflowReservedBytes); Load32Disp(rs_r12, 0, rs_r12); MarkPossibleStackOverflowException(); } diff --git a/compiler/dex/quick/arm64/call_arm64.cc b/compiler/dex/quick/arm64/call_arm64.cc index f1748ef..35263ea 100644 --- a/compiler/dex/quick/arm64/call_arm64.cc +++ b/compiler/dex/quick/arm64/call_arm64.cc @@ -337,13 +337,13 @@ void Arm64Mir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) * We can safely skip the stack overflow check if we're * a leaf *and* our frame size < fudge factor. */ - bool skip_overflow_check = (mir_graph_->MethodIsLeaf() && - (static_cast<size_t>(frame_size_) < - Thread::kStackOverflowReservedBytes)); + bool skip_overflow_check = mir_graph_->MethodIsLeaf() && !IsLargeFrame(frame_size_, kArm64); NewLIR0(kPseudoMethodEntry); - const bool large_frame = (static_cast<size_t>(frame_size_) > Thread::kStackOverflowReservedUsableBytes); + constexpr size_t kStackOverflowReservedUsableBytes = kArm64StackOverflowReservedBytes - + Thread::kStackOverflowSignalReservedBytes; + const bool large_frame = static_cast<size_t>(frame_size_) > kStackOverflowReservedUsableBytes; const int spill_count = num_core_spills_ + num_fp_spills_; const int spill_size = (spill_count * kArm64PointerSize + 15) & ~0xf; // SP 16 byte alignment. const int frame_size_without_spills = frame_size_ - spill_size; @@ -412,7 +412,7 @@ void Arm64Mir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) // Branch to throw target if there is not enough room. 
OpRegRegImm(kOpSub, rs_x9, rs_rA64_SP, frame_size_without_spills); LoadWordDisp(rs_rA64_SELF, Thread::StackEndOffset<8>().Int32Value(), rs_x8); - LIR* branch = OpCmpBranch(kCondUlt, rs_rA64_SP, rs_x8, nullptr); + LIR* branch = OpCmpBranch(kCondUlt, rs_x9, rs_x8, nullptr); AddSlowPath(new(arena_)StackOverflowSlowPath(this, branch, spill_size)); OpRegCopy(rs_rA64_SP, rs_x9); // Establish stack after checks. } else { diff --git a/compiler/dex/quick/mips/call_mips.cc b/compiler/dex/quick/mips/call_mips.cc index c734202..e53105f 100644 --- a/compiler/dex/quick/mips/call_mips.cc +++ b/compiler/dex/quick/mips/call_mips.cc @@ -305,8 +305,7 @@ void MipsMir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) * We can safely skip the stack overflow check if we're * a leaf *and* our frame size < fudge factor. */ - bool skip_overflow_check = (mir_graph_->MethodIsLeaf() && - (static_cast<size_t>(frame_size_) < Thread::kStackOverflowReservedBytes)); + bool skip_overflow_check = mir_graph_->MethodIsLeaf() && !IsLargeFrame(frame_size_, kMips); NewLIR0(kPseudoMethodEntry); RegStorage check_reg = AllocTemp(); RegStorage new_sp = AllocTemp(); diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h index ca65432..f70087d 100644 --- a/compiler/dex/quick/mir_to_lir.h +++ b/compiler/dex/quick/mir_to_lir.h @@ -25,6 +25,7 @@ #include "dex/backend.h" #include "dex/quick/resource_mask.h" #include "driver/compiler_driver.h" +#include "instruction_set.h" #include "leb128.h" #include "safe_map.h" #include "utils/array_ref.h" @@ -206,6 +207,36 @@ Mir2Lir* X86_64CodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_grap #define SLOW_TYPE_PATH (cu_->enable_debug & (1 << kDebugSlowTypePath)) #define EXERCISE_SLOWEST_STRING_PATH (cu_->enable_debug & (1 << kDebugSlowestStringPath)) +// Size of a frame that we definitely consider large. Anything larger than this should +// definitely get a stack overflow check. 
+static constexpr size_t kLargeFrameSize = 2 * KB; + +// Size of a frame that should be small. Anything leaf method smaller than this should run +// without a stack overflow check. +// The constant is from experience with frameworks code. +static constexpr size_t kSmallFrameSize = 1 * KB; + +// Determine whether a frame is small or large, used in the decision on whether to elide a +// stack overflow check on method entry. +// +// A frame is considered large when it's either above kLargeFrameSize, or a quarter of the +// overflow-usable stack space. +static constexpr bool IsLargeFrame(size_t size, InstructionSet isa) { + return size >= kLargeFrameSize || size >= GetStackOverflowReservedBytes(isa) / 4; +} + +// We want to ensure that on all systems kSmallFrameSize will lead to false in IsLargeFrame. +COMPILE_ASSERT(!IsLargeFrame(kSmallFrameSize, kArm), + kSmallFrameSize_is_not_a_small_frame_arm); +COMPILE_ASSERT(!IsLargeFrame(kSmallFrameSize, kArm64), + kSmallFrameSize_is_not_a_small_frame_arm64); +COMPILE_ASSERT(!IsLargeFrame(kSmallFrameSize, kMips), + kSmallFrameSize_is_not_a_small_frame_mips); +COMPILE_ASSERT(!IsLargeFrame(kSmallFrameSize, kX86), + kSmallFrameSize_is_not_a_small_frame_x86); +COMPILE_ASSERT(!IsLargeFrame(kSmallFrameSize, kX86_64), + kSmallFrameSize_is_not_a_small_frame_x64_64); + class Mir2Lir : public Backend { public: /* diff --git a/compiler/dex/quick/x86/call_x86.cc b/compiler/dex/quick/x86/call_x86.cc index dd5dab2..28195ab 100644 --- a/compiler/dex/quick/x86/call_x86.cc +++ b/compiler/dex/quick/x86/call_x86.cc @@ -235,8 +235,8 @@ void X86Mir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) { * We can safely skip the stack overflow check if we're * a leaf *and* our frame size < fudge factor. 
*/ - const bool skip_overflow_check = (mir_graph_->MethodIsLeaf() && - (static_cast<size_t>(frame_size_) < Thread::kStackOverflowReservedBytes)); + const bool skip_overflow_check = mir_graph_->MethodIsLeaf() && + !IsLargeFrame(frame_size_, Gen64Bit() ? kX86_64 : kX86); NewLIR0(kPseudoMethodEntry); /* Spill core callee saves */ SpillCoreRegs(); |