diff options
-rw-r--r-- | compiler/dex/quick/arm/call_arm.cc | 7 | +++----
-rw-r--r-- | runtime/gc/collector/garbage_collector.h | 2 | +-
2 files changed, 4 insertions, 5 deletions
diff --git a/compiler/dex/quick/arm/call_arm.cc b/compiler/dex/quick/arm/call_arm.cc
index 175fc06..953e04f 100644
--- a/compiler/dex/quick/arm/call_arm.cc
+++ b/compiler/dex/quick/arm/call_arm.cc
@@ -359,8 +359,8 @@ void ArmMir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) {
     NewLIR1(kThumb2VPushCS, num_fp_spills_);
   }
-  // TODO: 64 bit will be different code.
-  const int frame_size_without_spills = frame_size_ - spill_count * 4;
+  const int spill_size = spill_count * 4;
+  const int frame_size_without_spills = frame_size_ - spill_size;
   if (!skip_overflow_check) {
     if (Runtime::Current()->ExplicitStackOverflowChecks()) {
       class StackOverflowSlowPath : public LIRSlowPath {
@@ -392,8 +392,7 @@ void ArmMir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) {
       OpRegRegImm(kOpSub, rs_rARM_LR, rs_rARM_SP, frame_size_without_spills);
       LIR* branch = OpCmpBranch(kCondUlt, rs_rARM_LR, rs_r12, nullptr);
       // Need to restore LR since we used it as a temp.
-      AddSlowPath(new(arena_)StackOverflowSlowPath(this, branch, true,
-                                                   frame_size_without_spills));
+      AddSlowPath(new(arena_)StackOverflowSlowPath(this, branch, true, spill_size));
       OpRegCopy(rs_rARM_SP, rs_rARM_LR);  // Establish stack
     } else {
       // If the frame is small enough we are guaranteed to have enough space that remains to
diff --git a/runtime/gc/collector/garbage_collector.h b/runtime/gc/collector/garbage_collector.h
index ccfa9cf..5b7b8a2 100644
--- a/runtime/gc/collector/garbage_collector.h
+++ b/runtime/gc/collector/garbage_collector.h
@@ -128,7 +128,7 @@ class GarbageCollector {
   // Mark all reachable objects, done concurrently.
   virtual void MarkingPhase() = 0;

-  // Only called for concurrent GCs.
+  // Phase of the GC which is run with mutator lock exclusively held.
   virtual void PausePhase();

   // Called with mutators running.