author    Andreas Gampe <agampe@google.com>  2014-05-28 22:43:01 -0700
committer Andreas Gampe <agampe@google.com>  2014-05-29 20:50:49 -0700
commit    cf4035a4c41ccfcc3e89a0cee25f5218a11b0705 (patch)
tree      323d9e98e6129c67e464a3e6857ee02593a2f2c2 /runtime
parent    29b53d3d715b1ec19349e8cbf7c5e4ff529bd5fe (diff)
ART: Use StackReference in Quick Stack Frame
The method reference at the bottom of a quick frame is a stack reference and
not a native pointer. This is important for 64-bit architectures, where the
two notions do not coincide.

Change key methods to take a StackReference<mirror::ArtMethod>* parameter
instead of mirror::ArtMethod**. Make changes to the invoke stubs for 64-bit
architectures, change the frame setup for JNI code (both generic JNI and the
JNI compilers), and tie up loose ends.

Tested on x86 and x86-64 with host tests. On x86-64, tests succeed with the
JNI compiler activated. x86-64 QCG was not tested.

Tested on ARM32 with device tests.

Fix ARM64 not saving x19 (used for wSUSPEND) on upcalls. Tested on ARM64 in
interpreter-only + generic-jni mode.

Fix the ARM64 JNI compiler to work with this CL. Tested on ARM64 in
interpreter-only + jni compiler mode.

Change-Id: I77931a0cbadd04d163b3eb8d6f6a6f8740578f13
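For orientation, a minimal sketch of the distinction the message relies on
(assumed member layout, not the CL's actual header): a StackReference<> is a
4-byte compressed-reference slot, while a native ArtMethod* is 8 bytes wide
on 64-bit targets, so the two types cannot alias the same frame slot.

    #include <cstdint>

    namespace mirror { class ArtMethod; }

    // Hedged sketch, not ART's real definition: the slot is 32 bits wide
    // regardless of the native pointer width.
    template <typename MirrorType>
    class StackReference {
     public:
      MirrorType* AsMirrorPtr() const {
        return reinterpret_cast<MirrorType*>(static_cast<uintptr_t>(reference_));
      }
      void Assign(MirrorType* ptr) {
        reference_ = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(ptr));
      }
     private:
      uint32_t reference_;  // compressed reference, always 4 bytes
    };

    static_assert(sizeof(StackReference<mirror::ArtMethod>) == 4,
                  "stack slot stays 4 bytes on LP64");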
Diffstat (limited to 'runtime')
-rw-r--r--  runtime/arch/arch_test.cc                                        17
-rw-r--r--  runtime/arch/arm64/asm_support_arm64.h                            2
-rw-r--r--  runtime/arch/arm64/quick_entrypoints_arm64.S                     36
-rw-r--r--  runtime/arch/stub_test.cc                                        10
-rw-r--r--  runtime/arch/x86_64/asm_support_x86_64.h                          2
-rw-r--r--  runtime/arch/x86_64/quick_entrypoints_x86_64.S                   28
-rw-r--r--  runtime/entrypoints/quick/callee_save_frame.h                     4
-rw-r--r--  runtime/entrypoints/quick/quick_alloc_entrypoints.cc             22
-rw-r--r--  runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc    2
-rw-r--r--  runtime/entrypoints/quick/quick_dexcache_entrypoints.cc          15
-rw-r--r--  runtime/entrypoints/quick/quick_field_entrypoints.cc             32
-rw-r--r--  runtime/entrypoints/quick/quick_fillarray_entrypoints.cc         2
-rw-r--r--  runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc   7
-rw-r--r--  runtime/entrypoints/quick/quick_jni_entrypoints.cc               4
-rw-r--r--  runtime/entrypoints/quick/quick_lock_entrypoints.cc              6
-rw-r--r--  runtime/entrypoints/quick/quick_thread_entrypoints.cc            2
-rw-r--r--  runtime/entrypoints/quick/quick_throw_entrypoints.cc             20
-rw-r--r--  runtime/entrypoints/quick/quick_trampoline_entrypoints.cc       151
-rw-r--r--  runtime/exception_test.cc                                         4
-rw-r--r--  runtime/fault_handler.cc                                          3
-rw-r--r--  runtime/handle_scope.h                                           14
-rw-r--r--  runtime/mirror/art_method-inl.h                                  12
-rw-r--r--  runtime/native/scoped_fast_native_object_access.h                 2
-rw-r--r--  runtime/quick_exception_handler.cc                                2
-rw-r--r--  runtime/quick_exception_handler.h                                 5
-rw-r--r--  runtime/stack.cc                                                 14
-rw-r--r--  runtime/stack.h                                                  27
-rw-r--r--  runtime/thread.cc                                                13
-rw-r--r--  runtime/thread.h                                                  2
29 files changed, 255 insertions, 205 deletions
diff --git a/runtime/arch/arch_test.cc b/runtime/arch/arch_test.cc
index 45ff21f..5220dc3 100644
--- a/runtime/arch/arch_test.cc
+++ b/runtime/arch/arch_test.cc
@@ -337,30 +337,22 @@ TEST_F(ArchTest, X86_64) {
// The following tests are all for the running architecture. So we get away
// with just including it and not undefining it every time.
-
#if defined(__arm__)
#include "arch/arm/asm_support_arm.h"
-#undef ART_RUNTIME_ARCH_ARM_ASM_SUPPORT_ARM_H_
#elif defined(__aarch64__)
#include "arch/arm64/asm_support_arm64.h"
-#undef ART_RUNTIME_ARCH_ARM64_ASM_SUPPORT_ARM64_H_
#elif defined(__mips__)
#include "arch/mips/asm_support_mips.h"
-#undef ART_RUNTIME_ARCH_MIPS_ASM_SUPPORT_MIPS_H_
#elif defined(__i386__)
#include "arch/x86/asm_support_x86.h"
-#undef ART_RUNTIME_ARCH_X86_ASM_SUPPORT_X86_H_
#elif defined(__x86_64__)
#include "arch/x86_64/asm_support_x86_64.h"
-#undef ART_RUNTIME_ARCH_X86_64_ASM_SUPPORT_X86_64_H_
#else
// This happens for the host test.
#ifdef __LP64__
#include "arch/x86_64/asm_support_x86_64.h"
-#undef ART_RUNTIME_ARCH_X86_64_ASM_SUPPORT_X86_64_H_
#else
#include "arch/x86/asm_support_x86.h"
-#undef ART_RUNTIME_ARCH_X86_ASM_SUPPORT_X86_H_
#endif
#endif
@@ -436,4 +428,13 @@ TEST_F(ArchTest, HeapReferenceSize) {
#endif
}
+TEST_F(ArchTest, StackReferenceSize) {
+#if defined(STACK_REFERENCE_SIZE)
+ EXPECT_EQ(sizeof(StackReference<mirror::Object>),
+ static_cast<size_t>(STACK_REFERENCE_SIZE));
+#else
+ LOG(INFO) << "No expected StackReference Size #define found.";
+#endif
+}
+
} // namespace art
diff --git a/runtime/arch/arm64/asm_support_arm64.h b/runtime/arch/arm64/asm_support_arm64.h
index e55885f..422e20cf 100644
--- a/runtime/arch/arm64/asm_support_arm64.h
+++ b/runtime/arch/arm64/asm_support_arm64.h
@@ -43,5 +43,7 @@
// Expected size of a heap reference
#define HEAP_REFERENCE_SIZE 4
+// Expected size of a stack reference
+#define STACK_REFERENCE_SIZE 4
#endif // ART_RUNTIME_ARCH_ARM64_ASM_SUPPORT_ARM64_H_
diff --git a/runtime/arch/arm64/quick_entrypoints_arm64.S b/runtime/arch/arm64/quick_entrypoints_arm64.S
index 97caa1f..28bf856 100644
--- a/runtime/arch/arm64/quick_entrypoints_arm64.S
+++ b/runtime/arch/arm64/quick_entrypoints_arm64.S
@@ -559,8 +559,9 @@ INVOKE_TRAMPOLINE art_quick_invoke_virtual_trampoline_with_access_check, artInvo
.macro INVOKE_STUB_CREATE_FRAME
-SAVE_SIZE=5*8 // x4, x5, SP, LR & FP saved.
-SAVE_SIZE_AND_METHOD=SAVE_SIZE+8
+SAVE_SIZE=6*8 // x4, x5, x19(wSUSPEND), SP, LR & FP saved.
+SAVE_SIZE_AND_METHOD=SAVE_SIZE+STACK_REFERENCE_SIZE
+
mov x9, sp // Save stack pointer.
.cfi_register sp,x9
@@ -574,8 +575,9 @@ SAVE_SIZE_AND_METHOD=SAVE_SIZE+8
.cfi_def_cfa_register x10 // before this.
.cfi_adjust_cfa_offset SAVE_SIZE
- str x9, [x10, #32] // Save old stack pointer.
+ stp x9, x19, [x10, #32] // Save old stack pointer and x19(wSUSPEND)
.cfi_rel_offset sp, 32
+ .cfi_rel_offset x19, 40
stp x4, x5, [x10, #16] // Save result and shorty addresses.
.cfi_rel_offset x4, 16
@@ -597,7 +599,7 @@ SAVE_SIZE_AND_METHOD=SAVE_SIZE+8
// W2 - args length
// X9 - destination address.
// W10 - temporary
- add x9, sp, #8 // Destination address is bottom of stack + NULL.
+ add x9, sp, #4 // Destination address is bottom of stack + NULL.
// Use \@ to differentiate between macro invocations.
.LcopyParams\@:
@@ -611,9 +613,12 @@ SAVE_SIZE_AND_METHOD=SAVE_SIZE+8
.LendCopyParams\@:
- // Store NULL into Method* at bottom of frame.
- str xzr, [sp]
+ // Store NULL into StackReference<Method>* at bottom of frame.
+ str wzr, [sp]
+#if (STACK_REFERENCE_SIZE != 4)
+#error "STACK_REFERENCE_SIZE(ARM64) size not as expected."
+#endif
.endm
.macro INVOKE_STUB_CALL_AND_RETURN
@@ -651,7 +656,8 @@ SAVE_SIZE_AND_METHOD=SAVE_SIZE+8
str x0, [x4]
.Lexit_art_quick_invoke_stub\@:
- ldr x2, [x29, #32] // Restore stack pointer.
+ ldp x2, x19, [x29, #32] // Restore stack pointer and x19.
+ .cfi_restore x19
mov sp, x2
.cfi_restore sp
@@ -687,7 +693,7 @@ SAVE_SIZE_AND_METHOD=SAVE_SIZE+8
* | uint32_t out[n-1] |
* | : : | Outs
* | uint32_t out[0] |
- * | ArtMethod* NULL | <- SP
+ * | StackRef<ArtMethod> | <- SP value=null
* +----------------------+
*
* Outgoing registers:
@@ -1289,7 +1295,7 @@ END \name
.extern \entrypoint
ENTRY \name
SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save callee saves in case of GC
- ldr x1, [sp, #FRAME_SIZE_REFS_ONLY_CALLEE_SAVE] // Load referrer
+ ldr w1, [sp, #FRAME_SIZE_REFS_ONLY_CALLEE_SAVE] // Load referrer
mov x2, xSELF // pass Thread::Current
mov x3, sp // pass SP
bl \entrypoint // (uint32_t type_idx, Method* method, Thread*, SP)
@@ -1303,7 +1309,7 @@ END \name
.extern \entrypoint
ENTRY \name
SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save callee saves in case of GC
- ldr x2, [sp, #FRAME_SIZE_REFS_ONLY_CALLEE_SAVE] // Load referrer
+ ldr w2, [sp, #FRAME_SIZE_REFS_ONLY_CALLEE_SAVE] // Load referrer
mov x3, xSELF // pass Thread::Current
mov x4, sp // pass SP
bl \entrypoint
@@ -1317,7 +1323,7 @@ END \name
.extern \entrypoint
ENTRY \name
SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save callee saves in case of GC
- ldr x3, [sp, #FRAME_SIZE_REFS_ONLY_CALLEE_SAVE] // Load referrer
+ ldr w3, [sp, #FRAME_SIZE_REFS_ONLY_CALLEE_SAVE] // Load referrer
mov x4, xSELF // pass Thread::Current
mov x5, sp // pass SP
bl \entrypoint
@@ -1356,7 +1362,7 @@ THREE_ARG_REF_DOWNCALL art_quick_set_obj_instance, artSetObjInstanceFromCode, RE
ENTRY art_quick_set64_static
SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save callee saves in case of GC
mov x3, x1 // Store value
- ldr x1, [sp, #FRAME_SIZE_REFS_ONLY_CALLEE_SAVE] // Load referrer
+ ldr w1, [sp, #FRAME_SIZE_REFS_ONLY_CALLEE_SAVE] // Load referrer
mov x2, x3 // Put value param
mov x3, xSELF // pass Thread::Current
mov x4, sp // pass SP
@@ -1420,7 +1426,7 @@ END art_quick_proxy_invoke_handler
* dex method index.
*/
ENTRY art_quick_imt_conflict_trampoline
- ldr x0, [sp, #0] // load caller Method*
+ ldr w0, [sp, #0] // load caller Method*
ldr w0, [x0, #METHOD_DEX_CACHE_METHODS_OFFSET] // load dex_cache_resolved_methods
add x0, x0, #OBJECT_ARRAY_DATA_OFFSET // get starting address of data
ldr w0, [x0, x12, lsl 2] // load the target method
@@ -1434,7 +1440,7 @@ ENTRY art_quick_resolution_trampoline
bl artQuickResolutionTrampoline // (called, receiver, Thread*, SP)
cbz x0, 1f
mov x9, x0 // Remember returned code pointer in x9.
- ldr x0, [sp, #0] // artQuickResolutionTrampoline puts called method in *SP.
+ ldr w0, [sp, #0] // artQuickResolutionTrampoline puts called method in *SP.
RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME
br x9
1:
@@ -1484,7 +1490,7 @@ END art_quick_resolution_trampoline
* | D2 | float arg 3
* | D1 | float arg 2
* | D0 | float arg 1
- * | RDI/Method* | <- X0
+ * | Method* | <- X0
* #-------------------#
* | local ref cookie | // 4B
* | handle scope size | // 4B
diff --git a/runtime/arch/stub_test.cc b/runtime/arch/stub_test.cc
index fac9883..44edd4b 100644
--- a/runtime/arch/stub_test.cc
+++ b/runtime/arch/stub_test.cc
@@ -225,7 +225,8 @@ class StubTest : public CommonRuntimeTest {
"cmp x1, x2\n\t"
"b.ne 1f\n\t"
- "mov %[fpr_result], #0\n\t"
+ "mov x2, #0\n\t"
+ "str x2, %[fpr_result]\n\t"
// Finish up.
"2:\n\t"
@@ -247,15 +248,16 @@ class StubTest : public CommonRuntimeTest {
// Failed fpr verification.
"1:\n\t"
- "mov %[fpr_result], #1\n\t"
+ "mov x2, #1\n\t"
+ "str x2, %[fpr_result]\n\t"
"b 2b\n\t" // Goto finish-up
// End
"3:\n\t"
- : [result] "=r" (result), [fpr_result] "=r" (fpr_result)
+ : [result] "=r" (result)
// Use the result from r0
: [arg0] "0"(arg0), [arg1] "r"(arg1), [arg2] "r"(arg2), [code] "r"(code), [self] "r"(self),
- [referrer] "r"(referrer)
+ [referrer] "r"(referrer), [fpr_result] "m" (fpr_result)
: "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17"); // clobber.
#elif defined(__x86_64__)
// Note: Uses the native convention
diff --git a/runtime/arch/x86_64/asm_support_x86_64.h b/runtime/arch/x86_64/asm_support_x86_64.h
index 29633fb..bff8501 100644
--- a/runtime/arch/x86_64/asm_support_x86_64.h
+++ b/runtime/arch/x86_64/asm_support_x86_64.h
@@ -41,5 +41,7 @@
// Expected size of a heap reference
#define HEAP_REFERENCE_SIZE 4
+// Expected size of a stack reference
+#define STACK_REFERENCE_SIZE 4
#endif // ART_RUNTIME_ARCH_X86_64_ASM_SUPPORT_X86_64_H_
diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
index 971688d..48c33d5 100644
--- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S
+++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
@@ -384,16 +384,24 @@ DEFINE_FUNCTION art_quick_invoke_stub
PUSH r9 // Save r9/shorty*.
movq %rsp, %rbp // Copy value of stack pointer into base pointer.
CFI_DEF_CFA_REGISTER(rbp)
+
movl %edx, %r10d
- addl LITERAL(64), %edx // Reserve space for return addr, method*, rbp, r8 and r9 in frame.
+ addl LITERAL(60), %edx // Reserve space for return addr, StackReference<method>, rbp,
+ // r8 and r9 in frame.
andl LITERAL(0xFFFFFFF0), %edx // Align frame size to 16 bytes.
subl LITERAL(32), %edx // Remove space for return address, rbp, r8 and r9.
subq %rdx, %rsp // Reserve stack space for argument array.
- movq LITERAL(0), (%rsp) // Store NULL for method*
+
+#if (STACK_REFERENCE_SIZE != 4)
+#error "STACK_REFERENCE_SIZE(X86_64) size not as expected."
+#endif
+ movl LITERAL(0), (%rsp) // Store NULL for method*
+
movl %r10d, %ecx // Place size of args in rcx.
movq %rdi, %rax // RAX := method to be called
movq %rsi, %r11 // R11 := arg_array
- leaq 8(%rsp), %rdi // Rdi is pointing just above the method* in the stack arguments.
+ leaq 4(%rsp), %rdi // Rdi is pointing just above the StackReference<method> in the
+ // stack arguments.
// Copy arg array into stack.
rep movsb // while (rcx--) { *rdi++ = *rsi++ }
leaq 1(%r9), %r10 // R10 := shorty + 1 ; ie skip return arg character
@@ -455,16 +463,24 @@ DEFINE_FUNCTION art_quick_invoke_static_stub
PUSH r9 // Save r9/shorty*.
movq %rsp, %rbp // Copy value of stack pointer into base pointer.
CFI_DEF_CFA_REGISTER(rbp)
+
movl %edx, %r10d
- addl LITERAL(64), %edx // Reserve space for return addr, method*, rbp, r8 and r9 in frame.
+ addl LITERAL(60), %edx // Reserve space for return addr, StackReference<method>, rbp,
+ // r8 and r9 in frame.
andl LITERAL(0xFFFFFFF0), %edx // Align frame size to 16 bytes.
subl LITERAL(32), %edx // Remove space for return address, rbp, r8 and r9.
subq %rdx, %rsp // Reserve stack space for argument array.
- movq LITERAL(0), (%rsp) // Store NULL for method*
+
+#if (STACK_REFERENCE_SIZE != 4)
+#error "STACK_REFERENCE_SIZE(X86_64) size not as expected."
+#endif
+ movl LITERAL(0), (%rsp) // Store NULL for method*
+
movl %r10d, %ecx // Place size of args in rcx.
movq %rdi, %rax // RAX := method to be called
movq %rsi, %r11 // R11 := arg_array
- leaq 8(%rsp), %rdi // Rdi is pointing just above the method* in the stack arguments.
+ leaq 4(%rsp), %rdi // Rdi is pointing just above the StackReference<method> in the
+ // stack arguments.
// Copy arg array into stack.
rep movsb // while (rcx--) { *rdi++ = *rsi++ }
leaq 1(%r9), %r10 // R10 := shorty + 1 ; ie skip return arg character
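The new LITERAL(60) replaces the old LITERAL(64) because the method slot
shrank from an 8-byte pointer to a 4-byte StackReference. A hedged model of
the reservation arithmetic (same constants as the assembly; andl rounds the
frame size down to a multiple of 16):

    #include <cstdint>

    // Bytes subtracted from rsp for the outgoing frame, given the size of
    // the argument array. Mirrors the addl/andl/subl sequence above.
    uint32_t ReservedBytes(uint32_t args_size) {
      uint32_t edx = args_size + 60;  // ret addr, rbp, r8, r9 (32 bytes) +
                                      // StackReference<method> (4) + headroom
      edx &= 0xFFFFFFF0u;             // align frame size to 16 bytes
      edx -= 32;                      // ret addr, rbp, r8, r9 already pushed
      return edx;
    }
    // e.g. args_size == 20: 80 -> 80 -> 48 bytes reserved; a multiple of 16
    // and always at least 4 (method slot) + args_size.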
diff --git a/runtime/entrypoints/quick/callee_save_frame.h b/runtime/entrypoints/quick/callee_save_frame.h
index 3fd4adc..b582abb 100644
--- a/runtime/entrypoints/quick/callee_save_frame.h
+++ b/runtime/entrypoints/quick/callee_save_frame.h
@@ -26,12 +26,12 @@ class ArtMethod;
} // namespace mirror
// Place a special frame at the TOS that will save the callee saves for the given type.
-static inline void FinishCalleeSaveFrameSetup(Thread* self, mirror::ArtMethod** sp,
+static inline void FinishCalleeSaveFrameSetup(Thread* self, StackReference<mirror::ArtMethod>* sp,
Runtime::CalleeSaveType type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// Be aware the store below may well stomp on an incoming argument.
Locks::mutator_lock_->AssertSharedHeld(self);
- *sp = Runtime::Current()->GetCalleeSaveMethod(type);
+ sp->Assign(Runtime::Current()->GetCalleeSaveMethod(type));
self->SetTopOfStack(sp, 0);
self->VerifyStack();
}
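The Assign() call is the heart of the fix. A hedged illustration (not part
of the CL) of why the raw store had to go on LP64:

    // Wrong on 64-bit: *reinterpret_cast<mirror::ArtMethod**>(sp) = m;
    // is an 8-byte store into a 4-byte slot and clobbers the bytes
    // directly above it in the frame.
    void SetFrameMethod(StackReference<mirror::ArtMethod>* sp,
                        mirror::ArtMethod* m) {
      sp->Assign(m);                                 // 4-byte compressed store
      mirror::ArtMethod* check = sp->AsMirrorPtr();  // matching 4-byte load
      (void)check;
    }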
diff --git a/runtime/entrypoints/quick/quick_alloc_entrypoints.cc b/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
index ccc0f3d..3301254 100644
--- a/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
@@ -27,32 +27,36 @@ namespace art {
#define GENERATE_ENTRYPOINTS_FOR_ALLOCATOR_INST(suffix, suffix2, instrumented_bool, allocator_type) \
extern "C" mirror::Object* artAllocObjectFromCode ##suffix##suffix2( \
- uint32_t type_idx, mirror::ArtMethod* method, Thread* self, mirror::ArtMethod** sp) \
+ uint32_t type_idx, mirror::ArtMethod* method, Thread* self, \
+ StackReference<mirror::ArtMethod>* sp) \
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \
FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); \
return AllocObjectFromCode<false, instrumented_bool>(type_idx, method, self, allocator_type); \
} \
extern "C" mirror::Object* artAllocObjectFromCodeResolved##suffix##suffix2( \
- mirror::Class* klass, mirror::ArtMethod* method, Thread* self, mirror::ArtMethod** sp) \
+ mirror::Class* klass, mirror::ArtMethod* method, Thread* self, \
+ StackReference<mirror::ArtMethod>* sp) \
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \
FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); \
return AllocObjectFromCodeResolved<instrumented_bool>(klass, method, self, allocator_type); \
} \
extern "C" mirror::Object* artAllocObjectFromCodeInitialized##suffix##suffix2( \
- mirror::Class* klass, mirror::ArtMethod* method, Thread* self, mirror::ArtMethod** sp) \
+ mirror::Class* klass, mirror::ArtMethod* method, Thread* self, \
+ StackReference<mirror::ArtMethod>* sp) \
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \
FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); \
return AllocObjectFromCodeInitialized<instrumented_bool>(klass, method, self, allocator_type); \
} \
extern "C" mirror::Object* artAllocObjectFromCodeWithAccessCheck##suffix##suffix2( \
- uint32_t type_idx, mirror::ArtMethod* method, Thread* self, mirror::ArtMethod** sp) \
+ uint32_t type_idx, mirror::ArtMethod* method, Thread* self, \
+ StackReference<mirror::ArtMethod>* sp) \
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \
FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); \
return AllocObjectFromCode<true, instrumented_bool>(type_idx, method, self, allocator_type); \
} \
extern "C" mirror::Array* artAllocArrayFromCode##suffix##suffix2( \
uint32_t type_idx, mirror::ArtMethod* method, int32_t component_count, Thread* self, \
- mirror::ArtMethod** sp) \
+ StackReference<mirror::ArtMethod>* sp) \
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \
FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); \
return AllocArrayFromCode<false, instrumented_bool>(type_idx, method, component_count, self, \
@@ -60,7 +64,7 @@ extern "C" mirror::Array* artAllocArrayFromCode##suffix##suffix2( \
} \
extern "C" mirror::Array* artAllocArrayFromCodeResolved##suffix##suffix2( \
mirror::Class* klass, mirror::ArtMethod* method, int32_t component_count, Thread* self, \
- mirror::ArtMethod** sp) \
+ StackReference<mirror::ArtMethod>* sp) \
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \
FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); \
return AllocArrayFromCodeResolved<false, instrumented_bool>(klass, method, component_count, self, \
@@ -68,7 +72,7 @@ extern "C" mirror::Array* artAllocArrayFromCodeResolved##suffix##suffix2( \
} \
extern "C" mirror::Array* artAllocArrayFromCodeWithAccessCheck##suffix##suffix2( \
uint32_t type_idx, mirror::ArtMethod* method, int32_t component_count, Thread* self, \
- mirror::ArtMethod** sp) \
+ StackReference<mirror::ArtMethod>* sp) \
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \
FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); \
return AllocArrayFromCode<true, instrumented_bool>(type_idx, method, component_count, self, \
@@ -76,7 +80,7 @@ extern "C" mirror::Array* artAllocArrayFromCodeWithAccessCheck##suffix##suffix2(
} \
extern "C" mirror::Array* artCheckAndAllocArrayFromCode##suffix##suffix2( \
uint32_t type_idx, mirror::ArtMethod* method, int32_t component_count, Thread* self, \
- mirror::ArtMethod** sp) \
+ StackReference<mirror::ArtMethod>* sp) \
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \
FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); \
if (!instrumented_bool) { \
@@ -87,7 +91,7 @@ extern "C" mirror::Array* artCheckAndAllocArrayFromCode##suffix##suffix2( \
} \
extern "C" mirror::Array* artCheckAndAllocArrayFromCodeWithAccessCheck##suffix##suffix2( \
uint32_t type_idx, mirror::ArtMethod* method, int32_t component_count, Thread* self, \
- mirror::ArtMethod** sp) \
+ StackReference<mirror::ArtMethod>* sp) \
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \
FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); \
if (!instrumented_bool) { \
diff --git a/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc b/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc
index 6448045..47fb9d6 100644
--- a/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc
@@ -28,7 +28,7 @@
namespace art {
-extern "C" void artDeoptimize(Thread* self, mirror::ArtMethod** sp)
+extern "C" void artDeoptimize(Thread* self, StackReference<mirror::ArtMethod>* sp)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
FinishCalleeSaveFrameSetup(self, sp, Runtime::kSaveAll);
self->SetException(ThrowLocation(), Thread::GetDeoptimizationException());
diff --git a/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc b/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
index ab428a5..53c9b97 100644
--- a/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
@@ -28,7 +28,7 @@ namespace art {
extern "C" mirror::Class* artInitializeStaticStorageFromCode(uint32_t type_idx,
mirror::ArtMethod* referrer,
Thread* self,
- mirror::ArtMethod** sp)
+ StackReference<mirror::ArtMethod>* sp)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// Called to ensure static storage base is initialized for direct static field reads and writes.
// A class may be accessing another class' fields when it doesn't have access, as access has been
@@ -39,7 +39,8 @@ extern "C" mirror::Class* artInitializeStaticStorageFromCode(uint32_t type_idx,
extern "C" mirror::Class* artInitializeTypeFromCode(uint32_t type_idx,
mirror::ArtMethod* referrer,
- Thread* self, mirror::ArtMethod** sp)
+ Thread* self,
+ StackReference<mirror::ArtMethod>* sp)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// Called when method->dex_cache_resolved_types_[] misses.
FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
@@ -47,10 +48,9 @@ extern "C" mirror::Class* artInitializeTypeFromCode(uint32_t type_idx,
}
extern "C" mirror::Class* artInitializeTypeAndVerifyAccessFromCode(uint32_t type_idx,
- mirror::ArtMethod* referrer,
- Thread* self,
- mirror::ArtMethod** sp)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::ArtMethod* referrer,
+ Thread* self,
+ StackReference<mirror::ArtMethod>* sp) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// Called when caller isn't guaranteed to have access to a type and the dex cache may be
// unpopulated.
FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
@@ -59,7 +59,8 @@ extern "C" mirror::Class* artInitializeTypeAndVerifyAccessFromCode(uint32_t type
extern "C" mirror::String* artResolveStringFromCode(mirror::ArtMethod* referrer,
int32_t string_idx,
- Thread* self, mirror::ArtMethod** sp)
+ Thread* self,
+ StackReference<mirror::ArtMethod>* sp)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
return ResolveStringFromCode(referrer, string_idx);
diff --git a/runtime/entrypoints/quick/quick_field_entrypoints.cc b/runtime/entrypoints/quick/quick_field_entrypoints.cc
index c38a595..844367d 100644
--- a/runtime/entrypoints/quick/quick_field_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_field_entrypoints.cc
@@ -27,7 +27,7 @@ namespace art {
extern "C" uint32_t artGet32StaticFromCode(uint32_t field_idx,
mirror::ArtMethod* referrer,
- Thread* self, mirror::ArtMethod** sp)
+ Thread* self, StackReference<mirror::ArtMethod>* sp)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
mirror::ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead,
sizeof(int32_t));
@@ -44,7 +44,7 @@ extern "C" uint32_t artGet32StaticFromCode(uint32_t field_idx,
extern "C" uint64_t artGet64StaticFromCode(uint32_t field_idx,
mirror::ArtMethod* referrer,
- Thread* self, mirror::ArtMethod** sp)
+ Thread* self, StackReference<mirror::ArtMethod>* sp)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
mirror::ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead,
sizeof(int64_t));
@@ -61,7 +61,8 @@ extern "C" uint64_t artGet64StaticFromCode(uint32_t field_idx,
extern "C" mirror::Object* artGetObjStaticFromCode(uint32_t field_idx,
mirror::ArtMethod* referrer,
- Thread* self, mirror::ArtMethod** sp)
+ Thread* self,
+ StackReference<mirror::ArtMethod>* sp)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
mirror::ArtField* field = FindFieldFast(field_idx, referrer, StaticObjectRead,
sizeof(mirror::HeapReference<mirror::Object>));
@@ -79,7 +80,7 @@ extern "C" mirror::Object* artGetObjStaticFromCode(uint32_t field_idx,
extern "C" uint32_t artGet32InstanceFromCode(uint32_t field_idx, mirror::Object* obj,
mirror::ArtMethod* referrer, Thread* self,
- mirror::ArtMethod** sp)
+ StackReference<mirror::ArtMethod>* sp)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
mirror::ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead,
sizeof(int32_t));
@@ -102,7 +103,7 @@ extern "C" uint32_t artGet32InstanceFromCode(uint32_t field_idx, mirror::Object*
extern "C" uint64_t artGet64InstanceFromCode(uint32_t field_idx, mirror::Object* obj,
mirror::ArtMethod* referrer, Thread* self,
- mirror::ArtMethod** sp)
+ StackReference<mirror::ArtMethod>* sp)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
mirror::ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead,
sizeof(int64_t));
@@ -126,7 +127,7 @@ extern "C" uint64_t artGet64InstanceFromCode(uint32_t field_idx, mirror::Object*
extern "C" mirror::Object* artGetObjInstanceFromCode(uint32_t field_idx, mirror::Object* obj,
mirror::ArtMethod* referrer,
Thread* self,
- mirror::ArtMethod** sp)
+ StackReference<mirror::ArtMethod>* sp)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
mirror::ArtField* field = FindFieldFast(field_idx, referrer, InstanceObjectRead,
sizeof(mirror::HeapReference<mirror::Object>));
@@ -149,7 +150,7 @@ extern "C" mirror::Object* artGetObjInstanceFromCode(uint32_t field_idx, mirror:
extern "C" int artSet32StaticFromCode(uint32_t field_idx, uint32_t new_value,
mirror::ArtMethod* referrer, Thread* self,
- mirror::ArtMethod** sp)
+ StackReference<mirror::ArtMethod>* sp)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
mirror::ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite,
sizeof(int32_t));
@@ -169,7 +170,8 @@ extern "C" int artSet32StaticFromCode(uint32_t field_idx, uint32_t new_value,
}
extern "C" int artSet64StaticFromCode(uint32_t field_idx, mirror::ArtMethod* referrer,
- uint64_t new_value, Thread* self, mirror::ArtMethod** sp)
+ uint64_t new_value, Thread* self,
+ StackReference<mirror::ArtMethod>* sp)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
mirror::ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite,
sizeof(int64_t));
@@ -190,7 +192,7 @@ extern "C" int artSet64StaticFromCode(uint32_t field_idx, mirror::ArtMethod* ref
extern "C" int artSetObjStaticFromCode(uint32_t field_idx, mirror::Object* new_value,
mirror::ArtMethod* referrer, Thread* self,
- mirror::ArtMethod** sp)
+ StackReference<mirror::ArtMethod>* sp)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
mirror::ArtField* field = FindFieldFast(field_idx, referrer, StaticObjectWrite,
sizeof(mirror::HeapReference<mirror::Object>));
@@ -214,7 +216,7 @@ extern "C" int artSetObjStaticFromCode(uint32_t field_idx, mirror::Object* new_v
extern "C" int artSet32InstanceFromCode(uint32_t field_idx, mirror::Object* obj, uint32_t new_value,
mirror::ArtMethod* referrer, Thread* self,
- mirror::ArtMethod** sp)
+ StackReference<mirror::ArtMethod>* sp)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
mirror::ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite,
sizeof(int32_t));
@@ -240,13 +242,15 @@ extern "C" int artSet32InstanceFromCode(uint32_t field_idx, mirror::Object* obj,
}
extern "C" int artSet64InstanceFromCode(uint32_t field_idx, mirror::Object* obj, uint64_t new_value,
- Thread* self, mirror::ArtMethod** sp)
+ Thread* self, StackReference<mirror::ArtMethod>* sp)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
Runtime* runtime = Runtime::Current();
mirror::ArtMethod* callee_save = runtime->GetCalleeSaveMethod(Runtime::kRefsOnly);
uint32_t frame_size =
runtime->GetCalleeSaveMethodFrameInfo(Runtime::kRefsOnly).FrameSizeInBytes();
- mirror::ArtMethod* referrer = sp[frame_size / sizeof(mirror::ArtMethod*)];
+ mirror::ArtMethod* referrer =
+ reinterpret_cast<StackReference<mirror::ArtMethod>*>(
+ reinterpret_cast<uint8_t*>(sp) + frame_size)->AsMirrorPtr();
mirror::ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite,
sizeof(int64_t));
if (LIKELY(field != NULL && obj != NULL)) {
@@ -254,7 +258,7 @@ extern "C" int artSet64InstanceFromCode(uint32_t field_idx, mirror::Object* obj,
field->Set64<false>(obj, new_value);
return 0; // success
}
- *sp = callee_save;
+ sp->Assign(callee_save);
self->SetTopOfStack(sp, 0);
field = FindFieldFromCode<InstancePrimitiveWrite, true>(field_idx, referrer, self,
sizeof(int64_t));
@@ -274,7 +278,7 @@ extern "C" int artSet64InstanceFromCode(uint32_t field_idx, mirror::Object* obj,
extern "C" int artSetObjInstanceFromCode(uint32_t field_idx, mirror::Object* obj,
mirror::Object* new_value,
mirror::ArtMethod* referrer, Thread* self,
- mirror::ArtMethod** sp)
+ StackReference<mirror::ArtMethod>* sp)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
mirror::ArtField* field = FindFieldFast(field_idx, referrer, InstanceObjectWrite,
sizeof(mirror::HeapReference<mirror::Object>));
diff --git a/runtime/entrypoints/quick/quick_fillarray_entrypoints.cc b/runtime/entrypoints/quick/quick_fillarray_entrypoints.cc
index 8dac750..4ec2879 100644
--- a/runtime/entrypoints/quick/quick_fillarray_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_fillarray_entrypoints.cc
@@ -39,7 +39,7 @@ namespace art {
*/
extern "C" int artHandleFillArrayDataFromCode(mirror::Array* array,
const Instruction::ArrayDataPayload* payload,
- Thread* self, mirror::ArtMethod** sp)
+ Thread* self, StackReference<mirror::ArtMethod>* sp)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
DCHECK_EQ(payload->ident, static_cast<uint16_t>(Instruction::kArrayDataSignature));
diff --git a/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc b/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc
index 11a4b3b..6ef075d 100644
--- a/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc
@@ -26,7 +26,7 @@ namespace art {
extern "C" const void* artInstrumentationMethodEntryFromCode(mirror::ArtMethod* method,
mirror::Object* this_object,
Thread* self,
- mirror::ArtMethod** sp,
+ StackReference<mirror::ArtMethod>* sp,
uintptr_t lr)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsAndArgs);
@@ -40,7 +40,8 @@ extern "C" const void* artInstrumentationMethodEntryFromCode(mirror::ArtMethod*
return result;
}
-extern "C" uint64_t artInstrumentationMethodExitFromCode(Thread* self, mirror::ArtMethod** sp,
+extern "C" uint64_t artInstrumentationMethodExitFromCode(Thread* self,
+ StackReference<mirror::ArtMethod>* sp,
uint64_t gpr_result, uint64_t fpr_result)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// TODO: use FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly) not the hand inlined below.
@@ -50,7 +51,7 @@ extern "C" uint64_t artInstrumentationMethodExitFromCode(Thread* self, mirror::A
Locks::mutator_lock_->AssertSharedHeld(self);
Runtime* runtime = Runtime::Current();
mirror::ArtMethod* callee_save = runtime->GetCalleeSaveMethod(Runtime::kRefsOnly);
- *sp = callee_save;
+ sp->Assign(callee_save);
uint32_t return_pc_offset = callee_save->GetReturnPcOffsetInBytes(
runtime->GetCalleeSaveMethodFrameInfo(Runtime::kRefsOnly).FrameSizeInBytes());
uintptr_t* return_pc = reinterpret_cast<uintptr_t*>(reinterpret_cast<byte*>(sp) +
diff --git a/runtime/entrypoints/quick/quick_jni_entrypoints.cc b/runtime/entrypoints/quick/quick_jni_entrypoints.cc
index 5d36b4c..140b075 100644
--- a/runtime/entrypoints/quick/quick_jni_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_jni_entrypoints.cc
@@ -34,7 +34,7 @@ extern uint32_t JniMethodStart(Thread* self) {
DCHECK(env != nullptr);
uint32_t saved_local_ref_cookie = env->local_ref_cookie;
env->local_ref_cookie = env->locals.GetSegmentState();
- mirror::ArtMethod* native_method = *self->GetManagedStack()->GetTopQuickFrame();
+ mirror::ArtMethod* native_method = self->GetManagedStack()->GetTopQuickFrame()->AsMirrorPtr();
if (!native_method->IsFastNative()) {
// When not fast JNI we transition out of runnable.
self->TransitionFromRunnableToSuspended(kNative);
@@ -49,7 +49,7 @@ extern uint32_t JniMethodStartSynchronized(jobject to_lock, Thread* self) {
// TODO: NO_THREAD_SAFETY_ANALYSIS due to different control paths depending on fast JNI.
static void GoToRunnable(Thread* self) NO_THREAD_SAFETY_ANALYSIS {
- mirror::ArtMethod* native_method = *self->GetManagedStack()->GetTopQuickFrame();
+ mirror::ArtMethod* native_method = self->GetManagedStack()->GetTopQuickFrame()->AsMirrorPtr();
bool is_fast = native_method->IsFastNative();
if (!is_fast) {
self->TransitionFromSuspendedToRunnable();
diff --git a/runtime/entrypoints/quick/quick_lock_entrypoints.cc b/runtime/entrypoints/quick/quick_lock_entrypoints.cc
index 817d053..92c0841 100644
--- a/runtime/entrypoints/quick/quick_lock_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_lock_entrypoints.cc
@@ -20,7 +20,8 @@
namespace art {
-extern "C" int artLockObjectFromCode(mirror::Object* obj, Thread* self, mirror::ArtMethod** sp)
+extern "C" int artLockObjectFromCode(mirror::Object* obj, Thread* self,
+ StackReference<mirror::ArtMethod>* sp)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
NO_THREAD_SAFETY_ANALYSIS /* EXCLUSIVE_LOCK_FUNCTION(Monitor::monitor_lock_) */ {
FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
@@ -42,7 +43,8 @@ extern "C" int artLockObjectFromCode(mirror::Object* obj, Thread* self, mirror::
}
}
-extern "C" int artUnlockObjectFromCode(mirror::Object* obj, Thread* self, mirror::ArtMethod** sp)
+extern "C" int artUnlockObjectFromCode(mirror::Object* obj, Thread* self,
+ StackReference<mirror::ArtMethod>* sp)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
NO_THREAD_SAFETY_ANALYSIS /* UNLOCK_FUNCTION(Monitor::monitor_lock_) */ {
FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
diff --git a/runtime/entrypoints/quick/quick_thread_entrypoints.cc b/runtime/entrypoints/quick/quick_thread_entrypoints.cc
index 53e725e..f61c754 100644
--- a/runtime/entrypoints/quick/quick_thread_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_thread_entrypoints.cc
@@ -28,7 +28,7 @@ void CheckSuspendFromCode(Thread* thread)
CheckSuspend(thread);
}
-extern "C" void artTestSuspendFromCode(Thread* thread, mirror::ArtMethod** sp)
+extern "C" void artTestSuspendFromCode(Thread* thread, StackReference<mirror::ArtMethod>* sp)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// Called when suspend count check value is 0 and thread->suspend_count_ != 0
FinishCalleeSaveFrameSetup(thread, sp, Runtime::kRefsOnly);
diff --git a/runtime/entrypoints/quick/quick_throw_entrypoints.cc b/runtime/entrypoints/quick/quick_throw_entrypoints.cc
index 31eacac..e6f294a 100644
--- a/runtime/entrypoints/quick/quick_throw_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_throw_entrypoints.cc
@@ -25,7 +25,8 @@
namespace art {
// Deliver an exception that's pending on thread helping set up a callee save frame on the way.
-extern "C" void artDeliverPendingExceptionFromCode(Thread* thread, mirror::ArtMethod** sp)
+extern "C" void artDeliverPendingExceptionFromCode(Thread* thread,
+ StackReference<mirror::ArtMethod>* sp)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
FinishCalleeSaveFrameSetup(thread, sp, Runtime::kSaveAll);
thread->QuickDeliverException();
@@ -33,7 +34,7 @@ extern "C" void artDeliverPendingExceptionFromCode(Thread* thread, mirror::ArtMe
// Called by generated call to throw an exception.
extern "C" void artDeliverExceptionFromCode(mirror::Throwable* exception, Thread* self,
- mirror::ArtMethod** sp)
+ StackReference<mirror::ArtMethod>* sp)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
/*
* exception may be NULL, in which case this routine should
@@ -55,7 +56,7 @@ extern "C" void artDeliverExceptionFromCode(mirror::Throwable* exception, Thread
// Called by generated call to throw a NPE exception.
extern "C" void artThrowNullPointerExceptionFromCode(Thread* self,
- mirror::ArtMethod** sp)
+ StackReference<mirror::ArtMethod>* sp)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
FinishCalleeSaveFrameSetup(self, sp, Runtime::kSaveAll);
ThrowLocation throw_location = self->GetCurrentLocationForThrow();
@@ -64,8 +65,7 @@ extern "C" void artThrowNullPointerExceptionFromCode(Thread* self,
}
// Called by generated call to throw an arithmetic divide by zero exception.
-extern "C" void artThrowDivZeroFromCode(Thread* self,
- mirror::ArtMethod** sp)
+extern "C" void artThrowDivZeroFromCode(Thread* self, StackReference<mirror::ArtMethod>* sp)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
FinishCalleeSaveFrameSetup(self, sp, Runtime::kSaveAll);
ThrowArithmeticExceptionDivideByZero();
@@ -74,14 +74,14 @@ extern "C" void artThrowDivZeroFromCode(Thread* self,
// Called by generated call to throw an array index out of bounds exception.
extern "C" void artThrowArrayBoundsFromCode(int index, int length, Thread* self,
- mirror::ArtMethod** sp)
+ StackReference<mirror::ArtMethod>*sp)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
FinishCalleeSaveFrameSetup(self, sp, Runtime::kSaveAll);
ThrowArrayIndexOutOfBoundsException(index, length);
self->QuickDeliverException();
}
-extern "C" void artThrowStackOverflowFromCode(Thread* self, mirror::ArtMethod** sp)
+extern "C" void artThrowStackOverflowFromCode(Thread* self, StackReference<mirror::ArtMethod>* sp)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
FinishCalleeSaveFrameSetup(self, sp, Runtime::kSaveAll);
ThrowStackOverflowError(self);
@@ -89,7 +89,7 @@ extern "C" void artThrowStackOverflowFromCode(Thread* self, mirror::ArtMethod**
}
extern "C" void artThrowNoSuchMethodFromCode(int32_t method_idx, Thread* self,
- mirror::ArtMethod** sp)
+ StackReference<mirror::ArtMethod>* sp)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
FinishCalleeSaveFrameSetup(self, sp, Runtime::kSaveAll);
ThrowNoSuchMethodError(method_idx);
@@ -97,7 +97,7 @@ extern "C" void artThrowNoSuchMethodFromCode(int32_t method_idx, Thread* self,
}
extern "C" void artThrowClassCastException(mirror::Class* dest_type, mirror::Class* src_type,
- Thread* self, mirror::ArtMethod** sp)
+ Thread* self, StackReference<mirror::ArtMethod>* sp)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
FinishCalleeSaveFrameSetup(self, sp, Runtime::kSaveAll);
CHECK(!dest_type->IsAssignableFrom(src_type));
@@ -106,7 +106,7 @@ extern "C" void artThrowClassCastException(mirror::Class* dest_type, mirror::Cla
}
extern "C" void artThrowArrayStoreException(mirror::Object* array, mirror::Object* value,
- Thread* self, mirror::ArtMethod** sp)
+ Thread* self, StackReference<mirror::ArtMethod>* sp)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
FinishCalleeSaveFrameSetup(self, sp, Runtime::kSaveAll);
ThrowArrayStoreException(value->GetClass(), array->GetClass());
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index 554bff4..1d524cb 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -195,22 +195,22 @@ class QuickArgumentVisitor {
#endif
public:
- static mirror::ArtMethod* GetCallingMethod(mirror::ArtMethod** sp)
+ static mirror::ArtMethod* GetCallingMethod(StackReference<mirror::ArtMethod>* sp)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- DCHECK((*sp)->IsCalleeSaveMethod());
+ DCHECK(sp->AsMirrorPtr()->IsCalleeSaveMethod());
byte* previous_sp = reinterpret_cast<byte*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_FrameSize;
- return *reinterpret_cast<mirror::ArtMethod**>(previous_sp);
+ return reinterpret_cast<StackReference<mirror::ArtMethod>*>(previous_sp)->AsMirrorPtr();
}
// For the given quick ref and args quick frame, return the caller's PC.
- static uintptr_t GetCallingPc(mirror::ArtMethod** sp)
+ static uintptr_t GetCallingPc(StackReference<mirror::ArtMethod>* sp)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- DCHECK((*sp)->IsCalleeSaveMethod());
+ DCHECK(sp->AsMirrorPtr()->IsCalleeSaveMethod());
byte* lr = reinterpret_cast<byte*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_LrOffset;
return *reinterpret_cast<uintptr_t*>(lr);
}
- QuickArgumentVisitor(mirror::ArtMethod** sp, bool is_static,
+ QuickArgumentVisitor(StackReference<mirror::ArtMethod>* sp, bool is_static,
const char* shorty, uint32_t shorty_len)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) :
is_static_(is_static), shorty_(shorty), shorty_len_(shorty_len),
@@ -383,12 +383,12 @@ class QuickArgumentVisitor {
if (kQuickSoftFloatAbi) {
CHECK_EQ(kNumQuickFprArgs, 0U);
return (kNumQuickGprArgs * GetBytesPerGprSpillLocation(kRuntimeISA))
- + GetBytesPerGprSpillLocation(kRuntimeISA) /* ArtMethod* */;
+ + sizeof(StackReference<mirror::ArtMethod>) /* StackReference<ArtMethod> */;
} else {
// For now, there is no reg-spill area for the targets with
// hard float ABI. So, the offset pointing to the first method's
// parameter ('this' for non-static methods) should be returned.
- return GetBytesPerGprSpillLocation(kRuntimeISA); // Skip Method*.
+ return sizeof(StackReference<mirror::ArtMethod>); // Skip StackReference<ArtMethod>.
}
}
@@ -410,8 +410,9 @@ class QuickArgumentVisitor {
// Visits arguments on the stack placing them into the shadow frame.
class BuildQuickShadowFrameVisitor FINAL : public QuickArgumentVisitor {
public:
- BuildQuickShadowFrameVisitor(mirror::ArtMethod** sp, bool is_static, const char* shorty,
- uint32_t shorty_len, ShadowFrame* sf, size_t first_arg_reg) :
+ BuildQuickShadowFrameVisitor(StackReference<mirror::ArtMethod>* sp, bool is_static,
+ const char* shorty, uint32_t shorty_len, ShadowFrame* sf,
+ size_t first_arg_reg) :
QuickArgumentVisitor(sp, is_static, shorty, shorty_len), sf_(sf), cur_reg_(first_arg_reg) {}
void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;
@@ -457,7 +458,7 @@ void BuildQuickShadowFrameVisitor::Visit() {
}
extern "C" uint64_t artQuickToInterpreterBridge(mirror::ArtMethod* method, Thread* self,
- mirror::ArtMethod** sp)
+ StackReference<mirror::ArtMethod>* sp)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// Ensure we don't get thread suspension until the object arguments are safely in the shadow
// frame.
@@ -510,9 +511,9 @@ extern "C" uint64_t artQuickToInterpreterBridge(mirror::ArtMethod* method, Threa
// to jobjects.
class BuildQuickArgumentVisitor FINAL : public QuickArgumentVisitor {
public:
- BuildQuickArgumentVisitor(mirror::ArtMethod** sp, bool is_static, const char* shorty,
- uint32_t shorty_len, ScopedObjectAccessUnchecked* soa,
- std::vector<jvalue>* args) :
+ BuildQuickArgumentVisitor(StackReference<mirror::ArtMethod>* sp, bool is_static,
+ const char* shorty, uint32_t shorty_len,
+ ScopedObjectAccessUnchecked* soa, std::vector<jvalue>* args) :
QuickArgumentVisitor(sp, is_static, shorty, shorty_len), soa_(soa), args_(args) {}
void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;
@@ -577,7 +578,7 @@ void BuildQuickArgumentVisitor::FixupReferences() {
// field within the proxy object, which will box the primitive arguments and deal with error cases.
extern "C" uint64_t artQuickProxyInvokeHandler(mirror::ArtMethod* proxy_method,
mirror::Object* receiver,
- Thread* self, mirror::ArtMethod** sp)
+ Thread* self, StackReference<mirror::ArtMethod>* sp)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
DCHECK(proxy_method->IsProxyMethod()) << PrettyMethod(proxy_method);
DCHECK(receiver->GetClass()->IsProxyClass()) << PrettyMethod(proxy_method);
@@ -585,7 +586,7 @@ extern "C" uint64_t artQuickProxyInvokeHandler(mirror::ArtMethod* proxy_method,
const char* old_cause =
self->StartAssertNoThreadSuspension("Adding to IRT proxy object arguments");
// Register the top of the managed stack, making stack crawlable.
- DCHECK_EQ(*sp, proxy_method) << PrettyMethod(proxy_method);
+ DCHECK_EQ(sp->AsMirrorPtr(), proxy_method) << PrettyMethod(proxy_method);
self->SetTopOfStack(sp, 0);
DCHECK_EQ(proxy_method->GetFrameSizeInBytes(),
Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs)->GetFrameSizeInBytes())
@@ -629,8 +630,9 @@ extern "C" uint64_t artQuickProxyInvokeHandler(mirror::ArtMethod* proxy_method,
// so they don't get garbage collected.
class RememberForGcArgumentVisitor FINAL : public QuickArgumentVisitor {
public:
- RememberForGcArgumentVisitor(mirror::ArtMethod** sp, bool is_static, const char* shorty,
- uint32_t shorty_len, ScopedObjectAccessUnchecked* soa) :
+ RememberForGcArgumentVisitor(StackReference<mirror::ArtMethod>* sp, bool is_static,
+ const char* shorty, uint32_t shorty_len,
+ ScopedObjectAccessUnchecked* soa) :
QuickArgumentVisitor(sp, is_static, shorty, shorty_len), soa_(soa) {}
void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;
@@ -666,7 +668,8 @@ void RememberForGcArgumentVisitor::FixupReferences() {
// Lazily resolve a method for quick. Called by stub code.
extern "C" const void* artQuickResolutionTrampoline(mirror::ArtMethod* called,
mirror::Object* receiver,
- Thread* self, mirror::ArtMethod** sp)
+ Thread* self,
+ StackReference<mirror::ArtMethod>* sp)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsAndArgs);
// Start new JNI local reference state
@@ -821,7 +824,7 @@ extern "C" const void* artQuickResolutionTrampoline(mirror::ArtMethod* called,
// Fixup any locally saved objects may have moved during a GC.
visitor.FixupReferences();
// Place called method in callee-save frame to be placed as first argument to quick method.
- *sp = called;
+ sp->Assign(called);
return code;
}
@@ -1171,14 +1174,14 @@ class ComputeGenericJniFrameSize FINAL {
}
// WARNING: After this, *sp won't be pointing to the method anymore!
- void ComputeLayout(mirror::ArtMethod*** m, bool is_static, const char* shorty, uint32_t shorty_len,
- void* sp, HandleScope** table, uint32_t* handle_scope_entries,
- uintptr_t** start_stack, uintptr_t** start_gpr, uint32_t** start_fpr,
- void** code_return, size_t* overall_size)
+ void ComputeLayout(StackReference<mirror::ArtMethod>** m, bool is_static, const char* shorty,
+ uint32_t shorty_len, void* sp, HandleScope** table,
+ uint32_t* handle_scope_entries, uintptr_t** start_stack, uintptr_t** start_gpr,
+ uint32_t** start_fpr, void** code_return, size_t* overall_size)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ComputeAll(is_static, shorty, shorty_len);
- mirror::ArtMethod* method = **m;
+ mirror::ArtMethod* method = (*m)->AsMirrorPtr();
uint8_t* sp8 = reinterpret_cast<uint8_t*>(sp);
@@ -1186,20 +1189,30 @@ class ComputeGenericJniFrameSize FINAL {
// We have to squeeze in the HandleScope, and relocate the method pointer.
// "Free" the slot for the method.
- sp8 += kPointerSize;
+ sp8 += kPointerSize; // In the callee-save frame we use a full pointer.
- // Add the HandleScope.
+ // Under the callee saves put handle scope and new method stack reference.
*handle_scope_entries = num_handle_scope_references_;
- size_t handle_scope_size = HandleScope::GetAlignedHandleScopeSize(num_handle_scope_references_);
- sp8 -= handle_scope_size;
- *table = reinterpret_cast<HandleScope*>(sp8);
+
+ size_t handle_scope_size = HandleScope::SizeOf(num_handle_scope_references_);
+ size_t scope_and_method = handle_scope_size + sizeof(StackReference<mirror::ArtMethod>);
+
+ sp8 -= scope_and_method;
+ // Align by kStackAlignment
+ uintptr_t sp_to_align = reinterpret_cast<uintptr_t>(sp8);
+ sp_to_align = RoundDown(sp_to_align, kStackAlignment);
+ sp8 = reinterpret_cast<uint8_t*>(sp_to_align);
+
+ uint8_t* sp8_table = sp8 + sizeof(StackReference<mirror::ArtMethod>);
+ *table = reinterpret_cast<HandleScope*>(sp8_table);
(*table)->SetNumberOfReferences(num_handle_scope_references_);
// Add a slot for the method pointer, and fill it. Fix the pointer-pointer given to us.
- sp8 -= kPointerSize;
uint8_t* method_pointer = sp8;
- *(reinterpret_cast<mirror::ArtMethod**>(method_pointer)) = method;
- *m = reinterpret_cast<mirror::ArtMethod**>(method_pointer);
+ StackReference<mirror::ArtMethod>* new_method_ref =
+ reinterpret_cast<StackReference<mirror::ArtMethod>*>(method_pointer);
+ new_method_ref->Assign(method);
+ *m = new_method_ref;
// Reference cookie and padding
sp8 -= 8;
@@ -1306,8 +1319,8 @@ class ComputeGenericJniFrameSize FINAL {
// of transitioning into native code.
class BuildGenericJniFrameVisitor FINAL : public QuickArgumentVisitor {
public:
- BuildGenericJniFrameVisitor(mirror::ArtMethod*** sp, bool is_static, const char* shorty,
- uint32_t shorty_len, Thread* self) :
+ BuildGenericJniFrameVisitor(StackReference<mirror::ArtMethod>** sp, bool is_static,
+ const char* shorty, uint32_t shorty_len, Thread* self) :
QuickArgumentVisitor(*sp, is_static, shorty, shorty_len), sm_(this) {
ComputeGenericJniFrameSize fsc;
fsc.ComputeLayout(sp, is_static, shorty, shorty_len, *sp, &handle_scope_, &handle_scope_expected_refs_,
@@ -1320,7 +1333,7 @@ class BuildGenericJniFrameVisitor FINAL : public QuickArgumentVisitor {
sm_.AdvancePointer(self->GetJniEnv());
if (is_static) {
- sm_.AdvanceHandleScope((**sp)->GetDeclaringClass());
+ sm_.AdvanceHandleScope((*sp)->AsMirrorPtr()->GetDeclaringClass());
}
}
@@ -1488,9 +1501,9 @@ void artQuickGenericJniEndJNINonRef(Thread* self, uint32_t cookie, jobject lock)
* 1) How many bytes of the alloca can be released, if the value is non-negative.
* 2) An error, if the value is negative.
*/
-extern "C" ssize_t artQuickGenericJniTrampoline(Thread* self, mirror::ArtMethod** sp)
+extern "C" ssize_t artQuickGenericJniTrampoline(Thread* self, StackReference<mirror::ArtMethod>* sp)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- mirror::ArtMethod* called = *sp;
+ mirror::ArtMethod* called = sp->AsMirrorPtr();
DCHECK(called->IsNative()) << PrettyMethod(called, true);
// run the visitor
@@ -1562,17 +1575,18 @@ extern "C" ssize_t artQuickGenericJniTrampoline(Thread* self, mirror::ArtMethod*
* Is called after the native JNI code. Responsible for cleanup (handle scope, saved state) and
* unlocking.
*/
-extern "C" uint64_t artQuickGenericJniEndTrampoline(Thread* self, mirror::ArtMethod** sp,
+extern "C" uint64_t artQuickGenericJniEndTrampoline(Thread* self,
+ StackReference<mirror::ArtMethod>* sp,
jvalue result, uint64_t result_f)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
uint32_t* sp32 = reinterpret_cast<uint32_t*>(sp);
- mirror::ArtMethod* called = *sp;
+ mirror::ArtMethod* called = sp->AsMirrorPtr();
uint32_t cookie = *(sp32 - 1);
jobject lock = nullptr;
if (called->IsSynchronized()) {
HandleScope* table = reinterpret_cast<HandleScope*>(
- reinterpret_cast<uint8_t*>(sp) + kPointerSize);
+ reinterpret_cast<uint8_t*>(sp) + sizeof(StackReference<mirror::ArtMethod>));
lock = table->GetHandle(0).ToJObject();
}
@@ -1669,12 +1683,12 @@ static MethodAndCode GetSuccessValue(const void* code, mirror::ArtMethod* method
template<InvokeType type, bool access_check>
static MethodAndCode artInvokeCommon(uint32_t method_idx, mirror::Object* this_object,
mirror::ArtMethod* caller_method,
- Thread* self, mirror::ArtMethod** sp);
+ Thread* self, StackReference<mirror::ArtMethod>* sp);
template<InvokeType type, bool access_check>
static MethodAndCode artInvokeCommon(uint32_t method_idx, mirror::Object* this_object,
mirror::ArtMethod* caller_method,
- Thread* self, mirror::ArtMethod** sp) {
+ Thread* self, StackReference<mirror::ArtMethod>* sp) {
mirror::ArtMethod* method = FindMethodFast(method_idx, this_object, caller_method, access_check,
type);
if (UNLIKELY(method == nullptr)) {
@@ -1714,7 +1728,8 @@ static MethodAndCode artInvokeCommon(uint32_t method_idx, mirror::Object* this_o
MethodAndCode artInvokeCommon<type, access_check>(uint32_t method_idx, \
mirror::Object* this_object, \
mirror::ArtMethod* caller_method, \
- Thread* self, mirror::ArtMethod** sp) \
+ Thread* self, \
+ StackReference<mirror::ArtMethod>* sp) \
EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kVirtual, false);
EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kVirtual, true);
@@ -1731,48 +1746,43 @@ EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kSuper, true);
// See comments in runtime_support_asm.S
extern "C" MethodAndCode artInvokeInterfaceTrampolineWithAccessCheck(uint32_t method_idx,
- mirror::Object* this_object,
- mirror::ArtMethod* caller_method,
- Thread* self,
- mirror::ArtMethod** sp)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::Object* this_object,
+ mirror::ArtMethod* caller_method,
+ Thread* self,
+ StackReference<mirror::ArtMethod>* sp) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return artInvokeCommon<kInterface, true>(method_idx, this_object, caller_method, self, sp);
}
extern "C" MethodAndCode artInvokeDirectTrampolineWithAccessCheck(uint32_t method_idx,
- mirror::Object* this_object,
- mirror::ArtMethod* caller_method,
- Thread* self,
- mirror::ArtMethod** sp)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::Object* this_object,
+ mirror::ArtMethod* caller_method,
+ Thread* self,
+ StackReference<mirror::ArtMethod>* sp) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return artInvokeCommon<kDirect, true>(method_idx, this_object, caller_method, self, sp);
}
extern "C" MethodAndCode artInvokeStaticTrampolineWithAccessCheck(uint32_t method_idx,
- mirror::Object* this_object,
- mirror::ArtMethod* caller_method,
- Thread* self,
- mirror::ArtMethod** sp)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::Object* this_object,
+ mirror::ArtMethod* caller_method,
+ Thread* self,
+ StackReference<mirror::ArtMethod>* sp) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return artInvokeCommon<kStatic, true>(method_idx, this_object, caller_method, self, sp);
}
extern "C" MethodAndCode artInvokeSuperTrampolineWithAccessCheck(uint32_t method_idx,
- mirror::Object* this_object,
- mirror::ArtMethod* caller_method,
- Thread* self,
- mirror::ArtMethod** sp)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::Object* this_object,
+ mirror::ArtMethod* caller_method,
+ Thread* self,
+ StackReference<mirror::ArtMethod>* sp) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return artInvokeCommon<kSuper, true>(method_idx, this_object, caller_method, self, sp);
}
extern "C" MethodAndCode artInvokeVirtualTrampolineWithAccessCheck(uint32_t method_idx,
- mirror::Object* this_object,
- mirror::ArtMethod* caller_method,
- Thread* self,
- mirror::ArtMethod** sp)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::Object* this_object,
+ mirror::ArtMethod* caller_method,
+ Thread* self,
+ StackReference<mirror::ArtMethod>* sp) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return artInvokeCommon<kVirtual, true>(method_idx, this_object, caller_method, self, sp);
}
@@ -1780,7 +1790,8 @@ extern "C" MethodAndCode artInvokeVirtualTrampolineWithAccessCheck(uint32_t meth
extern "C" MethodAndCode artInvokeInterfaceTrampoline(mirror::ArtMethod* interface_method,
mirror::Object* this_object,
mirror::ArtMethod* caller_method,
- Thread* self, mirror::ArtMethod** sp)
+ Thread* self,
+ StackReference<mirror::ArtMethod>* sp)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
mirror::ArtMethod* method;
if (LIKELY(interface_method->GetDexMethodIndex() != DexFile::kDexNoIndex)) {
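The ComputeLayout hunk above is the trickiest part of the CL: it frees the
callee-save frame's full method-pointer slot, carves out the HandleScope
plus a 4-byte method reference below it, then realigns. A hedged sketch of
the pointer arithmetic (assuming kPointerSize == 8 and kStackAlignment == 16,
as on the 64-bit targets):

    #include <cstddef>
    #include <cstdint>

    // Returns the new bottom of the frame (the StackReference<ArtMethod>
    // slot); *table_out receives the HandleScope address just above it.
    uint8_t* LayoutGenericJniFrame(uint8_t* sp8, size_t handle_scope_size,
                                   uint8_t** table_out) {
      sp8 += 8;                      // "free" the old full method* slot
      sp8 -= handle_scope_size + 4;  // handle scope + StackReference<method>
      sp8 = reinterpret_cast<uint8_t*>(
          reinterpret_cast<uintptr_t>(sp8) & ~uintptr_t{15});  // RoundDown(16)
      *table_out = sp8 + 4;          // HandleScope sits above the method slot
      return sp8;                    // new method reference slot
    }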
diff --git a/runtime/exception_test.cc b/runtime/exception_test.cc
index 751cdb6..99633a3 100644
--- a/runtime/exception_test.cc
+++ b/runtime/exception_test.cc
@@ -192,7 +192,9 @@ TEST_F(ExceptionTest, StackTraceElement) {
fake_stack.push_back(0);
// Set up thread to appear as if we called out of method_g_ at pc dex 3
- thread->SetTopOfStack(reinterpret_cast<mirror::ArtMethod**>(&fake_stack[0]), method_g_->ToNativePc(dex_pc)); // return pc
+ thread->SetTopOfStack(
+ reinterpret_cast<StackReference<mirror::ArtMethod>*>(&fake_stack[0]),
+ method_g_->ToNativePc(dex_pc)); // return pc
} else {
// Create/push fake 20-byte shadow frame for method g
fake_stack.push_back(0);
diff --git a/runtime/fault_handler.cc b/runtime/fault_handler.cc
index 15c38c1..6b216c7 100644
--- a/runtime/fault_handler.cc
+++ b/runtime/fault_handler.cc
@@ -250,7 +250,8 @@ bool JavaStackTraceHandler::Action(int sig, siginfo_t* siginfo, void* context) {
manager_->GetMethodAndReturnPCAndSP(context, &method, &return_pc, &sp);
Thread* self = Thread::Current();
// Inside of generated code, sp[0] is the method, so sp is the frame.
- mirror::ArtMethod** frame = reinterpret_cast<mirror::ArtMethod**>(sp);
+ StackReference<mirror::ArtMethod>* frame =
+ reinterpret_cast<StackReference<mirror::ArtMethod>*>(sp);
self->SetTopOfStack(frame, 0); // Since we don't necessarily have a dex pc, pass in 0.
self->DumpJavaStack(LOG(ERROR));
}
diff --git a/runtime/handle_scope.h b/runtime/handle_scope.h
index f2e059d..8ff7086 100644
--- a/runtime/handle_scope.h
+++ b/runtime/handle_scope.h
@@ -51,20 +51,12 @@ class HandleScope {
return header_size + data_size;
}
- // Get the size of the handle scope for the number of entries, with padding added for potential alignment.
- static size_t GetAlignedHandleScopeSize(uint32_t num_references) {
- size_t handle_scope_size = SizeOf(num_references);
- return RoundUp(handle_scope_size, 8);
- }
-
- // Get the size of the handle scope for the number of entries, with padding added for potential alignment.
- static size_t GetAlignedHandleScopeSizeTarget(size_t pointer_size, uint32_t num_references) {
+ // Returns the size of a HandleScope containing num_references handles.
+ static size_t SizeOf(size_t pointer_size, uint32_t num_references) {
// Assume that the layout is packed.
size_t header_size = pointer_size + sizeof(number_of_references_);
- // This assumes there is no layout change between 32 and 64b.
size_t data_size = sizeof(StackReference<mirror::Object>) * num_references;
- size_t handle_scope_size = header_size + data_size;
- return RoundUp(handle_scope_size, 8);
+ return header_size + data_size;
}
// Link to previous HandleScope or null.
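
A worked example of the new SizeOf, assuming a 4-byte number_of_references_
field and 4-byte StackReference slots, as the surrounding code implies:

    #include <cstddef>
    #include <cstdint>

    size_t HandleScopeSizeOf(size_t pointer_size, uint32_t num_references) {
      size_t header_size = pointer_size + sizeof(uint32_t);  // link_ + count
      size_t data_size = 4u * num_references;                // compressed slots
      return header_size + data_size;                        // no rounding here
    }

    // 64-bit target, 5 references: 8 + 4 + 5 * 4 = 32 bytes.

Alignment is no longer baked into the size query; callers that need it (such
as the frame-size computation below) round up themselves, which is why the
two GetAligned... variants could be dropped.
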
diff --git a/runtime/mirror/art_method-inl.h b/runtime/mirror/art_method-inl.h
index 39efa58..5f4619b 100644
--- a/runtime/mirror/art_method-inl.h
+++ b/runtime/mirror/art_method-inl.h
@@ -296,10 +296,16 @@ inline QuickMethodFrameInfo ArtMethod::GetQuickFrameInfo() {
// Generic JNI frame.
DCHECK(IsNative());
uint32_t handle_refs = MethodHelper(this).GetNumberOfReferenceArgsWithoutReceiver() + 1;
- size_t scope_size = HandleScope::GetAlignedHandleScopeSize(handle_refs);
+ size_t scope_size = HandleScope::SizeOf(handle_refs);
QuickMethodFrameInfo callee_info = runtime->GetCalleeSaveMethodFrameInfo(Runtime::kRefsAndArgs);
- return QuickMethodFrameInfo(callee_info.FrameSizeInBytes() + scope_size,
- callee_info.CoreSpillMask(), callee_info.FpSpillMask());
+
+  // Callee saves + handle scope + method ref, rounded up to stack alignment.
+ size_t frame_size = RoundUp(callee_info.FrameSizeInBytes() + scope_size
+ - kPointerSize // callee-save frame stores a whole method pointer
+ + sizeof(StackReference<mirror::ArtMethod>),
+ kStackAlignment);
+
+ return QuickMethodFrameInfo(frame_size, callee_info.CoreSpillMask(), callee_info.FpSpillMask());
}
const void* code_pointer = EntryPointToCodePointer(entry_point);
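
The frame-size arithmetic above, spelled out with illustrative 64-bit
constants (kPointerSize = 8 and kStackAlignment = 16 are assumptions for the
example, not values taken from this patch):

    #include <cstddef>

    size_t GenericJniFrameSize(size_t callee_frame_size, size_t scope_size) {
      constexpr size_t kPointerSize = 8;     // old layout: full method pointer
      constexpr size_t kStackRefSize = 4;    // new layout: compressed reference
      constexpr size_t kStackAlignment = 16;
      size_t unaligned =
          callee_frame_size + scope_size - kPointerSize + kStackRefSize;
      return (unaligned + kStackAlignment - 1) & ~(kStackAlignment - 1);  // RoundUp
    }

    // e.g. a 176-byte callee-save frame plus a 32-byte handle scope:
    // 176 + 32 - 8 + 4 = 204, rounded up to 208.
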
diff --git a/runtime/native/scoped_fast_native_object_access.h b/runtime/native/scoped_fast_native_object_access.h
index 822aefa..606d62d 100644
--- a/runtime/native/scoped_fast_native_object_access.h
+++ b/runtime/native/scoped_fast_native_object_access.h
@@ -31,7 +31,7 @@ class ScopedFastNativeObjectAccess : public ScopedObjectAccessAlreadyRunnable {
SHARED_LOCK_FUNCTION(Locks::mutator_lock_) ALWAYS_INLINE
: ScopedObjectAccessAlreadyRunnable(env) {
Locks::mutator_lock_->AssertSharedHeld(Self());
- DCHECK((*Self()->GetManagedStack()->GetTopQuickFrame())->IsFastNative());
+ DCHECK(Self()->GetManagedStack()->GetTopQuickFrame()->AsMirrorPtr()->IsFastNative());
// Don't work with raw objects in non-runnable states.
DCHECK_EQ(Self()->GetState(), kRunnable);
}
diff --git a/runtime/quick_exception_handler.cc b/runtime/quick_exception_handler.cc
index 8300195..243818c 100644
--- a/runtime/quick_exception_handler.cc
+++ b/runtime/quick_exception_handler.cc
@@ -42,7 +42,7 @@ void QuickExceptionHandler::FindCatch(const ThrowLocation& throw_location,
CatchBlockStackVisitor visitor(self_, context_, &exception_ref, this);
visitor.WalkStack(true);
- mirror::ArtMethod* catch_method = *handler_quick_frame_;
+ mirror::ArtMethod* catch_method = handler_quick_frame_->AsMirrorPtr();
if (kDebugExceptionDelivery) {
if (catch_method == nullptr) {
LOG(INFO) << "Handler is upcall";
diff --git a/runtime/quick_exception_handler.h b/runtime/quick_exception_handler.h
index ef3766c..2597ebd 100644
--- a/runtime/quick_exception_handler.h
+++ b/runtime/quick_exception_handler.h
@@ -19,6 +19,7 @@
#include "base/logging.h"
#include "base/mutex.h"
+#include "stack.h" // StackReference
namespace art {
@@ -50,7 +51,7 @@ class QuickExceptionHandler {
void UpdateInstrumentationStack() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void DoLongJump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void SetHandlerQuickFrame(mirror::ArtMethod** handler_quick_frame) {
+ void SetHandlerQuickFrame(StackReference<mirror::ArtMethod>* handler_quick_frame) {
handler_quick_frame_ = handler_quick_frame;
}
@@ -77,7 +78,7 @@ class QuickExceptionHandler {
// Is method tracing active?
const bool method_tracing_active_;
// Quick frame with found handler or last frame if no handler found.
- mirror::ArtMethod** handler_quick_frame_;
+ StackReference<mirror::ArtMethod>* handler_quick_frame_;
// PC to branch to for the handler.
uintptr_t handler_quick_frame_pc_;
// Associated dex PC.
diff --git a/runtime/stack.cc b/runtime/stack.cc
index be1fba4..6633159 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -205,16 +205,16 @@ void StackVisitor::SetGPR(uint32_t reg, uintptr_t value) {
}
uintptr_t StackVisitor::GetReturnPc() const {
- mirror::ArtMethod** sp = GetCurrentQuickFrame();
+ byte* sp = reinterpret_cast<byte*>(GetCurrentQuickFrame());
DCHECK(sp != NULL);
- byte* pc_addr = reinterpret_cast<byte*>(sp) + GetMethod()->GetReturnPcOffsetInBytes();
+ byte* pc_addr = sp + GetMethod()->GetReturnPcOffsetInBytes();
return *reinterpret_cast<uintptr_t*>(pc_addr);
}
void StackVisitor::SetReturnPc(uintptr_t new_ret_pc) {
- mirror::ArtMethod** sp = GetCurrentQuickFrame();
+ byte* sp = reinterpret_cast<byte*>(GetCurrentQuickFrame());
CHECK(sp != NULL);
- byte* pc_addr = reinterpret_cast<byte*>(sp) + GetMethod()->GetReturnPcOffsetInBytes();
+ byte* pc_addr = sp + GetMethod()->GetReturnPcOffsetInBytes();
*reinterpret_cast<uintptr_t*>(pc_addr) = new_ret_pc;
}
@@ -307,7 +307,7 @@ void StackVisitor::WalkStack(bool include_transitions) {
if (cur_quick_frame_ != NULL) { // Handle quick stack frames.
// Can't be both a shadow and a quick fragment.
DCHECK(current_fragment->GetTopShadowFrame() == NULL);
- mirror::ArtMethod* method = *cur_quick_frame_;
+ mirror::ArtMethod* method = cur_quick_frame_->AsMirrorPtr();
while (method != NULL) {
SanityCheckFrame();
bool should_continue = VisitFrame();
@@ -352,9 +352,9 @@ void StackVisitor::WalkStack(bool include_transitions) {
}
cur_quick_frame_pc_ = return_pc;
byte* next_frame = reinterpret_cast<byte*>(cur_quick_frame_) + frame_size;
- cur_quick_frame_ = reinterpret_cast<mirror::ArtMethod**>(next_frame);
+ cur_quick_frame_ = reinterpret_cast<StackReference<mirror::ArtMethod>*>(next_frame);
cur_depth_++;
- method = *cur_quick_frame_;
+ method = cur_quick_frame_->AsMirrorPtr();
}
} else if (cur_shadow_frame_ != NULL) {
do {
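
The walk itself keeps its shape: frames are chained by byte offsets, and only
the width of the method slot changes. A sketch of the two address
computations above, with types simplified as in the earlier StackRef model:

    #include <cstddef>
    #include <cstdint>

    struct FrameRef { uint32_t reference_; };  // stand-in for the method slot

    // The return PC lives at a method-specific byte offset from the frame base.
    uintptr_t GetReturnPc(FrameRef* frame, size_t return_pc_offset) {
      uint8_t* pc_addr = reinterpret_cast<uint8_t*>(frame) + return_pc_offset;
      return *reinterpret_cast<uintptr_t*>(pc_addr);
    }

    // The next quick frame starts frame_size bytes above the current one.
    FrameRef* NextQuickFrame(FrameRef* frame, size_t frame_size) {
      uint8_t* next = reinterpret_cast<uint8_t*>(frame) + frame_size;
      return reinterpret_cast<FrameRef*>(next);
    }
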
diff --git a/runtime/stack.h b/runtime/stack.h
index 2e32f51..e93fcbc 100644
--- a/runtime/stack.h
+++ b/runtime/stack.h
@@ -429,11 +429,11 @@ class PACKED(4) ManagedStack {
return link_;
}
- mirror::ArtMethod** GetTopQuickFrame() const {
+ StackReference<mirror::ArtMethod>* GetTopQuickFrame() const {
return top_quick_frame_;
}
- void SetTopQuickFrame(mirror::ArtMethod** top) {
+ void SetTopQuickFrame(StackReference<mirror::ArtMethod>* top) {
DCHECK(top_shadow_frame_ == NULL);
top_quick_frame_ = top;
}
@@ -491,7 +491,7 @@ class PACKED(4) ManagedStack {
private:
ManagedStack* link_;
ShadowFrame* top_shadow_frame_;
- mirror::ArtMethod** top_quick_frame_;
+ StackReference<mirror::ArtMethod>* top_quick_frame_;
uintptr_t top_quick_frame_pc_;
};
@@ -512,17 +512,7 @@ class StackVisitor {
if (cur_shadow_frame_ != nullptr) {
return cur_shadow_frame_->GetMethod();
} else if (cur_quick_frame_ != nullptr) {
- return *cur_quick_frame_;
- } else {
- return nullptr;
- }
- }
-
- mirror::ArtMethod** GetMethodAddress() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- if (cur_shadow_frame_ != nullptr) {
- return cur_shadow_frame_->GetMethodAddress();
- } else if (cur_quick_frame_ != nullptr) {
- return cur_quick_frame_;
+ return cur_quick_frame_->AsMirrorPtr();
} else {
return nullptr;
}
@@ -578,7 +568,8 @@ class StackVisitor {
void SetGPR(uint32_t reg, uintptr_t value);
// This is a fast-path for getting/setting values in a quick frame.
- uint32_t* GetVRegAddr(mirror::ArtMethod** cur_quick_frame, const DexFile::CodeItem* code_item,
+ uint32_t* GetVRegAddr(StackReference<mirror::ArtMethod>* cur_quick_frame,
+ const DexFile::CodeItem* code_item,
uint32_t core_spills, uint32_t fp_spills, size_t frame_size,
uint16_t vreg) const {
int offset = GetVRegOffset(code_item, core_spills, fp_spills, frame_size, vreg, kRuntimeISA);
@@ -679,7 +670,7 @@ class StackVisitor {
return cur_quick_frame_pc_;
}
- mirror::ArtMethod** GetCurrentQuickFrame() const {
+ StackReference<mirror::ArtMethod>* GetCurrentQuickFrame() const {
return cur_quick_frame_;
}
@@ -688,7 +679,7 @@ class StackVisitor {
}
HandleScope* GetCurrentHandleScope() const {
- mirror::ArtMethod** sp = GetCurrentQuickFrame();
+ StackReference<mirror::ArtMethod>* sp = GetCurrentQuickFrame();
++sp; // Skip Method*; handle scope comes next;
return reinterpret_cast<HandleScope*>(sp);
}
@@ -706,7 +697,7 @@ class StackVisitor {
Thread* const thread_;
ShadowFrame* cur_shadow_frame_;
- mirror::ArtMethod** cur_quick_frame_;
+ StackReference<mirror::ArtMethod>* cur_quick_frame_;
uintptr_t cur_quick_frame_pc_;
// Lazily computed, number of frames in the stack.
size_t num_frames_;
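
One subtlety in GetCurrentHandleScope above: ++sp now advances by
sizeof(StackReference<mirror::ArtMethod>) == 4 bytes rather than a full
pointer, matching the generic-JNI layout where the handle scope sits
immediately above the compressed method slot. Sketched with the same
simplified types:

    #include <cstdint>

    struct FrameRef { uint32_t reference_; };
    struct ScopeStub {};  // placeholder for the real HandleScope

    ScopeStub* GetCurrentHandleScope(FrameRef* sp) {
      ++sp;  // steps over 4 bytes; with ArtMethod** it stepped over 8 on 64-bit
      return reinterpret_cast<ScopeStub*>(sp);
    }
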
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 1355aa1..a8135e0 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -2012,9 +2012,14 @@ class ReferenceMapVisitor : public StackVisitor {
private:
void VisitQuickFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- mirror::ArtMethod** method_addr = GetMethodAddress();
- visitor_(reinterpret_cast<mirror::Object**>(method_addr), 0 /*ignored*/, this);
- mirror::ArtMethod* m = *method_addr;
+ StackReference<mirror::ArtMethod>* cur_quick_frame = GetCurrentQuickFrame();
+ mirror::ArtMethod* m = cur_quick_frame->AsMirrorPtr();
+ mirror::ArtMethod* old_method = m;
+ visitor_(reinterpret_cast<mirror::Object**>(&m), 0 /*ignored*/, this);
+ if (m != old_method) {
+ cur_quick_frame->Assign(m);
+ }
+
// Process register map (which native and runtime methods don't have)
if (!m->IsNative() && !m->IsRuntimeMethod() && !m->IsProxyMethod()) {
const uint8_t* native_gc_map = m->GetNativeGcMap();
@@ -2035,7 +2040,7 @@ class ReferenceMapVisitor : public StackVisitor {
const VmapTable vmap_table(m->GetVmapTable(code_pointer));
QuickMethodFrameInfo frame_info = m->GetQuickFrameInfo(code_pointer);
// For all dex registers in the bitmap
- mirror::ArtMethod** cur_quick_frame = GetCurrentQuickFrame();
+ StackReference<mirror::ArtMethod>* cur_quick_frame = GetCurrentQuickFrame();
DCHECK(cur_quick_frame != nullptr);
for (size_t reg = 0; reg < num_regs; ++reg) {
// Does this register hold a reference?
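
The VisitQuickFrame change above replaces visiting the slot in place with a
read/visit/write-back pattern: the visitor still sees a real Object**, so a
moving collector can update it, while the frame keeps its compressed
encoding. Sketched with the simplified slot type (the visitor signature is
reduced for the example):

    #include <cstdint>

    struct FrameRef {
      uint32_t reference_;
      void* AsMirrorPtr() const {
        return reinterpret_cast<void*>(static_cast<uintptr_t>(reference_));
      }
      void Assign(void* ptr) {
        reference_ = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(ptr));
      }
    };

    template <typename Visitor>
    void VisitMethodSlot(FrameRef* frame, Visitor&& visit) {
      void* m = frame->AsMirrorPtr();
      void* old_method = m;
      visit(&m);            // the collector may update m if the method moved
      if (m != old_method) {
        frame->Assign(m);   // write back, re-compressing to 32 bits
      }
    }
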
diff --git a/runtime/thread.h b/runtime/thread.h
index 08bbcae..4601248 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -345,7 +345,7 @@ class Thread {
ThrowLocation GetCurrentLocationForThrow() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void SetTopOfStack(mirror::ArtMethod** top_method, uintptr_t pc) {
+ void SetTopOfStack(StackReference<mirror::ArtMethod>* top_method, uintptr_t pc) {
tlsPtr_.managed_stack.SetTopQuickFrame(top_method);
tlsPtr_.managed_stack.SetTopQuickFramePc(pc);
}